language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sphinx-doc__sphinx | sphinx/ext/autodoc/_legacy_class_based/_documenters.py | {
"start": 94901,
"end": 96598
} | class ____(DataDocumenterMixinBase):
"""Mixin for AttributeDocumenter to provide the feature for supporting __slots__."""
def isslotsattribute(self) -> bool:
"""Check the subject is an attribute in __slots__."""
try:
if parent___slots__ := inspect.getslots(self.parent):
return self.objpath[-1] in parent___slots__
else:
return False
except (ValueError, TypeError):
return False
def import_object(self, raiseerror: bool = False) -> bool:
ret = super().import_object(raiseerror) # type: ignore[misc]
if self.isslotsattribute():
self.object = SLOTSATTR
return ret
def should_suppress_value_header(self) -> bool:
if self.object is SLOTSATTR:
return True
else:
return super().should_suppress_value_header()
def get_doc(self) -> list[list[str]] | None:
if self.object is SLOTSATTR:
try:
parent___slots__ = inspect.getslots(self.parent)
if parent___slots__ and (
docstring := parent___slots__.get(self.objpath[-1])
):
docstring = prepare_docstring(docstring)
return [docstring]
else:
return []
except ValueError as exc:
logger.warning(
__('Invalid __slots__ found on %s. Ignored.'),
(self.parent.__qualname__, exc),
type='autodoc',
)
return []
else:
return super().get_doc() # type: ignore[misc]
| SlotsMixin |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 122365,
"end": 122528
} | class ____(BaseModel, extra="forbid"):
target: "ShardKey" = Field(..., description="")
fallback: "ShardKey" = Field(..., description="")
| ShardKeyWithFallback |
python | pypa__warehouse | tests/unit/packaging/test_models.py | {
"start": 47282,
"end": 54219
} | class ____:
def test_upload_limit_size_with_no_limits(self, db_session):
project = DBProjectFactory.create(upload_limit=None)
assert project.upload_limit_size == MAX_FILESIZE
def test_upload_limit_size_with_project_limit(self, db_session):
project_limit = 50 * ONE_MIB
project = DBProjectFactory.create(upload_limit=project_limit)
# Should use the most generous limit
expected = max(MAX_FILESIZE, project_limit)
assert project.upload_limit_size == expected
def test_upload_limit_size_with_organization_limit(self, db_session):
org_limit = 100 * ONE_MIB
organization = DBOrganizationFactory.create(
orgtype=OrganizationType.Company, upload_limit=org_limit
)
project = DBProjectFactory.create(upload_limit=None)
# Manually set organization relationship since it's complex
project.organization = organization
# Should use the most generous limit
expected = max(MAX_FILESIZE, org_limit)
assert project.upload_limit_size == expected
def test_upload_limit_size_with_both_limits(self, db_session):
project_limit = 50 * ONE_MIB
org_limit = 100 * ONE_MIB
organization = DBOrganizationFactory.create(
orgtype=OrganizationType.Company, upload_limit=org_limit
)
project = DBProjectFactory.create(upload_limit=project_limit)
project.organization = organization
# Should use the most generous limit
expected = max(MAX_FILESIZE, project_limit, org_limit)
assert project.upload_limit_size == expected
def test_total_size_limit_value_with_no_limits(self, db_session):
project = DBProjectFactory.create(total_size_limit=None)
assert project.total_size_limit_value == MAX_PROJECT_SIZE
def test_total_size_limit_value_with_project_limit(self, db_session):
project_limit = 50 * ONE_GIB
project = DBProjectFactory.create(total_size_limit=project_limit)
# Should use the most generous limit
expected = max(MAX_PROJECT_SIZE, project_limit)
assert project.total_size_limit_value == expected
def test_total_size_limit_value_with_organization_limit(self, db_session):
org_limit = 100 * ONE_GIB
organization = DBOrganizationFactory.create(
orgtype=OrganizationType.Company, total_size_limit=org_limit
)
project = DBProjectFactory.create(total_size_limit=None)
project.organization = organization
# Should use the most generous limit
expected = max(MAX_PROJECT_SIZE, org_limit)
assert project.total_size_limit_value == expected
def test_total_size_limit_value_with_both_limits(self, db_session):
project_limit = 50 * ONE_GIB
org_limit = 100 * ONE_GIB
organization = DBOrganizationFactory.create(
orgtype=OrganizationType.Company, total_size_limit=org_limit
)
project = DBProjectFactory.create(total_size_limit=project_limit)
project.organization = organization
# Should use the most generous limit
expected = max(MAX_PROJECT_SIZE, project_limit, org_limit)
assert project.total_size_limit_value == expected
def test_upload_limit_size_edge_case_with_zero_limits(self, db_session):
"""Edge case: test behavior with zero/negative limits"""
# Create organization with zero limit (should be filtered out)
organization = DBOrganizationFactory.create(
orgtype=OrganizationType.Company, upload_limit=0
)
project = DBProjectFactory.create(upload_limit=0)
project.organization = organization
# Should fall back to system default since zero limits are filtered out
assert project.upload_limit_size == MAX_FILESIZE
def test_total_size_limit_value_edge_case_with_zero_limits(self, db_session):
"""Edge case: test behavior with zero/negative limits"""
# Create organization with zero limit (should be filtered out)
organization = DBOrganizationFactory.create(
orgtype=OrganizationType.Company, total_size_limit=0
)
project = DBProjectFactory.create(total_size_limit=0)
project.organization = organization
# Should fall back to system default since zero limits are filtered out
assert project.total_size_limit_value == MAX_PROJECT_SIZE
def test_upload_limit_size_edge_case_all_none_fallback(self, db_session):
"""Edge case: test fallback when all custom limits are None"""
# Create project with no organization and no limits
project = DBProjectFactory.create(upload_limit=None, total_size_limit=None)
# Explicitly ensure no organization
project.organization = None
# Should return system default even with all None values
assert project.upload_limit_size == MAX_FILESIZE
def test_total_size_limit_value_edge_case_all_none_fallback(self, db_session):
"""Edge case: test fallback when all custom limits are None"""
# Create project with no organization and no limits
project = DBProjectFactory.create(upload_limit=None, total_size_limit=None)
# Explicitly ensure no organization
project.organization = None
# Should return system default even with all None values
assert project.total_size_limit_value == MAX_PROJECT_SIZE
def test_upload_limit_size_large_values(self, db_session):
"""Edge case: test with very large limit values within INTEGER range"""
# Test with large values (1GB) - within INTEGER range
large_limit = 1000 * ONE_MIB # 1GB
organization = DBOrganizationFactory.create(
orgtype=OrganizationType.Company, upload_limit=large_limit
)
project = DBProjectFactory.create(upload_limit=500 * ONE_MIB) # 500MB
project.organization = organization
# Should use the largest value (1GB org limit)
expected = max(MAX_FILESIZE, 500 * ONE_MIB, large_limit)
assert project.upload_limit_size == expected
assert project.upload_limit_size == large_limit
def test_total_size_limit_value_large_values(self, db_session):
"""Edge case: test with very large limit values"""
# Test with very large values (10TB)
large_limit = 10000 * ONE_GIB
organization = DBOrganizationFactory.create(
orgtype=OrganizationType.Company, total_size_limit=large_limit
)
project = DBProjectFactory.create(total_size_limit=5000 * ONE_GIB)
project.organization = organization
# Should use the largest value (10TB org limit)
expected = max(MAX_PROJECT_SIZE, 5000 * ONE_GIB, large_limit)
assert project.total_size_limit_value == expected
assert project.total_size_limit_value == large_limit
| TestProjectLimitProperties |
python | pypa__pipenv | pipenv/vendor/click/shell_completion.py | {
"start": 10520,
"end": 11129
} | class ____(ShellComplete):
"""Shell completion for Zsh."""
name = "zsh"
source_template = _SOURCE_ZSH
def get_completion_args(self) -> t.Tuple[t.List[str], str]:
cwords = split_arg_string(os.environ["COMP_WORDS"])
cword = int(os.environ["COMP_CWORD"])
args = cwords[1:cword]
try:
incomplete = cwords[cword]
except IndexError:
incomplete = ""
return args, incomplete
def format_completion(self, item: CompletionItem) -> str:
return f"{item.type}\n{item.value}\n{item.help if item.help else '_'}"
| ZshComplete |
python | matplotlib__matplotlib | lib/matplotlib/units.py | {
"start": 3480,
"end": 4266
} | class ____:
"""
The minimal interface for a converter to take custom data types (or
sequences) and convert them to values Matplotlib can use.
"""
@staticmethod
def axisinfo(unit, axis):
"""Return an `.AxisInfo` for the axis with the specified units."""
return None
@staticmethod
def default_units(x, axis):
"""Return the default unit for *x* or ``None`` for the given axis."""
return None
@staticmethod
def convert(obj, unit, axis):
"""
Convert *obj* using *unit* for the specified *axis*.
If *obj* is a sequence, return the converted sequence. The output must
be a sequence of scalars that can be used by the numpy array layer.
"""
return obj
| ConversionInterface |
python | gevent__gevent | src/greentest/3.14/test_ssl.py | {
"start": 114650,
"end": 193093
} | class ____(unittest.TestCase):
@support.requires_resource('walltime')
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception)
)
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn(
'Cannot create a client socket with a PROTOCOL_TLS_SERVER context',
str(e.exception))
@unittest.skipUnless(support.Py_GIL_DISABLED, "test is only useful if the GIL is disabled")
def test_ssl_in_multiple_threads(self):
# See GH-124984: OpenSSL is not thread safe.
threads = []
warnings_filters = sys.flags.context_aware_warnings
global USE_SAME_TEST_CONTEXT
USE_SAME_TEST_CONTEXT = True
try:
for func in (
self.test_echo,
self.test_alpn_protocols,
self.test_getpeercert,
self.test_crl_check,
functools.partial(
self.test_check_hostname_idn,
warnings_filters=warnings_filters,
),
self.test_wrong_cert_tls12,
self.test_wrong_cert_tls13,
):
# Be careful with the number of threads here.
# Too many can result in failing tests.
for num in range(5):
with self.subTest(func=func, num=num):
threads.append(Thread(target=func))
with threading_helper.catch_threading_exception() as cm:
for thread in threads:
with self.subTest(thread=thread):
thread.start()
for thread in threads:
with self.subTest(thread=thread):
thread.join()
if cm.exc_value is not None:
# Some threads can skip their test
if not isinstance(cm.exc_value, unittest.SkipTest):
raise cm.exc_value
finally:
USE_SAME_TEST_CONTEXT = False
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# getpeercert() raise ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
# Allow for flexible libssl error messages.
regex = re.compile(r"""(
certificate verify failed # OpenSSL
|
CERTIFICATE_VERIFY_FAILED # AWS-LC
)""", re.X)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError, regex):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
# Allow for flexible libssl error messages.
regex = re.compile(r"""(
certificate verify failed # OpenSSL
|
CERTIFICATE_VERIFY_FAILED # AWS-LC
)""", re.X)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError, regex):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
@unittest.skipUnless(
ssl.HAS_NEVER_CHECK_COMMON_NAME, "test requires hostname_checks_common_name"
)
def test_hostname_checks_common_name(self):
client_context, server_context, hostname = testing_context()
assert client_context.hostname_checks_common_name
client_context.hostname_checks_common_name = False
# default cert has a SAN
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
client_context, server_context, hostname = testing_context(NOSANFILE)
client_context.hostname_checks_common_name = False
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLCertVerificationError):
s.connect((HOST, server.port))
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
@unittest.skipUnless(IS_OPENSSL_3_0_0,
"test requires RFC 5280 check added in OpenSSL 3.0+")
def test_verify_strict(self):
# verification fails by default, since the server cert is non-conforming
client_context = ssl.create_default_context()
client_context.load_verify_locations(LEAF_MISSING_AKI_CA)
hostname = LEAF_MISSING_AKI_CERTFILE_HOSTNAME
server_context = ssl.create_default_context(purpose=Purpose.CLIENT_AUTH)
server_context.load_cert_chain(LEAF_MISSING_AKI_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError):
s.connect((HOST, server.port))
# explicitly disabling VERIFY_X509_STRICT allows it to succeed
client_context = ssl.create_default_context()
client_context.load_verify_locations(LEAF_MISSING_AKI_CA)
client_context.verify_flags &= ~ssl.VERIFY_X509_STRICT
server_context = ssl.create_default_context(purpose=Purpose.CLIENT_AUTH)
server_context.load_cert_chain(LEAF_MISSING_AKI_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self, warnings_filters=True):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
with (
ThreadedEchoServer(context=server_context, chatty=True) as server,
(
warnings_helper.check_no_resource_warning(self)
if warnings_filters
else nullcontext()
),
self.assertRaises(UnicodeError),
):
context.wrap_socket(socket.socket(), server_hostname='.pythontest.net')
with (
ThreadedEchoServer(context=server_context, chatty=True) as server,
(
warnings_helper.check_no_resource_warning(self)
if warnings_filters
else nullcontext()
),
self.assertRaises(UnicodeDecodeError),
):
context.wrap_socket(
socket.socket(),
server_hostname=b'k\xf6nig.idn.pythontest.net',
)
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname,
suppress_ragged_eofs=False) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(
OSError,
'alert unknown ca|EOF occurred|TLSV1_ALERT_UNKNOWN_CA|'
'closed by the remote host|Connection reset by peer|'
'Broken pipe'
):
# TLS 1.3 perform client cert exchange after handshake
s.write(b'data')
s.read(1000)
s.write(b'should have failed already')
s.read(1000)
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = socket_helper.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
self.fail("Expected connection failure")
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
# Allow for flexible libssl error messages.
regex = f"({msg}|CERTIFICATE_VERIFY_FAILED)"
self.assertRegex(repr(e), regex)
regex = re.compile(r"""(
certificate verify failed # OpenSSL
|
CERTIFICATE_VERIFY_FAILED # AWS-LC
)""", re.X)
self.assertRegex(repr(e), regex)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
if has_tls_protocol(ssl.PROTOCOL_TLSv1):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
if has_tls_protocol(ssl.PROTOCOL_TLSv1_1):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(True)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
    def test_socketserver(self):
        """Using socketserver to create and manage SSL connections."""
        server = make_https_server(self, certfile=SIGNED_CERTFILE)
        # try to connect
        if support.verbose:
            sys.stdout.write('\n')
        # Get this test file itself:
        with open(__file__, 'rb') as f:
            d1 = f.read()
        d2 = ''
        # now fetch the same data from the HTTPS server
        url = f'https://localhost:{server.port}/test_ssl.py'
        context = ssl.create_default_context(cafile=SIGNING_CA)
        f = urllib.request.urlopen(url, context=context)
        try:
            dlen = f.info().get("content-length")
            if dlen and (int(dlen) > 0):
                d2 = f.read(int(dlen))
                if support.verbose:
                    sys.stdout.write(
                        " client: read %d bytes from remote server '%s'\n"
                        % (len(d2), server))
        finally:
            f.close()
        # The HTTPS round-trip must return the file byte-for-byte.
        self.assertEqual(d1, d2)
    def test_asyncore_server(self):
        """Check the example asyncore integration."""
        if support.verbose:
            sys.stdout.write("\n")
        indata = b"FOO\n"
        server = AsyncoreEchoServer(CERTFILE)
        with server:
            s = test_wrap_socket(socket.socket())
            s.connect(('127.0.0.1', server.port))
            if support.verbose:
                sys.stdout.write(
                    " client: sending %r...\n" % indata)
            s.write(indata)
            outdata = s.read()
            if support.verbose:
                sys.stdout.write(" client: read %r\n" % outdata)
            # The echo server lowercases what it receives; compare against
            # the lowercased input.
            if outdata != indata.lower():
                self.fail(
                    "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                    % (outdata[:20], len(outdata),
                       indata[:20].lower(), len(indata)))
            s.write(b"over\n")
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            s.close()
            if support.verbose:
                sys.stdout.write(" client: connection closed.\n")
    def test_recv_send(self):
        """Test recv(), send() and friends."""
        if support.verbose:
            sys.stdout.write("\n")
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLS_SERVER,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = test_wrap_socket(socket.socket(),
                                 server_side=False,
                                 certfile=CERTFILE,
                                 ca_certs=CERTFILE,
                                 cert_reqs=ssl.CERT_NONE)
            s.connect((HOST, server.port))
            # helper methods for standardising recv* method signatures
            def _recv_into():
                b = bytearray(b"\0"*100)
                count = s.recv_into(b)
                return b[:count]
            def _recvfrom_into():
                b = bytearray(b"\0"*100)
                count, addr = s.recvfrom_into(b)
                return b[:count]
            # (name, method, expect success?, *args, return value func)
            # sendto/recvfrom take an address and are expected to fail on an
            # SSL socket with a ValueError mentioning the method name.
            send_methods = [
                ('send', s.send, True, [], len),
                ('sendto', s.sendto, False, ["some.address"], len),
                ('sendall', s.sendall, True, [], lambda x: None),
            ]
            # (name, method, whether to expect success, *args)
            recv_methods = [
                ('recv', s.recv, True, []),
                ('recvfrom', s.recvfrom, False, ["some.address"]),
                ('recv_into', _recv_into, True, []),
                ('recvfrom_into', _recvfrom_into, False, []),
            ]
            data_prefix = "PREFIX_"
            # Exercise each send-family method; the echo server replies with
            # the lowercased payload.
            for (meth_name, send_meth, expect_success, args,
                    ret_val_meth) in send_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    ret = send_meth(indata, *args)
                    msg = "sending with {}".format(meth_name)
                    self.assertEqual(ret, ret_val_meth(indata), msg=msg)
                    outdata = s.read()
                    if outdata != indata.lower():
                        self.fail(
                            "While sending with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to send with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
            # Same exercise for the recv-family methods.
            for meth_name, recv_meth, expect_success, args in recv_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    s.send(indata)
                    outdata = recv_meth(*args)
                    if outdata != indata.lower():
                        self.fail(
                            "While receiving with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to receive with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
                    # consume data
                    s.read()
            # read(-1, buffer) is supported, even though read(-1) is not
            data = b"data"
            s.send(data)
            buffer = bytearray(len(data))
            self.assertEqual(s.read(-1, buffer), len(data))
            self.assertEqual(buffer, data)
            # sendall accepts bytes-like objects
            if ctypes is not None:
                ubyte = ctypes.c_ubyte * len(data)
                byteslike = ubyte.from_buffer_copy(data)
                s.sendall(byteslike)
                self.assertEqual(s.read(), data)
            # Make sure sendmsg et al are disallowed to avoid
            # inadvertent disclosure of data and/or corruption
            # of the encrypted data stream
            self.assertRaises(NotImplementedError, s.dup)
            self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
            self.assertRaises(NotImplementedError, s.recvmsg, 100)
            self.assertRaises(NotImplementedError,
                              s.recvmsg_into, [bytearray(100)])
            s.write(b"over\n")
            self.assertRaises(ValueError, s.recv, -1)
            self.assertRaises(ValueError, s.read, -1)
            s.close()
    def test_recv_zero(self):
        """recv(0)/read(0) return b'' immediately, even when non-blocking."""
        server = ThreadedEchoServer(CERTFILE)
        self.enterContext(server)
        s = socket.create_connection((HOST, server.port))
        self.addCleanup(s.close)
        s = test_wrap_socket(s, suppress_ragged_eofs=False)
        self.addCleanup(s.close)
        # recv/read(0) should return no data
        s.send(b"data")
        self.assertEqual(s.recv(0), b"")
        self.assertEqual(s.read(0), b"")
        self.assertEqual(s.read(), b"data")
        # Should not block if the other end sends no data
        s.setblocking(False)
        self.assertEqual(s.recv(0), b"")
        self.assertEqual(s.recv_into(bytearray()), 0)
    def test_recv_into_buffer_protocol_len(self):
        """recv_into() sizes the target via the buffer protocol, not len()."""
        server = ThreadedEchoServer(CERTFILE)
        self.enterContext(server)
        s = socket.create_connection((HOST, server.port))
        self.addCleanup(s.close)
        s = test_wrap_socket(s, suppress_ragged_eofs=False)
        self.addCleanup(s.close)
        s.send(b"data")
        # An 'I' array has 4-byte items; 4 received bytes fill one item.
        buf = array.array('I', [0, 0])
        self.assertEqual(s.recv_into(buf), 4)
        self.assertEqual(bytes(buf)[:4], b"data")
        # B.__len__ deliberately raises: if recv_into() ever consulted
        # len() instead of the buffer protocol, this would blow up.
        class B(bytearray):
            def __len__(self):
                1/0
        s.send(b"data")
        buf = B(6)
        self.assertEqual(s.recv_into(buf), 4)
        self.assertEqual(bytes(buf), b"data\0\0")
    def test_nonblocking_send(self):
        """A non-blocking send eventually raises SSLWantWrite/ReadError
        once the transmit buffers fill up, instead of blocking."""
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLS_SERVER,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = test_wrap_socket(socket.socket(),
                                 server_side=False,
                                 certfile=CERTFILE,
                                 ca_certs=CERTFILE,
                                 cert_reqs=ssl.CERT_NONE)
            s.connect((HOST, server.port))
            s.setblocking(False)
            # If we keep sending data, at some point the buffers
            # will be full and the call will block
            buf = bytearray(8192)
            def fill_buffer():
                while True:
                    s.send(buf)
            self.assertRaises((ssl.SSLWantWriteError,
                               ssl.SSLWantReadError), fill_buffer)
            # Now read all the output and discard it
            s.setblocking(True)
            s.close()
    def test_handshake_timeout(self):
        """The SSL handshake must respect the socket timeout (issue #5103).

        A plain TCP server accepts connections but never speaks TLS, so a
        client handshake against it can only complete by timing out.
        """
        # Issue #5103: SSL handshake must respect the socket timeout
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = socket_helper.bind_port(server)
        started = threading.Event()
        finish = False
        def serve():
            server.listen()
            started.set()
            conns = []
            while not finish:
                r, w, e = select.select([server], [], [], 0.1)
                if server in r:
                    # Let the socket hang around rather than having
                    # it closed by garbage collection.
                    conns.append(server.accept()[0])
            for sock in conns:
                sock.close()
        t = threading.Thread(target=serve)
        t.start()
        started.wait()
        try:
            # Case 1: timeout set before wrapping; handshake happens in
            # test_wrap_socket() on the already-connected socket.
            try:
                c = socket.socket(socket.AF_INET)
                c.settimeout(0.2)
                c.connect((host, port))
                # Will attempt handshake and time out
                self.assertRaisesRegex(TimeoutError, "timed out",
                                       test_wrap_socket, c)
            finally:
                c.close()
            # Case 2: socket wrapped first; handshake happens in connect().
            try:
                c = socket.socket(socket.AF_INET)
                c = test_wrap_socket(c)
                c.settimeout(0.2)
                # Will attempt handshake and time out
                self.assertRaisesRegex(TimeoutError, "timed out",
                                       c.connect, (host, port))
            finally:
                c.close()
        finally:
            finish = True
            t.join()
            server.close()
    def test_server_accept(self):
        """accept() on an SSLSocket made via SSLContext.wrap_socket()
        (issue #16357) returns a working SSLSocket and the peer address."""
        # Issue #16357: accept() on a SSLSocket created through
        # SSLContext.wrap_socket().
        client_ctx, server_ctx, hostname = testing_context()
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = socket_helper.bind_port(server)
        server = server_ctx.wrap_socket(server, server_side=True)
        self.assertTrue(server.server_side)
        evt = threading.Event()
        remote = None
        peer = None
        def serve():
            nonlocal remote, peer
            server.listen()
            # Block on the accept and wait on the connection to close.
            evt.set()
            remote, peer = server.accept()
            # Echo 4 bytes back so the client's recv() completes.
            remote.send(remote.recv(4))
        t = threading.Thread(target=serve)
        t.start()
        # Client wait until server setup and perform a connect.
        evt.wait()
        client = client_ctx.wrap_socket(
            socket.socket(), server_hostname=hostname
        )
        client.connect((hostname, port))
        client.send(b'data')
        client.recv()
        client_addr = client.getsockname()
        client.close()
        t.join()
        remote.close()
        server.close()
        # Sanity checks.
        self.assertIsInstance(remote, ssl.SSLSocket)
        self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("NO_SHARED_CIPHER", server.conn_errors[0])
    def test_version_basic(self):
        """
        Basic tests for SSLSocket.version().
        More tests are done in the test_protocol_*() methods.
        """
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        context.check_hostname = False
        context.verify_mode = ssl.CERT_NONE
        with ThreadedEchoServer(CERTFILE,
                                ssl_version=ssl.PROTOCOL_TLS_SERVER,
                                chatty=False) as server:
            with context.wrap_socket(socket.socket()) as s:
                # Before the handshake there is no negotiated version yet.
                self.assertIs(s.version(), None)
                self.assertIs(s._sslobj, None)
                s.connect((HOST, server.port))
                self.assertEqual(s.version(), 'TLSv1.3')
            # After close the internal SSL object is dropped and
            # version() goes back to None.
            self.assertIs(s._sslobj, None)
            self.assertIs(s.version(), None)
    @requires_tls_version('TLSv1_3')
    def test_tls1_3(self):
        """A TLS 1.3-only client negotiates a TLS 1.3 AEAD cipher suite."""
        client_context, server_context, hostname = testing_context()
        client_context.minimum_version = ssl.TLSVersion.TLSv1_3
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                # These are the three cipher suites defined for TLS 1.3
                # that OpenSSL enables by default.
                self.assertIn(s.cipher()[0], {
                    'TLS_AES_256_GCM_SHA384',
                    'TLS_CHACHA20_POLY1305_SHA256',
                    'TLS_AES_128_GCM_SHA256',
                })
                self.assertEqual(s.version(), 'TLSv1.3')
    @requires_tls_version('TLSv1_2')
    @requires_tls_version('TLSv1')
    @ignore_deprecation
    def test_min_max_version_tlsv1_2(self):
        """A TLS 1.0-1.2 client and a TLS 1.2-only server settle on 1.2."""
        client_context, server_context, hostname = testing_context()
        # client TLSv1.0 to 1.2
        client_context.minimum_version = ssl.TLSVersion.TLSv1
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        # server only TLSv1.2
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        server_context.maximum_version = ssl.TLSVersion.TLSv1_2
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                self.assertEqual(s.version(), 'TLSv1.2')
    @requires_tls_version('TLSv1_1')
    @ignore_deprecation
    def test_min_max_version_tlsv1_1(self):
        """Version ranges 1.0-1.2 (client) and 1.0-1.1 (server) meet at 1.1."""
        client_context, server_context, hostname = testing_context()
        # client 1.0 to 1.2, server 1.0 to 1.1
        client_context.minimum_version = ssl.TLSVersion.TLSv1
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        server_context.minimum_version = ssl.TLSVersion.TLSv1
        server_context.maximum_version = ssl.TLSVersion.TLSv1_1
        # Old protocols need the OpenSSL security level lowered.
        seclevel_workaround(client_context, server_context)
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                self.assertEqual(s.version(), 'TLSv1.1')
    @requires_tls_version('TLSv1_2')
    @requires_tls_version('TLSv1')
    @ignore_deprecation
    def test_min_max_version_mismatch(self):
        """Disjoint version ranges (client 1.0 only, server 1.2 only)
        must abort the handshake with a TLS alert."""
        client_context, server_context, hostname = testing_context()
        # client 1.0, server 1.2 (mismatch)
        server_context.maximum_version = ssl.TLSVersion.TLSv1_2
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        client_context.maximum_version = ssl.TLSVersion.TLSv1
        client_context.minimum_version = ssl.TLSVersion.TLSv1
        seclevel_workaround(client_context, server_context)
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                with self.assertRaises(ssl.SSLError) as e:
                    s.connect((HOST, server.port))
                # Exact alert wording differs across libssl builds.
                self.assertRegex(str(e.exception), "(alert|ALERT)")
    @requires_tls_version('SSLv3')
    def test_min_max_version_sslv3(self):
        """Both sides explicitly allowing SSLv3 can still negotiate it."""
        client_context, server_context, hostname = testing_context()
        server_context.minimum_version = ssl.TLSVersion.SSLv3
        client_context.minimum_version = ssl.TLSVersion.SSLv3
        client_context.maximum_version = ssl.TLSVersion.SSLv3
        # SSLv3 needs the OpenSSL security level lowered.
        seclevel_workaround(client_context, server_context)
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                self.assertEqual(s.version(), 'SSLv3')
    def test_default_ecdh_curve(self):
        """ECDH key exchange is enabled by default (issue #21015)."""
        # Issue #21015: elliptic curve-based Diffie Hellman key exchange
        # should be enabled by default on SSL contexts.
        client_context, server_context, hostname = testing_context()
        # TLSv1.3 defaults to PFS key agreement and no longer has KEA in
        # cipher name.
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        # Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
        # explicitly using the 'ECCdraft' cipher alias. Otherwise,
        # our default cipher list should prefer ECDH-based ciphers
        # automatically.
        with ThreadedEchoServer(context=server_context) as server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                self.assertIn("ECDH", s.cipher()[0])
    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        """Test tls-unique channel binding.

        Both peers must compute identical binding data, and a fresh
        connection must produce different data (hence "unique").
        """
        if support.verbose:
            sys.stdout.write("\n")
        client_context, server_context, hostname = testing_context()
        # tls-unique is not defined for TLSv1.3
        # https://datatracker.ietf.org/doc/html/rfc8446#appendix-C.5
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        server = ThreadedEchoServer(context=server_context,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            with client_context.wrap_socket(
                    socket.socket(),
                    server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                # get the data
                cb_data = s.get_channel_binding("tls-unique")
                if support.verbose:
                    sys.stdout.write(
                        " got channel binding data: {0!r}\n".format(cb_data))
                # check if it is sane
                self.assertIsNotNone(cb_data)
                if s.version() == 'TLSv1.3':
                    self.assertEqual(len(cb_data), 48)
                else:
                    self.assertEqual(len(cb_data), 12)  # True for TLSv1
                # and compare with the peers version
                s.write(b"CB tls-unique\n")
                peer_data_repr = s.read().strip()
                self.assertEqual(peer_data_repr,
                                 repr(cb_data).encode("us-ascii"))
            # now, again
            with client_context.wrap_socket(
                    socket.socket(),
                    server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                new_cb_data = s.get_channel_binding("tls-unique")
                if support.verbose:
                    sys.stdout.write(
                        "got another channel binding data: {0!r}\n".format(
                            new_cb_data)
                    )
                # is it really unique
                self.assertNotEqual(cb_data, new_cb_data)
                self.assertIsNotNone(cb_data)
                if s.version() == 'TLSv1.3':
                    self.assertEqual(len(cb_data), 48)
                else:
                    self.assertEqual(len(cb_data), 12)  # True for TLSv1
                s.write(b"CB tls-unique\n")
                peer_data_repr = s.read().strip()
                self.assertEqual(peer_data_repr,
                                 repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
def test_legacy_server_connect(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_LEGACY_SERVER_CONNECT
server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
def test_no_legacy_server_connect(self):
client_context, server_context, hostname = testing_context()
client_context.options &= ~ssl.OP_LEGACY_SERVER_CONNECT
server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
def test_dh_params(self):
# Check we can get a connection with ephemeral finite-field
# Diffie-Hellman (if supported).
client_context, server_context, hostname = testing_context()
dhe_aliases = {"ADH", "EDH", "DHE"}
if not (supports_kx_alias(client_context, dhe_aliases)
and supports_kx_alias(server_context, dhe_aliases)):
self.skipTest("libssl doesn't support ephemeral DH")
# test scenario needs TLS <= 1.2
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
try:
server_context.load_dh_params(DHFILE)
except RuntimeError:
if Py_DEBUG_WIN32:
self.skipTest("not supported on Win32 debug build")
raise
server_context.set_ciphers("kEDH")
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if not dhe_aliases.intersection(parts):
self.fail("Non-DH key exchange: " + cipher[0])
    def test_ecdh_curve(self):
        """set_ecdh_curve(): matching curves connect, mismatched curves
        fail the handshake."""
        # server secp384r1, client auto
        client_context, server_context, hostname = testing_context()
        server_context.set_ecdh_curve("secp384r1")
        server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        # server auto, client secp384r1
        client_context, server_context, hostname = testing_context()
        client_context.set_ecdh_curve("secp384r1")
        server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        stats = server_params_test(client_context, server_context,
                                   chatty=True, connectionchatty=True,
                                   sni_name=hostname)
        # server / client curve mismatch
        client_context, server_context, hostname = testing_context()
        client_context.set_ecdh_curve("prime256v1")
        server_context.set_ecdh_curve("secp384r1")
        server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
        server_context.minimum_version = ssl.TLSVersion.TLSv1_2
        with self.assertRaises(ssl.SSLError):
            server_params_test(client_context, server_context,
                               chatty=True, connectionchatty=True,
                               sni_name=hostname)
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
    def test_alpn_protocols(self):
        """ALPN negotiation picks the server's most-preferred protocol that
        the client also offers; no overlap selects nothing."""
        server_protocols = ['foo', 'bar', 'milkshake']
        # (client protocol list, expected negotiated protocol)
        protocol_tests = [
            (['foo', 'bar'], 'foo'),
            (['bar', 'foo'], 'foo'),
            (['milkshake'], 'milkshake'),
            (['http/3.0', 'http/4.0'], None)
        ]
        for client_protocols, expected in protocol_tests:
            client_context, server_context, hostname = testing_context()
            server_context.set_alpn_protocols(server_protocols)
            client_context.set_alpn_protocols(client_protocols)
            try:
                stats = server_params_test(client_context,
                                           server_context,
                                           chatty=True,
                                           connectionchatty=True,
                                           sni_name=hostname)
            except ssl.SSLError as e:
                # NOTE: on this path `stats` is the exception, so the
                # subscript below will raise and fail the test loudly.
                stats = e
            msg = "failed trying %s (s) and %s (c).\n" \
                  "was expecting %s, but got %%s from the %%s" \
                      % (str(server_protocols), str(client_protocols),
                         str(expected))
            client_result = stats['client_alpn_protocol']
            self.assertEqual(client_result, expected,
                             msg % (client_result, "client"))
            server_result = stats['server_alpn_protocols'][-1] \
                if len(stats['server_alpn_protocols']) else 'nothing'
            self.assertEqual(server_result, expected,
                             msg % (server_result, "server"))
def test_npn_protocols(self):
assert not ssl.HAS_NPN
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
    def test_sni_callback(self):
        """The servername callback receives the SNI name and may swap the
        connection's context (and thus its certificate)."""
        calls = []
        server_context, other_context, client_context = self.sni_contexts()
        client_context.check_hostname = False
        def servername_cb(ssl_sock, server_name, initial_context):
            calls.append((server_name, initial_context))
            if server_name is not None:
                # Switch the in-flight connection to the alternate context.
                ssl_sock.context = other_context
        server_context.set_servername_callback(servername_cb)
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name='supermessage')
        # The hostname was fetched properly, and the certificate was
        # changed for the connection.
        self.assertEqual(calls, [("supermessage", server_context)])
        # The alternate certificate (other_context) was selected
        self.check_common_name(stats, 'fakehostname')
        calls = []
        # The callback is called with server_name=None
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name=None)
        self.assertEqual(calls, [(None, server_context)])
        self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
        # Check disabling the callback
        calls = []
        server_context.set_servername_callback(None)
        stats = server_params_test(client_context, server_context,
                                   chatty=True,
                                   sni_name='notfunny')
        # Certificate didn't change
        self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
        self.assertEqual(calls, [])
    def test_sni_callback_alert(self):
        """A TLS alert returned from the SNI callback reaches the client."""
        # Returning a TLS alert is reflected to the connecting client
        server_context, other_context, client_context = self.sni_contexts()
        def cb_returning_alert(ssl_sock, server_name, initial_context):
            return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
        server_context.set_servername_callback(cb_returning_alert)
        with self.assertRaises(ssl.SSLError) as cm:
            stats = server_params_test(client_context, server_context,
                                       chatty=False,
                                       sni_name='supermessage')
        self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
    def test_sni_callback_raising(self):
        """An exception inside the SNI callback aborts the handshake and is
        reported through the unraisable-exception hook."""
        # Raising fails the connection with a TLS handshake failure alert.
        server_context, other_context, client_context = self.sni_contexts()
        def cb_raising(ssl_sock, server_name, initial_context):
            # Deliberately raise ZeroDivisionError inside the callback.
            1/0
        server_context.set_servername_callback(cb_raising)
        with support.catch_unraisable_exception() as catch:
            with self.assertRaises(ssl.SSLError) as cm:
                stats = server_params_test(client_context, server_context,
                                           chatty=False,
                                           sni_name='supermessage')
            # Allow for flexible libssl error messages.
            regex = "(SSLV3_ALERT_HANDSHAKE_FAILURE|NO_PRIVATE_VALUE)"
            self.assertRegex(cm.exception.reason, regex)
            self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
    def test_sni_callback_wrong_return_type(self):
        """A non-int return from the SNI callback triggers an internal-error
        alert and surfaces a TypeError via the unraisable hook."""
        # Returning the wrong return type terminates the TLS connection
        # with an internal error alert.
        server_context, other_context, client_context = self.sni_contexts()
        def cb_wrong_return_type(ssl_sock, server_name, initial_context):
            return "foo"
        server_context.set_servername_callback(cb_wrong_return_type)
        with support.catch_unraisable_exception() as catch:
            with self.assertRaises(ssl.SSLError) as cm:
                stats = server_params_test(client_context, server_context,
                                           chatty=False,
                                           sni_name='supermessage')
            self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
            self.assertEqual(catch.unraisable.exc_type, TypeError)
    def test_shared_ciphers(self):
        """shared_ciphers() on the server lists only suites acceptable to
        both sides (AES256 here, plus the always-on TLS 1.3 suites)."""
        client_context, server_context, hostname = testing_context()
        client_context.set_ciphers("AES128:AES256")
        server_context.set_ciphers("AES256:eNULL")
        expected_algs = [
            "AES256", "AES-256",
            # TLS 1.3 ciphers are always enabled
            "TLS_CHACHA20", "TLS_AES",
        ]
        stats = server_params_test(client_context, server_context,
                                   sni_name=hostname)
        ciphers = stats['server_shared_ciphers'][0]
        self.assertGreater(len(ciphers), 0)
        for name, tls_version, bits in ciphers:
            if not any(alg in name for alg in expected_algs):
                self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
    def test_sendfile(self):
        """SSLSocket.sendfile() transmits a file's contents over TLS."""
        TEST_DATA = b"x" * 512
        with open(os_helper.TESTFN, 'wb') as f:
            f.write(TEST_DATA)
        self.addCleanup(os_helper.unlink, os_helper.TESTFN)
        client_context, server_context, hostname = testing_context()
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                with open(os_helper.TESTFN, 'rb') as file:
                    s.sendfile(file)
                # The echo server sends the payload straight back.
                self.assertEqual(s.recv(1024), TEST_DATA)
    def test_session(self):
        """SSLSession objects: attributes, reuse across connections, and the
        server context's accept/hit statistics."""
        client_context, server_context, hostname = testing_context()
        # TODO: sessions aren't compatible with TLSv1.3 yet
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        # first connection without session
        stats = server_params_test(client_context, server_context,
                                   sni_name=hostname)
        session = stats['session']
        self.assertTrue(session.id)
        self.assertGreater(session.time, 0)
        self.assertGreater(session.timeout, 0)
        self.assertTrue(session.has_ticket)
        self.assertGreater(session.ticket_lifetime_hint, 0)
        self.assertFalse(stats['session_reused'])
        sess_stat = server_context.session_stats()
        self.assertEqual(sess_stat['accept'], 1)
        self.assertEqual(sess_stat['hits'], 0)
        # reuse session
        stats = server_params_test(client_context, server_context,
                                   session=session, sni_name=hostname)
        sess_stat = server_context.session_stats()
        self.assertEqual(sess_stat['accept'], 2)
        self.assertEqual(sess_stat['hits'], 1)
        self.assertTrue(stats['session_reused'])
        session2 = stats['session']
        # Reused session: equal content, but a distinct Python object.
        self.assertEqual(session2.id, session.id)
        self.assertEqual(session2, session)
        self.assertIsNot(session2, session)
        self.assertGreaterEqual(session2.time, session.time)
        self.assertGreaterEqual(session2.timeout, session.timeout)
        # another one without session
        stats = server_params_test(client_context, server_context,
                                   sni_name=hostname)
        self.assertFalse(stats['session_reused'])
        session3 = stats['session']
        self.assertNotEqual(session3.id, session.id)
        self.assertNotEqual(session3, session)
        sess_stat = server_context.session_stats()
        self.assertEqual(sess_stat['accept'], 3)
        self.assertEqual(sess_stat['hits'], 1)
        # reuse session again
        stats = server_params_test(client_context, server_context,
                                   session=session, sni_name=hostname)
        self.assertTrue(stats['session_reused'])
        session4 = stats['session']
        self.assertEqual(session4.id, session.id)
        self.assertEqual(session4, session)
        self.assertGreaterEqual(session4.time, session.time)
        self.assertGreaterEqual(session4.timeout, session.timeout)
        sess_stat = server_context.session_stats()
        self.assertEqual(sess_stat['accept'], 4)
        self.assertEqual(sess_stat['hits'], 2)
    def test_session_handling(self):
        """Rules for assigning SSLSocket.session: wrong type rejected,
        cannot set after the handshake, cannot cross SSLContexts."""
        client_context, server_context, hostname = testing_context()
        client_context2, _, _ = testing_context()
        # TODO: session reuse does not work with TLSv1.3
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        client_context2.maximum_version = ssl.TLSVersion.TLSv1_2
        server = ThreadedEchoServer(context=server_context, chatty=False)
        with server:
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                # session is None before handshake
                self.assertEqual(s.session, None)
                self.assertEqual(s.session_reused, None)
                s.connect((HOST, server.port))
                session = s.session
                self.assertTrue(session)
                with self.assertRaises(TypeError) as e:
                    s.session = object
                self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                s.connect((HOST, server.port))
                # cannot set session after handshake
                with self.assertRaises(ValueError) as e:
                    s.session = session
                self.assertEqual(str(e.exception),
                                 'Cannot set session after handshake.')
            with client_context.wrap_socket(socket.socket(),
                                            server_hostname=hostname) as s:
                # can set session before handshake and before the
                # connection was established
                s.session = session
                s.connect((HOST, server.port))
                self.assertEqual(s.session.id, session.id)
                self.assertEqual(s.session, session)
                self.assertEqual(s.session_reused, True)
            with client_context2.wrap_socket(socket.socket(),
                                             server_hostname=hostname) as s:
                # cannot re-use session with a different SSLContext
                with self.assertRaises(ValueError) as e:
                    s.session = session
                    s.connect((HOST, server.port))
                self.assertEqual(str(e.exception),
                                 'Session refers to a different SSLContext.')
    @requires_tls_version('TLSv1_2')
    @unittest.skipUnless(ssl.HAS_PSK, 'TLS-PSK disabled on this OpenSSL build')
    def test_psk(self):
        """TLS-PSK over TLS 1.2: matching keys connect, mismatched keys
        fail, identities/hints are exchanged, and callbacks are one-sided."""
        psk = bytes.fromhex('deadbeef')
        client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        client_context.check_hostname = False
        client_context.verify_mode = ssl.CERT_NONE
        client_context.maximum_version = ssl.TLSVersion.TLSv1_2
        client_context.set_ciphers('PSK')
        client_context.set_psk_client_callback(lambda hint: (None, psk))
        server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
        server_context.maximum_version = ssl.TLSVersion.TLSv1_2
        server_context.set_ciphers('PSK')
        server_context.set_psk_server_callback(lambda identity: psk)
        # correct PSK should connect
        server = ThreadedEchoServer(context=server_context)
        with server:
            with client_context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
        # incorrect PSK should fail
        incorrect_psk = bytes.fromhex('cafebabe')
        client_context.set_psk_client_callback(lambda hint: (None, incorrect_psk))
        server = ThreadedEchoServer(context=server_context)
        with server:
            with client_context.wrap_socket(socket.socket()) as s:
                with self.assertRaises(ssl.SSLError):
                    s.connect((HOST, server.port))
        # identity_hint and client_identity should be sent to the other side
        identity_hint = 'identity-hint'
        client_identity = 'client-identity'
        def client_callback(hint):
            self.assertEqual(hint, identity_hint)
            return client_identity, psk
        def server_callback(identity):
            self.assertEqual(identity, client_identity)
            return psk
        client_context.set_psk_client_callback(client_callback)
        server_context.set_psk_server_callback(server_callback, identity_hint)
        server = ThreadedEchoServer(context=server_context)
        with server:
            with client_context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
        # adding client callback to server or vice versa raises an exception
        with self.assertRaisesRegex(ssl.SSLError, 'Cannot add PSK server callback'):
            client_context.set_psk_server_callback(server_callback, identity_hint)
        with self.assertRaisesRegex(ssl.SSLError, 'Cannot add PSK client callback'):
            server_context.set_psk_client_callback(client_callback)
        # test with UTF-8 identities
        identity_hint = '身份暗示'  # Translation: "Identity hint"
        client_identity = '客户身份'  # Translation: "Customer identity"
        client_context.set_psk_client_callback(client_callback)
        server_context.set_psk_server_callback(server_callback, identity_hint)
        server = ThreadedEchoServer(context=server_context)
        with server:
            with client_context.wrap_socket(socket.socket()) as s:
                s.connect((HOST, server.port))
@requires_tls_version('TLSv1_3')
@unittest.skipUnless(ssl.HAS_PSK, 'TLS-PSK disabled on this OpenSSL build')
def test_psk_tls1_3(self):
psk = bytes.fromhex('deadbeef')
identity_hint = 'identity-hint'
client_identity = 'client-identity'
def client_callback(hint):
# identity_hint is not sent to the client in TLS 1.3
self.assertIsNone(hint)
return client_identity, psk
def server_callback(identity):
self.assertEqual(identity, client_identity)
return psk
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.set_ciphers('PSK')
client_context.set_psk_client_callback(client_callback)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
server_context.set_ciphers('PSK')
server_context.set_psk_server_callback(server_callback, identity_hint)
server = ThreadedEchoServer(context=server_context)
with server:
with client_context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
@unittest.skipUnless(has_tls_version('TLSv1_3') and ssl.HAS_PHA,
"Test needs TLS 1.3 PHA")
| ThreadedTests |
python | great-expectations__great_expectations | great_expectations/exceptions/resource_freshness.py | {
"start": 3372,
"end": 3720
} | class ____(ResourceFreshnessError):
def __init__(self, name: str) -> None:
super().__init__(
f"Checkpoint '{name}' must be added to the DataContext before it can be updated. "
"Please call `context.checkpoints.add(<CHECKPOINT_OBJECT>)`, "
"then try your action again."
)
| CheckpointNotAddedError |
python | fluentpython__example-code-2e | 05-data-classes/dataclass/resource.py | {
"start": 1280,
"end": 1787
} | class ____:
"""Media resource description."""
identifier: str # <2>
title: str = '<untitled>' # <3>
creators: list[str] = field(default_factory=list)
date: Optional[date] = None # <4>
type: ResourceType = ResourceType.BOOK # <5>
description: str = ''
language: str = ''
subjects: list[str] = field(default_factory=list)
# end::DATACLASS[]
from typing import TypedDict
| Resource |
python | Textualize__textual | examples/theme_sandbox.py | {
"start": 2169,
"end": 2206
} | class ____(Label):
pass
| ColorSample |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-microsoft-dataverse/source_microsoft_dataverse/dataverse.py | {
"start": 254,
"end": 733
} | class ____(Oauth2Authenticator):
def build_refresh_request_body(self) -> Mapping[str, Any]:
"""
Returns the request body to set on the refresh request
"""
payload: MutableMapping[str, Any] = {
"grant_type": "client_credentials",
"client_id": self.get_client_id(),
"client_secret": self.get_client_secret(),
"scope": self.get_scopes(),
}
return payload
| MicrosoftOauth2Authenticator |
python | huggingface__transformers | src/transformers/models/xcodec/modeling_xcodec.py | {
"start": 9075,
"end": 9868
} | class ____(nn.Module):
"""
Vector quantization implementation. Currently supports only euclidean distance.
"""
def __init__(self, config: XcodecConfig):
super().__init__()
self.codebook = XcodecEuclideanCodebook(config)
# Copied from transformers.models.encodec.modeling_encodec.EncodecVectorQuantization.encode
def encode(self, hidden_states):
hidden_states = hidden_states.permute(0, 2, 1)
embed_in = self.codebook.encode(hidden_states)
return embed_in
# Copied from transformers.models.encodec.modeling_encodec.EncodecVectorQuantization.decode
def decode(self, embed_ind):
quantize = self.codebook.decode(embed_ind)
quantize = quantize.permute(0, 2, 1)
return quantize
| XcodecVectorQuantization |
python | huggingface__transformers | tests/models/qwen2_audio/test_processing_qwen2_audio.py | {
"start": 887,
"end": 4395
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = Qwen2AudioProcessor
model_id = "Qwen/Qwen2-Audio-7B-Instruct"
@classmethod
def _setup_test_attributes(cls, processor):
cls.audio_token = processor.audio_token
def test_can_load_various_tokenizers(self):
processor = Qwen2AudioProcessor.from_pretrained(self.model_id)
tokenizer = AutoTokenizer.from_pretrained(self.model_id)
self.assertEqual(processor.tokenizer.__class__, tokenizer.__class__)
def test_tokenizer_integration(self):
slow_tokenizer = AutoTokenizer.from_pretrained(self.model_id, use_fast=False)
fast_tokenizer = AutoTokenizer.from_pretrained(self.model_id, from_slow=True, legacy=False)
prompt = "<|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<|audio_bos|><|AUDIO|><|audio_eos|>\nWhat is it in this audio?<|im_end|><|im_start|>assistant\n"
EXPECTED_OUTPUT = [
"<|im_start|>",
"system",
"Ċ",
"Answer",
"Ġthe",
"Ġquestions",
".",
"<|im_end|>",
"<|im_start|>",
"user",
"Ċ",
"<|audio_bos|>",
"<|AUDIO|>",
"<|audio_eos|>",
"Ċ",
"What",
"Ġis",
"Ġit",
"Ġin",
"Ġthis",
"Ġaudio",
"?",
"<|im_end|>",
"<|im_start|>",
"assistant",
"Ċ",
]
self.assertEqual(slow_tokenizer.tokenize(prompt), EXPECTED_OUTPUT)
self.assertEqual(fast_tokenizer.tokenize(prompt), EXPECTED_OUTPUT)
def test_chat_template(self):
processor = AutoProcessor.from_pretrained(self.model_id)
expected_prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nAudio 1: <|audio_bos|><|AUDIO|><|audio_eos|>\nWhat's that sound?<|im_end|>\n<|im_start|>assistant\nIt is the sound of glass shattering.<|im_end|>\n<|im_start|>user\nAudio 2: <|audio_bos|><|AUDIO|><|audio_eos|>\nHow about this one?<|im_end|>\n<|im_start|>assistant\n"
messages = [
{"role": "system", "content": "You are a helpful assistant."},
{
"role": "user",
"content": [
{
"type": "audio",
"audio_url": url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/glass-breaking-151256.mp3"
),
},
{"type": "text", "text": "What's that sound?"},
],
},
{"role": "assistant", "content": "It is the sound of glass shattering."},
{
"role": "user",
"content": [
{
"type": "audio",
"audio_url": url_to_local_path(
"https://huggingface.co/datasets/raushan-testing-hf/audio-test/resolve/main/f2641_0_throatclearing.wav"
),
},
{"type": "text", "text": "How about this one?"},
],
},
]
formatted_prompt = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
self.assertEqual(expected_prompt, formatted_prompt)
| Qwen2AudioProcessorTest |
python | ray-project__ray | python/ray/serve/tests/test_config_files/fastapi_deployment.py | {
"start": 127,
"end": 259
} | class ____:
@app.get("/hello")
def incr(self):
return "Hello world!"
node = FastAPIDeployment.bind()
| FastAPIDeployment |
python | ansible__ansible | test/units/module_utils/basic/test_run_command.py | {
"start": 345,
"end": 1054
} | class ____(BytesIO):
"""BytesIO with dummy close() method
So that you can inspect the content after close() was called.
"""
def close(self):
pass
@pytest.fixture
def mock_os(mocker):
def mock_os_abspath(path):
if path.startswith('/'):
return path
else:
return os.getcwd.return_value + '/' + path
os = mocker.patch('ansible.module_utils.basic.os')
os.path.expandvars.side_effect = lambda x: x
os.path.expanduser.side_effect = lambda x: x
os.environ = {'PATH': '/bin'}
os.getcwd.return_value = '/home/foo'
os.path.isdir.return_value = True
os.path.abspath.side_effect = mock_os_abspath
yield os
| OpenBytesIO |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets_tests/snippet_checks/guides/components/integrations/test_powerbi_utils.py | {
"start": 6256,
"end": 7333
} | class ____(PowerBIWorkspaceComponent):
@cached_property
def workspace_resource(self) -> MockPowerBIWorkspace:
return MockPowerBIWorkspace(**self.workspace.model_dump())
def test_mock_powerbi_workspace() -> None:
"""Test that the mock PowerBI workspace returns the expected data."""
workspace = MockPowerBIWorkspace(
credentials=PowerBIToken(api_token="test_token"),
workspace_id="test_workspace",
)
workspace_data = workspace.fetch_powerbi_workspace_data(use_workspace_scan=True)
# Verify we have the expected content
assert len(workspace_data.dashboards_by_id) == 2
assert len(workspace_data.reports_by_id) == 2
assert len(workspace_data.semantic_models_by_id) == 2
assert len(workspace_data.data_sources_by_id) == 2
# Verify specific content
assert "dashboard_1" in workspace_data.dashboards_by_id
assert "report_1" in workspace_data.reports_by_id
assert "dataset_1" in workspace_data.semantic_models_by_id
assert "datasource_1" in workspace_data.data_sources_by_id
| MockPowerBIComponent |
python | ray-project__ray | python/ray/data/iterator.py | {
"start": 1292,
"end": 1751
} | class ____(Iterable[T]):
def __init__(self, iterator_gen: Callable[[], Iterator[T]]):
"""Constructs an Iterable from an iterator generator.
Args:
iterator_gen: A function that returns an iterator each time it
is called. For example, this can be a generator function.
"""
self.iterator_gen = iterator_gen
def __iter__(self):
return self.iterator_gen()
@PublicAPI
| _IterableFromIterator |
python | django-import-export__django-import-export | import_export/forms.py | {
"start": 2314,
"end": 3257
} | class ____(ImportExportFormBase):
import_file = forms.FileField(label=_("File to import"))
# field ordered for usability:
# ensure that the 'file' select appears before 'format'
# so that the 'guess_format' js logic makes sense
field_order = ["resource", "import_file", "format"]
def __init__(self, formats, resources, **kwargs):
super().__init__(formats, resources, **kwargs)
if len(formats) > 1:
self.fields["import_file"].widget.attrs["class"] = "guess_format"
self.fields["format"].widget.attrs["class"] = "guess_format"
@property
def media(self):
media = super().media
extra = "" if settings.DEBUG else ".min"
return media + forms.Media(
js=(
f"admin/js/vendor/jquery/jquery{extra}.js",
"admin/js/jquery.init.js",
"import_export/guess_format.js",
)
)
| ImportForm |
python | pypa__pip | src/pip/_vendor/resolvelib/structs.py | {
"start": 5517,
"end": 6420
} | class ____(Iterable[RT]):
"""Wrap an iterable returned by find_matches().
This is essentially just a proxy to the underlying sequence that provides
the same interface as `_FactoryIterableView`.
"""
def __init__(self, sequence: Sequence[RT]):
self._sequence = sequence
def __repr__(self) -> str:
return f"{type(self).__name__}({self._sequence})"
def __bool__(self) -> bool:
return bool(self._sequence)
def __iter__(self) -> Iterator[RT]:
return iter(self._sequence)
def build_iter_view(matches: Matches[CT]) -> Iterable[CT]:
"""Build an iterable view from the value returned by `find_matches()`."""
if callable(matches):
return _FactoryIterableView(matches)
if not isinstance(matches, Sequence):
matches = list(matches)
return _SequenceIterableView(matches)
IterableView = Iterable
| _SequenceIterableView |
python | pennersr__django-allauth | allauth/headless/base/views.py | {
"start": 1005,
"end": 1587
} | class ____(APIView):
stage_class: Optional[Type[LoginStage]] = None
def handle(self, request, *args, **kwargs):
self.stage = LoginStageController.enter(request, self.stage_class.key)
if not self.stage:
return response.UnauthorizedResponse(request)
return super().handle(request, *args, **kwargs)
def respond_stage_error(self):
return response.UnauthorizedResponse(self.request)
def respond_next_stage(self):
self.stage.exit()
return response.AuthenticationResponse(self.request)
| AuthenticationStageAPIView |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 139298,
"end": 139861
} | class ____(BaseModel):
"""
See source code for the fields' description.
"""
model_config = ConfigDict(extra="allow", frozen=True)
alert_output: Optional[SqlAlertOutput] = Field(
None, description="The output of a SQL alert task, if available."
)
dashboard_output: Optional[SqlDashboardOutput] = Field(
None, description="The output of a SQL dashboard task, if available."
)
query_output: Optional[SqlQueryOutput] = Field(
None, description="The output of a SQL query task, if available."
)
| SqlOutput |
python | PyCQA__pylint | tests/functional/n/non/non_iterator_returned.py | {
"start": 197,
"end": 337
} | class ____:
""" yields in iterator. """
def __iter__(self):
for index in range(10):
yield index
| FirstGoodIterator |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 312735,
"end": 319073
} | class ____(ExternKernel):
predicate: Optional[IRNode] = None
operands: Optional[Sequence[IRNode]] = None
true_subgraph: Optional[Subgraph] = None
false_subgraph: Optional[Subgraph] = None
outputs: Optional[Sequence[MultiOutput]] = None
def __init__(
self,
predicate: IRNode,
operands: Sequence[IRNode],
true_subgraph: Subgraph,
false_subgraph: Subgraph,
layout: MultiOutputLayout,
unbacked_bindings: Optional[dict[sympy.Symbol, pytree.KeyPath]],
) -> None:
self.predicate = predicate
self.operands = operands
self.true_subgraph = true_subgraph
self.false_subgraph = false_subgraph
sym_args, tensor_args = _split_by_sym_type([predicate, *operands])
super().__init__(
name=None,
layout=layout,
inputs=tensor_args,
constant_args=sym_args,
)
if unbacked_bindings is not None:
self.unbacked_bindings = unbacked_bindings
self.name = V.graph.register_buffer(self)
V.graph.register_operation(self)
@staticmethod
def _maybe_expr(s: Union[int, torch.SymInt]) -> Union[int, sympy.Expr]:
if isinstance(s, int):
return s
return s.node.expr
@classmethod
def create(
cls,
predicate: TensorBox,
true_fn: Subgraph,
false_fn: Subgraph,
operands: list[Union[TensorBox, ShapeAsConstantBuffer]],
) -> Sequence[IRNode]:
"""Create a Sequence of IRNodes from a conditional statement (see .lowering.cond)"""
# pyrefly: ignore [bad-assignment]
predicate = cls.realize_input(predicate)
# pyrefly: ignore [bad-assignment]
operands = [cls.realize_input(x) for x in operands]
fx_operands: Argument = V.graph.current_node.args[-1]
assert isinstance(fx_operands, Sequence), type(fx_operands)
assert all(isinstance(n, Node) for n in fx_operands)
fake_operands = [cast(Node, x).meta["val"] for x in fx_operands]
for subgraph in (true_fn, false_fn):
if subgraph.graph is None:
# create and lower subgraphs
subgraph.graph = V.graph.make_subgraph(
gm=subgraph.graph_module,
example_inputs=fake_operands,
subgraph_name=subgraph.name,
)
with V.set_graph_handler(subgraph.graph):
subgraph.graph.run(*fake_operands)
assert true_fn.graph is not None
assert false_fn.graph is not None
true_outputs = true_fn.graph.graph_outputs
false_outputs = false_fn.graph.graph_outputs
for name, outputs in (("true_fn", true_outputs), ("false_fn", false_outputs)):
if _has_aliased_buffers(true_outputs):
raise AssertionError(
"Output aliasing is currently not supported in compiled torch.cond. "
f"The outputs of the {name} subgraph of torch.cond are aliased: {outputs}"
)
# make sure true and false outputs are structurally equivalent
assert len(true_outputs) == len(false_outputs), (true_outputs, false_outputs)
for i, (t_o, f_o) in enumerate(zip(true_outputs, false_outputs)):
assert t_o.get_device() == f_o.get_device(), (i, t_o, f_o)
assert t_o.get_dtype() == f_o.get_dtype(), (i, t_o, f_o)
assert t_o.get_layout().offset == f_o.get_layout().offset, (i, t_o, f_o)
device = next(
o.get_device()
for o in [predicate] + operands
if not isinstance(o, ShapeAsConstantBuffer)
)
unbacked_bindings = resolve_unbacked_bindings(
V.graph.sizevars.shape_env,
V.graph.current_node.meta.get("unbacked_bindings", None),
)
assert device is not None, "cannot determine device"
conditional = Conditional(
predicate=predicate,
operands=operands,
true_subgraph=true_fn,
false_subgraph=false_fn,
layout=MultiOutputLayout(device=device),
unbacked_bindings=unbacked_bindings,
)
outputs = [
MultiOutput(
FixedLayout(
device=output.get_device()
if output.get_device() is not None
else device, # type: ignore[arg-type]
dtype=output.get_dtype(),
size=[Conditional._maybe_expr(sz) for sz in merged_output.size()],
stride=[
Conditional._maybe_expr(sz) for sz in merged_output.stride()
],
offset=output.get_layout().offset,
is_pinned=output.get_layout().is_pinned,
),
conditional,
[(list, i)],
)
# as the true and false outputs are equivalent,
# we can use either of them here as a "template"
for i, (output, merged_output) in enumerate(
zip(true_outputs, V.graph.current_node.meta["val"])
)
]
conditional.outputs = outputs # type: ignore[assignment]
return outputs
def codegen(self, wrapper: PythonWrapperCodegen) -> None:
wrapper.codegen_conditional(self)
wrapper.codegen_unbacked_symbol_defs_for_outputs(
self.get_name(), self.outputs, getattr(self, "unbacked_bindings", {})
)
def get_unbacked_symbol_defs(self) -> OrderedSet[sympy.Symbol]:
if unbacked_bindings := getattr(self, "unbacked_bindings", None):
resolved = resolve_unbacked_bindings(
V.graph.sizevars.shape_env, unbacked_bindings
)
assert resolved is not None
return OrderedSet(resolved.keys())
else:
return OrderedSet()
def _split_by_sym_type(
args: list[Any],
) -> tuple[list[ShapeAsConstantBuffer], list[Any]]:
non_sym_args = []
sym_args = []
for arg in args:
if isinstance(arg, ShapeAsConstantBuffer):
sym_args.append(arg.expr)
else:
non_sym_args.append(arg)
return sym_args, non_sym_args
@ir_dataclass(frozen=False)
| Conditional |
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/custom_nest_trace_type_test.py | {
"start": 991,
"end": 1655
} | class ____(trace.TraceType):
def __init__(self, obj):
self._object = obj
def is_subtype_of(self, other):
return self._object == 2 and other._object == 3
def most_specific_common_supertype(self, others):
if not others:
return self
if self._object == 2 and isinstance(others[0]._object, int):
return MockSupertypes2With3(3)
else:
return None
def placeholder_value(self, placeholder_context=None):
raise NotImplementedError
def __eq__(self, other) -> bool:
return isinstance(other, type(self)) and self._object == other._object
def __hash__(self) -> int:
return self._object_hash
| MockSupertypes2With3 |
python | agronholm__apscheduler | src/apscheduler/_exceptions.py | {
"start": 1230,
"end": 1389
} | class ____(Exception):
"""
Raised by :meth:`~Scheduler.get_job_result` if the job failed to
start within the allotted time.
"""
| JobDeadlineMissed |
python | python-excel__xlwt | xlwt/Formatting.py | {
"start": 1680,
"end": 4865
} | class ____(object):
ESCAPEMENT_NONE = 0x00
ESCAPEMENT_SUPERSCRIPT = 0x01
ESCAPEMENT_SUBSCRIPT = 0x02
UNDERLINE_NONE = 0x00
UNDERLINE_SINGLE = 0x01
UNDERLINE_SINGLE_ACC = 0x21
UNDERLINE_DOUBLE = 0x02
UNDERLINE_DOUBLE_ACC = 0x22
FAMILY_NONE = 0x00
FAMILY_ROMAN = 0x01
FAMILY_SWISS = 0x02
FAMILY_MODERN = 0x03
FAMILY_SCRIPT = 0x04
FAMILY_DECORATIVE = 0x05
CHARSET_ANSI_LATIN = 0x00
CHARSET_SYS_DEFAULT = 0x01
CHARSET_SYMBOL = 0x02
CHARSET_APPLE_ROMAN = 0x4D
CHARSET_ANSI_JAP_SHIFT_JIS = 0x80
CHARSET_ANSI_KOR_HANGUL = 0x81
CHARSET_ANSI_KOR_JOHAB = 0x82
CHARSET_ANSI_CHINESE_GBK = 0x86
CHARSET_ANSI_CHINESE_BIG5 = 0x88
CHARSET_ANSI_GREEK = 0xA1
CHARSET_ANSI_TURKISH = 0xA2
CHARSET_ANSI_VIETNAMESE = 0xA3
CHARSET_ANSI_HEBREW = 0xB1
CHARSET_ANSI_ARABIC = 0xB2
CHARSET_ANSI_BALTIC = 0xBA
CHARSET_ANSI_CYRILLIC = 0xCC
CHARSET_ANSI_THAI = 0xDE
CHARSET_ANSI_LATIN_II = 0xEE
CHARSET_OEM_LATIN_I = 0xFF
def __init__(self):
# twip = 1/20 of a point = 1/1440 of a inch
# usually resolution == 96 pixels per 1 inch
# (rarely 120 pixels per 1 inch or another one)
self.height = 0x00C8 # 200: this is font with height 10 points
self.italic = False
self.struck_out = False
self.outline = False
self.shadow = False
self.colour_index = 0x7FFF
self.bold = False
self._weight = 0x0190 # 0x02BC gives bold font
self.escapement = self.ESCAPEMENT_NONE
self.underline = self.UNDERLINE_NONE
self.family = self.FAMILY_NONE
self.charset = self.CHARSET_SYS_DEFAULT
self.name = 'Arial'
def get_biff_record(self):
height = self.height
options = 0x00
if self.bold:
options |= 0x01
self._weight = 0x02BC
if self.italic:
options |= 0x02
if self.underline != self.UNDERLINE_NONE:
options |= 0x04
if self.struck_out:
options |= 0x08
if self.outline:
options |= 0x010
if self.shadow:
options |= 0x020
colour_index = self.colour_index
weight = self._weight
escapement = self.escapement
underline = self.underline
family = self.family
charset = self.charset
name = self.name
return BIFFRecords.FontRecord(height, options, colour_index, weight, escapement,
underline, family, charset,
name)
def _search_key(self):
return (
self.height,
self.italic,
self.struck_out,
self.outline,
self.shadow,
self.colour_index,
self.bold,
self._weight,
self.escapement,
self.underline,
self.family,
self.charset,
self.name,
)
| Font |
python | etianen__django-reversion | tests/test_app/tests/test_models.py | {
"start": 5993,
"end": 6857
} | class ____(TestModelMixin, TestBase):
databases = {"default", "mysql", "postgres"}
def testGetForObjectReferenceModelDb(self):
with reversion.create_revision():
obj = TestModel.objects.db_manager("postgres").create()
self.assertEqual(Version.objects.get_for_object_reference(TestModel, obj.pk).count(), 0)
self.assertEqual(Version.objects.get_for_object_reference(TestModel, obj.pk, model_db="postgres").count(), 1)
def testGetForObjectReferenceModelDbMySql(self):
with reversion.create_revision():
obj = TestModel.objects.db_manager("mysql").create()
self.assertEqual(Version.objects.get_for_object_reference(TestModel, obj.pk).count(), 0)
self.assertEqual(Version.objects.get_for_object_reference(TestModel, obj.pk, model_db="mysql").count(), 1)
| GetForObjectReferenceModelDbTest |
python | django__django | tests/model_formsets/models.py | {
"start": 6161,
"end": 6327
} | class ____(models.Model):
name = models.CharField(max_length=255, primary_key=True)
parent = models.ForeignKey(UUIDPKParent, models.CASCADE)
| ChildWithEditablePK |
python | ray-project__ray | python/ray/llm/tests/serve/cpu/deployments/test_prefix_aware_request_router.py | {
"start": 5544,
"end": 8970
} | class ____:
"""Tests that exercise actual prefix-aware request routing logic."""
@pytest.mark.asyncio
async def test_high_match_rate_selects_matching_replica(
self, prefix_request_router
):
"""High match rate → use matched replica instead of Pow2."""
r1 = FakeRunningReplica("r1")
r1.set_queue_len_response(0)
r2 = FakeRunningReplica("r2")
r2.set_queue_len_response(0)
prefix_request_router.update_replicas([r1, r2])
ray.get(
prefix_request_router._tree_actor.insert.remote(
"Hello", r2.replica_id.to_full_id_str(), time.time()
)
)
# Verify prefix match and smallest tenants
matched_text, matched_tenants = ray.get(
prefix_request_router._tree_actor.prefix_match.remote("Hello world")
)
assert matched_text == "Hello"
assert matched_tenants == [r2.replica_id.to_full_id_str()]
tenant_counts = ray.get(
prefix_request_router._tree_actor.getattr.remote("tenant_to_char_count")
)
assert tenant_counts[r1.replica_id.to_full_id_str()] == 0
assert tenant_counts[r2.replica_id.to_full_id_str()] == 5
prompt_req = fake_pending_request(prompt="Hello world")
for _ in range(10):
chosen = await prefix_request_router._choose_replica_for_request(prompt_req)
assert chosen == r2
chat_req = fake_pending_request(
messages=[{"content": "Hello"}, {"content": " world"}]
)
for _ in range(10):
chosen = await prefix_request_router._choose_replica_for_request(chat_req)
assert chosen == r2
@pytest.mark.asyncio
async def test_low_match_rate_uses_smallest_tree(self, prefix_request_router):
"""Low match rate → use replica with least total inserted characters."""
r1 = FakeRunningReplica("r1")
r1.set_queue_len_response(0)
r2 = FakeRunningReplica("r2")
r2.set_queue_len_response(0)
prefix_request_router.update_replicas([r1, r2])
# Make r2 "bigger" tenant
ray.get(
prefix_request_router._tree_actor.insert.remote(
"hi", r1.replica_id.to_full_id_str(), time.time()
)
)
ray.get(
prefix_request_router._tree_actor.insert.remote(
"longtext", r2.replica_id.to_full_id_str(), time.time()
)
)
# Verify tenant character counts
tenant_counts = ray.get(
prefix_request_router._tree_actor.getattr.remote("tenant_to_char_count")
)
assert tenant_counts[r1.replica_id.to_full_id_str()] == 2 # "hi"
assert tenant_counts[r2.replica_id.to_full_id_str()] == 8 # "longtext"
prompt_req = fake_pending_request(prompt="z")
for _ in range(10):
# Both tenants have 0% match rate, so the smaller tenant (r1) is chosen
assert (
await prefix_request_router._choose_replica_for_request(prompt_req)
== r1
)
chat_req = fake_pending_request(messages=[{"content": "z"}])
for _ in range(10):
# Both tenants have 0% match rate, so the smaller tenant (r1) is chosen
assert (
await prefix_request_router._choose_replica_for_request(chat_req) == r1
)
| TestPrefixAwareLogic |
python | docker__docker-py | docker/api/daemon.py | {
"start": 77,
"end": 6008
} | class ____:
@utils.minimum_version('1.25')
def df(self):
"""
Get data usage information.
Returns:
(dict): A dictionary representing different resource categories
and their respective data usage.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url('/system/df')
return self._result(self._get(url), True)
def events(self, since=None, until=None, filters=None, decode=None):
"""
Get real-time events from the server. Similar to the ``docker events``
command.
Args:
since (UTC datetime or int): Get events from this point
until (UTC datetime or int): Get events until this point
filters (dict): Filter the events by event time, container or image
decode (bool): If set to true, stream will be decoded into dicts on
the fly. False by default.
Returns:
A :py:class:`docker.types.daemon.CancellableStream` generator
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> for event in client.events(decode=True)
... print(event)
{u'from': u'image/with:tag',
u'id': u'container-id',
u'status': u'start',
u'time': 1423339459}
...
or
>>> events = client.events()
>>> for event in events:
... print(event)
>>> # and cancel from another thread
>>> events.close()
"""
if isinstance(since, datetime):
since = utils.datetime_to_timestamp(since)
if isinstance(until, datetime):
until = utils.datetime_to_timestamp(until)
if filters:
filters = utils.convert_filters(filters)
params = {
'since': since,
'until': until,
'filters': filters
}
url = self._url('/events')
response = self._get(url, params=params, stream=True, timeout=None)
stream = self._stream_helper(response, decode=decode)
return types.CancellableStream(stream, response)
def info(self):
"""
Display system-wide information. Identical to the ``docker info``
command.
Returns:
(dict): The info as a dict
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url("/info")), True)
def login(self, username, password=None, email=None, registry=None,
reauth=False, dockercfg_path=None):
"""
Authenticate with a registry. Similar to the ``docker login`` command.
Args:
username (str): The registry username
password (str): The plaintext password
email (str): The email for the registry account
registry (str): URL to the registry. E.g.
``https://index.docker.io/v1/``
reauth (bool): Whether or not to refresh existing authentication on
the Docker server.
dockercfg_path (str): Use a custom path for the Docker config file
(default ``$HOME/.docker/config.json`` if present,
otherwise ``$HOME/.dockercfg``)
Returns:
(dict): The response from the login request
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
# If we don't have any auth data so far, try reloading the config file
# one more time in case anything showed up in there.
# If dockercfg_path is passed check to see if the config file exists,
# if so load that config.
if dockercfg_path and os.path.exists(dockercfg_path):
self._auth_configs = auth.load_config(
dockercfg_path, credstore_env=self.credstore_env
)
elif not self._auth_configs or self._auth_configs.is_empty:
self._auth_configs = auth.load_config(
credstore_env=self.credstore_env
)
authcfg = self._auth_configs.resolve_authconfig(registry)
# If we found an existing auth config for this registry and username
# combination, we can return it immediately unless reauth is requested.
if authcfg and authcfg.get('username', None) == username \
and not reauth:
return authcfg
req_data = {
'username': username,
'password': password,
'email': email,
'serveraddress': registry,
}
response = self._post_json(self._url('/auth'), data=req_data)
if response.status_code == 200:
self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
return self._result(response, json=True)
def ping(self):
"""
Checks the server is responsive. An exception will be raised if it
isn't responding.
Returns:
(bool) The response from the server.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self._result(self._get(self._url('/_ping'))) == 'OK'
def version(self, api_version=True):
"""
Returns version information from the server. Similar to the ``docker
version`` command.
Returns:
(dict): The server version information
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
url = self._url("/version", versioned_api=api_version)
return self._result(self._get(url), json=True)
| DaemonApiMixin |
python | PrefectHQ__prefect | tests/test_settings.py | {
"start": 26977,
"end": 28051
} | class ____:
def test_setting_equality_with_value(self):
with temporary_settings({PREFECT_TEST_SETTING: "foo"}):
assert PREFECT_TEST_SETTING == "foo"
assert PREFECT_TEST_SETTING != "bar"
def test_setting_equality_with_self(self):
assert PREFECT_TEST_SETTING == PREFECT_TEST_SETTING
def test_setting_equality_with_other_setting(self):
assert PREFECT_TEST_SETTING != PREFECT_TEST_MODE
def test_setting_hash_is_consistent(self):
assert hash(PREFECT_TEST_SETTING) == hash(PREFECT_TEST_SETTING)
def test_setting_hash_is_unique(self):
assert hash(PREFECT_TEST_SETTING) != hash(PREFECT_LOGGING_LEVEL)
def test_setting_hash_consistent_on_value_change(self):
original = hash(PREFECT_TEST_SETTING)
with temporary_settings({PREFECT_TEST_SETTING: "foo"}):
assert hash(PREFECT_TEST_SETTING) == original
def test_setting_hash_is_consistent_after_deepcopy(self):
assert hash(PREFECT_TEST_SETTING) == hash(copy.deepcopy(PREFECT_TEST_SETTING))
| TestSettingClass |
python | readthedocs__readthedocs.org | readthedocs/core/views/__init__.py | {
"start": 4952,
"end": 5820
} | class ____(View):
"""Just a 404 view that ignores all URL parameters."""
def get(self, request, *args, **kwargs):
raise Http404()
def do_not_track(request):
dnt_header = request.headers.get("Dnt")
# https://w3c.github.io/dnt/drafts/tracking-dnt.html#status-representation
return JsonResponse( # pylint: disable=redundant-content-type-for-json-response
{
"policy": "https://docs.readthedocs.io/en/latest/privacy-policy.html",
"same-party": [
"readthedocs.org",
"readthedocs.com",
"readthedocs.io", # .org Documentation Sites
"readthedocs-hosted.com", # .com Documentation Sites
],
"tracking": "N" if dnt_header == "1" else "T",
},
content_type="application/tracking-status+json",
)
| PageNotFoundView |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_assets.py | {
"start": 39639,
"end": 43275
} | class ____(TestAssets):
@provide_session
def test_should_respond_200(self, test_client, session):
self.create_assets(num=1)
assert session.query(AssetModel).count() == 1
tz_datetime_format = from_datetime_to_zulu_without_ms(DEFAULT_DATE)
with assert_queries_count(6):
response = test_client.get("/assets/1")
assert response.status_code == 200
assert response.json() == {
"id": 1,
"name": "simple1",
"uri": "s3://bucket/key/1",
"group": "asset",
"extra": {"foo": "bar"},
"created_at": tz_datetime_format,
"updated_at": tz_datetime_format,
"scheduled_dags": [],
"producing_tasks": [],
"consuming_tasks": [],
"aliases": [],
"watchers": [],
"last_asset_event": {"id": None, "timestamp": None},
}
@provide_session
def test_should_respond_200_with_watchers(self, test_client, session):
"""Test that single asset endpoint returns watcher information."""
assets = self.create_assets_with_watchers(session, num=1)
asset = assets[0]
response = test_client.get(f"/assets/{asset.id}")
assert response.status_code == 200
response_data = response.json()
tz_datetime_format = from_datetime_to_zulu_without_ms(DEFAULT_DATE)
assert response_data == {
"id": asset.id,
"name": "watched1",
"uri": "s3://watched/bucket/key/1",
"group": "asset",
"extra": {"foo": "bar"},
"created_at": tz_datetime_format,
"updated_at": tz_datetime_format,
"scheduled_dags": [],
"producing_tasks": [],
"consuming_tasks": [],
"aliases": [],
"watchers": [
{
"name": "watcher_1",
"trigger_id": asset.watchers[0].trigger_id,
"created_date": tz_datetime_format,
}
],
"last_asset_event": {"id": None, "timestamp": None},
}
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.get("/assets/1")
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.get("/assets/1")
assert response.status_code == 403
def test_should_respond_404(self, test_client):
response = test_client.get("/assets/1")
assert response.status_code == 404
assert response.json()["detail"] == "The Asset with ID: `1` was not found"
@pytest.mark.usefixtures("time_freezer")
@pytest.mark.enable_redact
def test_should_mask_sensitive_extra(self, test_client, session):
self.create_assets_with_sensitive_extra()
tz_datetime_format = from_datetime_to_zulu_without_ms(DEFAULT_DATE)
response = test_client.get("/assets/1")
assert response.status_code == 200
assert response.json() == {
"id": 1,
"name": "sensitive1",
"uri": "s3://bucket/key/1",
"group": "asset",
"extra": {"password": "***"},
"created_at": tz_datetime_format,
"updated_at": tz_datetime_format,
"scheduled_dags": [],
"producing_tasks": [],
"consuming_tasks": [],
"aliases": [],
"watchers": [],
"last_asset_event": {"id": None, "timestamp": None},
}
| TestGetAssetEndpoint |
python | astropy__astropy | astropy/cosmology/_src/tests/flrw/test_parameters.py | {
"start": 9018,
"end": 10919
} | class ____(ParameterTestMixin):
"""Tests for `astropy.cosmology.Parameter` Neff on a Cosmology.
Neff is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Neff(self, cosmo_cls: type[Cosmology], cosmo: Cosmology):
"""Test Parameter ``Neff``."""
# on the class
Neff = cosmo_cls.parameters["Neff"]
assert isinstance(Neff, Parameter)
assert "Number of effective neutrino species" in Neff.__doc__
assert Neff.default == 3.04
# validation
assert Neff.validate(cosmo, 1) == 1
assert Neff.validate(cosmo, 10 * u.one) == 10
with pytest.raises(ValueError, match="Neff cannot be negative"):
Neff.validate(cosmo, -1)
# on the instance
assert cosmo.Neff is cosmo.__dict__["Neff"]
assert cosmo.Neff == self.cls_kwargs.get("Neff", 3.04)
assert isinstance(cosmo.Neff, float)
def test_init_Neff(self, cosmo_cls: type[Cosmology], ba: BoundArguments):
"""Test initialization for values of ``Neff``."""
# test that it works with units
ba.arguments["Neff"] = (
cosmo_cls.parameters["Neff"].default << u.one
) # ensure units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Neff == ba.arguments["Neff"]
# also without units
ba.arguments["Neff"] = ba.arguments["Neff"].value # strip units
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Neff == ba.arguments["Neff"]
ba.arguments["Neff"] = -1
with pytest.raises(ValueError):
cosmo_cls(*ba.args, **ba.kwargs)
# =============================================================================
| ParameterNeffTestMixin |
python | huggingface__transformers | tests/models/hgnet_v2/test_modeling_hgnet_v2.py | {
"start": 1124,
"end": 6118
} | class ____:
def __init__(
self,
parent,
batch_size=3,
image_size=32,
num_channels=3,
embeddings_size=10,
hidden_sizes=[64, 128, 256, 512],
stage_in_channels=[16, 64, 128, 256],
stage_mid_channels=[16, 32, 64, 128],
stage_out_channels=[64, 128, 256, 512],
stage_num_blocks=[1, 1, 2, 1],
stage_downsample=[False, True, True, True],
stage_light_block=[False, False, True, True],
stage_kernel_size=[3, 3, 5, 5],
stage_numb_of_layers=[3, 3, 3, 3],
stem_channels=[3, 16, 16],
depths=[1, 1, 2, 1],
is_training=True,
use_labels=True,
hidden_act="relu",
num_labels=3,
scope=None,
out_features=["stage2", "stage3", "stage4"],
out_indices=[2, 3, 4],
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.embeddings_size = embeddings_size
self.hidden_sizes = hidden_sizes
self.stage_in_channels = stage_in_channels
self.stage_mid_channels = stage_mid_channels
self.stage_out_channels = stage_out_channels
self.stage_num_blocks = stage_num_blocks
self.stage_downsample = stage_downsample
self.stage_light_block = stage_light_block
self.stage_kernel_size = stage_kernel_size
self.stage_numb_of_layers = stage_numb_of_layers
self.stem_channels = stem_channels
self.depths = depths
self.is_training = is_training
self.use_labels = use_labels
self.hidden_act = hidden_act
self.num_labels = num_labels
self.scope = scope
self.num_stages = len(hidden_sizes)
self.out_features = out_features
self.out_indices = out_indices
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.num_labels)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return HGNetV2Config(
num_channels=self.num_channels,
embeddings_size=self.embeddings_size,
hidden_sizes=self.hidden_sizes,
stage_in_channels=self.stage_in_channels,
stage_mid_channels=self.stage_mid_channels,
stage_out_channels=self.stage_out_channels,
stage_num_blocks=self.stage_num_blocks,
stage_downsample=self.stage_downsample,
stage_light_block=self.stage_light_block,
stage_kernel_size=self.stage_kernel_size,
stage_numb_of_layers=self.stage_numb_of_layers,
stem_channels=self.stem_channels,
depths=self.depths,
hidden_act=self.hidden_act,
num_labels=self.num_labels,
out_features=self.out_features,
out_indices=self.out_indices,
)
def create_and_check_backbone(self, config, pixel_values, labels):
model = HGNetV2Backbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels), len(config.out_features))
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
# verify backbone works with out_features=None
config.out_features = None
model = HGNetV2Backbone(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps), 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels), 1)
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
def create_and_check_for_image_classification(self, config, pixel_values, labels):
config.num_labels = self.num_labels
model = HGNetV2ForImageClassification(config)
model.to(torch_device)
model.eval()
result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| HGNetV2ModelTester |
python | kamyu104__LeetCode-Solutions | Python/binary-tree-postorder-traversal.py | {
"start": 182,
"end": 1212
} | class ____(object):
def postorderTraversal(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
dummy = TreeNode(0)
dummy.left = root
result, cur = [], dummy
while cur:
if cur.left is None:
cur = cur.right
else:
node = cur.left
while node.right and node.right != cur:
node = node.right
if node.right is None:
node.right = cur
cur = cur.left
else:
result += self.traceBack(cur.left, node)
node.right = None
cur = cur.right
return result
def traceBack(self, frm, to):
result, cur = [], frm
while cur is not to:
result.append(cur.val)
cur = cur.right
result.append(to.val)
result.reverse()
return result
# Time: O(n)
# Space: O(h)
# Stack Solution
| Solution |
python | astropy__astropy | astropy/modeling/tests/test_input.py | {
"start": 10982,
"end": 16303
} | class ____:
"""
A suite of tests to check various cases of parameter and input combinations
on models with n_input = n_output = 1 on a toy model with n_models=1.
Many of these tests mirror test cases in
``astropy.modeling.tests.test_parameters.TestParameterInitialization``,
except that this tests how different parameter arrangements interact with
different types of model inputs.
"""
def test_scalar_parameters_scalar_input(self):
"""
Scalar parameters with a scalar input should return a scalar.
"""
t = TModel_1_1(1, 10)
y = t(100)
assert isinstance(y, float)
assert np.ndim(y) == 0
assert y == 111
def test_scalar_parameters_1d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(5) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (5,)
assert np.all(y == [11, 111, 211, 311, 411])
def test_scalar_parameters_2d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(6).reshape(2, 3) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2, 3)
assert np.all(y == [[11, 111, 211], [311, 411, 511]])
def test_scalar_parameters_3d_array_input(self):
"""
Scalar parameters should broadcast with an array input to result in an
array output of the same shape as the input.
"""
t = TModel_1_1(1, 10)
y = t(np.arange(12).reshape(2, 3, 2) * 100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2, 3, 2)
assert np.all(
y
== [
[[11, 111], [211, 311], [411, 511]],
[[611, 711], [811, 911], [1011, 1111]],
]
)
def test_1d_array_parameters_scalar_input(self):
"""
Array parameters should all be broadcastable with each other, and with
a scalar input the output should be broadcast to the maximum dimensions
of the parameters.
"""
t = TModel_1_1([1, 2], [10, 20])
y = t(100)
assert isinstance(y, np.ndarray)
assert np.shape(y) == (2,)
assert np.all(y == [111, 122])
def test_1d_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([1, 2], [10, 20])
y1 = t([100, 200])
assert np.shape(y1) == (2,)
assert np.all(y1 == [111, 222])
y2 = t([[100], [200]])
assert np.shape(y2) == (2, 2)
assert np.all(y2 == [[111, 122], [211, 222]])
with pytest.raises(ValueError, match="broadcast"):
# Doesn't broadcast
t([100, 200, 300])
def test_2d_array_parameters_2d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1([[1, 2], [3, 4]], [[10, 20], [30, 40]])
y1 = t([[100, 200], [300, 400]])
assert np.shape(y1) == (2, 2)
assert np.all(y1 == [[111, 222], [333, 444]])
y2 = t([[[[100]], [[200]]], [[[300]], [[400]]]])
assert np.shape(y2) == (2, 2, 2, 2)
assert np.all(
y2
== [
[[[111, 122], [133, 144]], [[211, 222], [233, 244]]],
[[[311, 322], [333, 344]], [[411, 422], [433, 444]]],
]
)
with pytest.raises(ValueError, match="broadcast"):
# Doesn't broadcast
t([[100, 200, 300], [400, 500, 600]])
def test_mixed_array_parameters_1d_array_input(self):
"""
When given an array input it must be broadcastable with all the
parameters.
"""
t = TModel_1_1(
[
[[0.01, 0.02, 0.03], [0.04, 0.05, 0.06]],
[[0.07, 0.08, 0.09], [0.10, 0.11, 0.12]],
],
[1, 2, 3],
)
y1 = t([10, 20, 30])
assert np.shape(y1) == (2, 2, 3)
assert_allclose(
y1,
[
[[11.01, 22.02, 33.03], [11.04, 22.05, 33.06]],
[[11.07, 22.08, 33.09], [11.10, 22.11, 33.12]],
],
)
y2 = t([[[[10]]], [[[20]]], [[[30]]]])
assert np.shape(y2) == (3, 2, 2, 3)
assert_allclose(
y2,
[
[
[[11.01, 12.02, 13.03], [11.04, 12.05, 13.06]],
[[11.07, 12.08, 13.09], [11.10, 12.11, 13.12]],
],
[
[[21.01, 22.02, 23.03], [21.04, 22.05, 23.06]],
[[21.07, 22.08, 23.09], [21.10, 22.11, 23.12]],
],
[
[[31.01, 32.02, 33.03], [31.04, 32.05, 33.06]],
[[31.07, 32.08, 33.09], [31.10, 32.11, 33.12]],
],
],
)
| TestSingleInputSingleOutputSingleModel |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/random/random_index_shuffle_test.py | {
"start": 1392,
"end": 4802
} | class ____(test.TestCase, parameterized.TestCase):
@parameterized.parameters(
itertools.product(_SEEDS, _DTYPES, _MAX_INDEX, _DTYPES, _ROUNDS))
def testRawOp(self, seed, seed_dtype, max_index, index_dtype, rounds):
if max_index > 200:
self.skipTest('Too slow in graph mode.')
seen = (max_index + 1) * [False]
seed = math_ops.cast([seed[0], seed[1], 42], seed_dtype)
for index in range(max_index + 1):
new_index = gen_random_index_shuffle_ops.random_index_shuffle(
math_ops.cast(index, index_dtype),
seed,
max_index=math_ops.cast(max_index, index_dtype),
rounds=rounds)
self.assertEqual(new_index.dtype, index_dtype)
new_index = self.evaluate(new_index)
self.assertGreaterEqual(new_index, 0)
self.assertLessEqual(new_index, max_index)
self.assertFalse(seen[new_index])
seen[new_index] = True
@parameterized.parameters(
itertools.product(_SEEDS, _DTYPES, _MAX_INDEX, _DTYPES))
def testUnbatched(self, seed, seed_dtype, max_index, index_dtype):
if max_index > 200:
self.skipTest('Too slow in graph mode.')
seen = (max_index + 1) * [False]
seed = math_ops.cast(seed, seed_dtype)
for index in range(max_index + 1):
new_index = stateless.index_shuffle(
math_ops.cast(index, index_dtype),
seed,
max_index=math_ops.cast(max_index, index_dtype))
self.assertEqual(new_index.dtype, index_dtype)
new_index = self.evaluate(new_index)
self.assertGreaterEqual(new_index, 0)
self.assertLessEqual(new_index, max_index)
self.assertFalse(seen[new_index])
seen[new_index] = True
@parameterized.parameters(
itertools.product(_SEEDS, _DTYPES, _MAX_INDEX, _DTYPES))
def testBatchedBroadcastSeedAndMaxval(self, seed, seed_dtype, max_index,
index_dtype):
seed = math_ops.cast(seed, seed_dtype)
index = math_ops.cast(range(max_index + 1), index_dtype)
new_index = stateless.index_shuffle(index, seed, max_index=max_index)
self.assertEqual(new_index.dtype, index_dtype)
new_index = self.evaluate(new_index)
self.assertAllGreaterEqual(new_index, 0)
self.assertAllLessEqual(new_index, max_index)
self.assertLen(new_index, max_index + 1)
self.assertLen(set(new_index), max_index + 1)
def test_unknown_shape(self):
@def_function.function
def shuffle(repeats):
indices = array_ops.repeat(2, repeats)
return stateless.index_shuffle(indices, seed=(1, 2), max_index=10)
new_index = shuffle(constant_op.constant(2))
self.assertAllGreaterEqual(new_index, 0)
self.assertAllLessEqual(new_index, 10)
def test_negative_index(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError, 'index must be >= 0'
):
self.evaluate(stateless.index_shuffle(-1, seed=(1, 2), max_index=10))
def test_negative_max_index(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError, 'max_index must be >= 0'
):
self.evaluate(stateless.index_shuffle(0, seed=(1, 2), max_index=-1))
def test_index_greater_than_max_index(self):
with self.assertRaisesRegex(
errors.InvalidArgumentError, 'max_index must be >= index'
):
self.evaluate(stateless.index_shuffle(5, seed=(1, 2), max_index=4))
if __name__ == '__main__':
test.main()
| StatelessOpsTest |
python | keras-team__keras | keras/src/losses/losses_test.py | {
"start": 65610,
"end": 69780
} | class ____(testing.TestCase):
def test_config(self):
self.run_class_serialization_test(
losses.CategoricalFocalCrossentropy(name="cfce")
)
def test_all_correct_unweighted(self):
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype="int64")
y_pred = np.array(
[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
dtype="float32",
)
cce_obj = losses.CategoricalFocalCrossentropy(alpha=0.25, gamma=2.0)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 0.0, 3)
# Test with logits.
logits = np.array(
[[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]]
)
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(loss, 0.0, 3)
def test_unweighted(self):
cce_obj = losses.CategoricalFocalCrossentropy()
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
loss = cce_obj(y_true, y_pred)
self.assertAlmostEqual(loss, 0.02059, 3)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits)
self.assertAlmostEqual(loss, 0.000345, 3)
def test_scalar_weighted(self):
cce_obj = losses.CategoricalFocalCrossentropy()
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
loss = cce_obj(y_true, y_pred, sample_weight=2.3)
self.assertAlmostEqual(loss, 0.047368, 3)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=2.3)
self.assertAlmostEqual(loss, 0.000794, 4)
def test_sample_weighted(self):
cce_obj = losses.CategoricalFocalCrossentropy()
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
y_pred = np.array(
[[0.9, 0.05, 0.05], [0.5, 0.89, 0.6], [0.05, 0.01, 0.94]],
dtype="float32",
)
sample_weight = np.array([[1.2], [3.4], [5.6]]).reshape((3, 1))
loss = cce_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.06987, 3)
# Test with logits.
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.CategoricalFocalCrossentropy(from_logits=True)
loss = cce_obj(y_true, logits, sample_weight=sample_weight)
self.assertAlmostEqual(loss, 0.001933, 3)
def test_no_reduction(self):
y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
logits = np.array([[8.0, 1.0, 1.0], [0.0, 9.0, 1.0], [2.0, 3.0, 5.0]])
cce_obj = losses.CategoricalFocalCrossentropy(
from_logits=True, reduction=None
)
loss = cce_obj(y_true, logits)
self.assertAllClose(
(1.5096224e-09, 2.4136547e-11, 1.0360638e-03),
loss,
)
def test_label_smoothing(self):
logits = np.array([[4.9, -0.5, 2.05]])
y_true = np.array([[1, 0, 0]])
label_smoothing = 0.1
cce_obj = losses.CategoricalFocalCrossentropy(
from_logits=True, label_smoothing=label_smoothing
)
loss = cce_obj(y_true, logits)
expected_value = 0.06685
self.assertAlmostEqual(loss, expected_value, 3)
def test_dtype_arg(self):
logits = np.array([[4.9, -0.5, 2.05]])
y_true = np.array([[1, 0, 0]])
cce_obj = losses.CategoricalFocalCrossentropy(
from_logits=True, dtype="bfloat16"
)
loss = cce_obj(y_true, logits)
self.assertDType(loss, "bfloat16")
| CategoricalFocalCrossentropyTest |
python | cython__cython | Cython/Debugger/libcython.py | {
"start": 26915,
"end": 32167
} | class ____(CythonCommand):
"""
Set a breakpoint for Cython code using Cython qualified name notation, e.g.:
cy break cython_modulename.ClassName.method_name...
or normal notation:
cy break function_or_method_name...
or for a line number:
cy break cython_module:lineno...
Set a Python breakpoint:
Break on any function or method named 'func' in module 'modname'
cy break -p modname.func...
Break on any function or method named 'func'
cy break -p func...
"""
name = 'cy break'
command_class = gdb.COMMAND_BREAKPOINTS
def _break_pyx(self, name):
modulename, _, lineno = name.partition(':')
lineno = int(lineno)
if modulename:
cython_module = self.cy.cython_namespace[modulename]
else:
cython_module = self.get_cython_function().module
if (cython_module.filename, lineno) in cython_module.lineno_cy2c:
c_lineno = cython_module.lineno_cy2c[cython_module.filename, lineno]
breakpoint = '%s:%s' % (cython_module.c_filename, c_lineno)
gdb.execute('break ' + breakpoint)
else:
raise gdb.GdbError("Not a valid line number. "
"Does it contain actual code?")
def _break_funcname(self, funcname):
func = self.cy.functions_by_qualified_name.get(funcname)
if func and func.is_initmodule_function:
func = None
break_funcs = [func]
if not func:
funcs = self.cy.functions_by_name.get(funcname) or []
funcs = [f for f in funcs if not f.is_initmodule_function]
if not funcs:
gdb.execute('break ' + funcname)
return
if len(funcs) > 1:
# multiple functions, let the user pick one
print('There are multiple such functions:')
for idx, func in enumerate(funcs):
print('%3d) %s' % (idx, func.qualified_name))
while True:
try:
result = input(
"Select a function, press 'a' for all "
"functions or press 'q' or '^D' to quit: ")
except EOFError:
return
else:
if result.lower() == 'q':
return
elif result.lower() == 'a':
break_funcs = funcs
break
elif (result.isdigit() and
0 <= int(result) < len(funcs)):
break_funcs = [funcs[int(result)]]
break
else:
print('Not understood...')
else:
break_funcs = [funcs[0]]
for func in break_funcs:
gdb.execute('break %s' % func.cname)
if func.pf_cname:
gdb.execute('break %s' % func.pf_cname)
@libpython.dont_suppress_errors
def invoke(self, function_names, from_tty):
if isinstance(function_names, bytes):
function_names = function_names.decode(_filesystemencoding)
argv = string_to_argv(function_names)
if function_names.startswith('-p'):
argv = argv[1:]
python_breakpoints = True
else:
python_breakpoints = False
for funcname in argv:
if python_breakpoints:
gdb.execute('py-break %s' % funcname)
elif ':' in funcname:
self._break_pyx(funcname)
else:
self._break_funcname(funcname)
@libpython.dont_suppress_errors
def complete(self, text, word):
# https://sourceware.org/git/?p=binutils-gdb.git;a=blob;f=gdb/python/py-cmd.c;h=7143c1c5f7fdce9316a8c41fc2246bc6a07630d4;hb=HEAD#l140
word = word or ""
# Filter init-module functions (breakpoints can be set using
# modulename:linenumber).
names = [n for n, L in self.cy.functions_by_name.items()
if any(not f.is_initmodule_function for f in L)]
qnames = [n for n, f in self.cy.functions_by_qualified_name.items()
if not f.is_initmodule_function]
if parameters.complete_unqualified:
all_names = itertools.chain(qnames, names)
else:
all_names = qnames
words = text.strip().split()
if not words or '.' not in words[-1]:
# complete unqualified
seen = set(text[:-len(word)].split())
return [n for n in all_names
if n.startswith(word) and n not in seen]
# complete qualified name
lastword = words[-1]
compl = [n for n in qnames if n.startswith(lastword)]
if len(lastword) > len(word):
# readline sees something (e.g. a '.') as a word boundary, so don't
# "recomplete" this prefix
strip_prefix_length = len(lastword) - len(word)
compl = [n[strip_prefix_length:] for n in compl]
return compl
| CyBreak |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_identity_test.py | {
"start": 11955,
"end": 23854
} | class ____(
linear_operator_test_util.SquareLinearOperatorDerivedClassTest):
"""Most tests done in the base class LinearOperatorDerivedClassTest."""
def tearDown(self):
config.enable_tensor_float_32_execution(self.tf32_keep_)
def setUp(self):
self.tf32_keep_ = config.tensor_float_32_execution_enabled()
config.enable_tensor_float_32_execution(False)
@staticmethod
def dtypes_to_test():
# TODO(langmore) Test tf.float16 once tf.linalg.solve works in
# 16bit.
return [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
@staticmethod
def optional_tests():
"""List of optional test names to run."""
return [
"operator_matmul_with_same_type",
"operator_solve_with_same_type",
]
def operator_and_matrix(
self, build_info, dtype, use_placeholder,
ensure_self_adjoint_and_pd=False):
shape = list(build_info.shape)
assert shape[-1] == shape[-2]
batch_shape = shape[:-2]
num_rows = shape[-1]
# Uniform values that are at least length 1 from the origin. Allows the
# operator to be well conditioned.
# Shape batch_shape
multiplier = linear_operator_test_util.random_sign_uniform(
shape=batch_shape, minval=1., maxval=2., dtype=dtype)
if ensure_self_adjoint_and_pd:
# Abs on complex64 will result in a float32, so we cast back up.
multiplier = math_ops.cast(math_ops.abs(multiplier), dtype=dtype)
# Nothing to feed since LinearOperatorScaledIdentity takes no Tensor args.
lin_op_multiplier = multiplier
if use_placeholder:
lin_op_multiplier = array_ops.placeholder_with_default(
multiplier, shape=None)
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows,
lin_op_multiplier,
is_self_adjoint=True if ensure_self_adjoint_and_pd else None,
is_positive_definite=True if ensure_self_adjoint_and_pd else None)
multiplier_matrix = array_ops.expand_dims(
array_ops.expand_dims(multiplier, -1), -1)
matrix = multiplier_matrix * linalg_ops.eye(
num_rows, batch_shape=batch_shape, dtype=dtype)
return operator, matrix
def test_assert_positive_definite_does_not_raise_when_positive(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=1.)
self.evaluate(operator.assert_positive_definite()) # Should not fail
def test_assert_positive_definite_raises_when_negative(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=-1.)
with self.assertRaisesOpError("not positive definite"):
self.evaluate(operator.assert_positive_definite())
def test_assert_non_singular_does_not_raise_when_non_singular(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[1., 2., 3.])
self.evaluate(operator.assert_non_singular()) # Should not fail
def test_assert_non_singular_raises_when_singular(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[1., 2., 0.])
with self.assertRaisesOpError("was singular"):
self.evaluate(operator.assert_non_singular())
def test_assert_self_adjoint_does_not_raise_when_self_adjoint(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[1. + 0J])
self.evaluate(operator.assert_self_adjoint()) # Should not fail
def test_assert_self_adjoint_raises_when_not_self_adjoint(self):
with self.cached_session():
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=[1. + 1J])
with self.assertRaisesOpError("not self-adjoint"):
self.evaluate(operator.assert_self_adjoint())
def test_float16_matmul(self):
# float16 cannot be tested by base test class because tf.linalg.solve does
# not work with float16.
with self.cached_session():
multiplier = rng.rand(3).astype(np.float16)
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=multiplier)
x = rng.randn(2, 3).astype(np.float16)
y = operator.matmul(x)
self.assertAllClose(multiplier[..., None, None] * x, self.evaluate(y))
def test_non_scalar_num_rows_raises_static(self):
# Many "test_...num_rows" tests are performed in LinearOperatorIdentity.
with self.assertRaisesRegex(ValueError, "must be a 0-D Tensor"):
linalg_lib.LinearOperatorScaledIdentity(
num_rows=[2], multiplier=123.)
def test_wrong_matrix_dimensions_raises_static(self):
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=2.2)
x = rng.randn(3, 3).astype(np.float32)
with self.assertRaisesRegex(ValueError, "Dimensions.*not compatible"):
operator.matmul(x)
def test_wrong_matrix_dimensions_raises_dynamic(self):
num_rows = array_ops.placeholder_with_default(2, shape=None)
x = array_ops.placeholder_with_default(
rng.rand(3, 3).astype(np.float32), shape=None)
with self.cached_session():
with self.assertRaisesError("Dimensions.*not.compatible"):
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows,
multiplier=[1., 2],
assert_proper_shapes=True)
self.evaluate(operator.matmul(x))
def test_broadcast_matmul_and_solve(self):
# These cannot be done in the automated (base test class) tests since they
# test shapes that tf.batch_matmul cannot handle.
# In particular, tf.batch_matmul does not broadcast.
with self.cached_session() as sess:
# Given this x and LinearOperatorScaledIdentity shape of (2, 1, 3, 3), the
# broadcast shape of operator and 'x' is (2, 2, 3, 4)
x = random_ops.random_normal(shape=(1, 2, 3, 4))
# operator is 2.2 * identity (with a batch shape).
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=3, multiplier=2.2 * array_ops.ones((2, 1)))
# Batch matrix of zeros with the broadcast shape of x and operator.
zeros = array_ops.zeros(shape=(2, 2, 3, 4), dtype=x.dtype)
# Test matmul
expected = x * 2.2 + zeros
operator_matmul = operator.matmul(x)
self.assertAllEqual(operator_matmul.shape, expected.shape)
self.assertAllClose(*self.evaluate([operator_matmul, expected]))
# Test solve
expected = x / 2.2 + zeros
operator_solve = operator.solve(x)
self.assertAllEqual(operator_solve.shape, expected.shape)
self.assertAllClose(*self.evaluate([operator_solve, expected]))
def test_broadcast_matmul_and_solve_scalar_scale_multiplier(self):
# These cannot be done in the automated (base test class) tests since they
# test shapes that tf.batch_matmul cannot handle.
# In particular, tf.batch_matmul does not broadcast.
with self.cached_session() as sess:
# Given this x and LinearOperatorScaledIdentity shape of (3, 3), the
# broadcast shape of operator and 'x' is (1, 2, 3, 4), which is the same
# shape as x.
x = random_ops.random_normal(shape=(1, 2, 3, 4))
# operator is 2.2 * identity (with a batch shape).
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=3, multiplier=2.2)
# Test matmul
expected = x * 2.2
operator_matmul = operator.matmul(x)
self.assertAllEqual(operator_matmul.shape, expected.shape)
self.assertAllClose(*self.evaluate([operator_matmul, expected]))
# Test solve
expected = x / 2.2
operator_solve = operator.solve(x)
self.assertAllEqual(operator_solve.shape, expected.shape)
self.assertAllClose(*self.evaluate([operator_solve, expected]))
def test_is_x_flags(self):
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=1.,
is_positive_definite=False, is_non_singular=True)
self.assertFalse(operator.is_positive_definite)
self.assertTrue(operator.is_non_singular)
self.assertTrue(operator.is_self_adjoint) # Auto-set due to real multiplier
def test_identity_adjoint_type(self):
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=1., is_non_singular=True)
self.assertIsInstance(
operator.adjoint(), linalg_lib.LinearOperatorScaledIdentity)
def test_identity_matmul(self):
operator1 = linalg_lib.LinearOperatorIdentity(num_rows=2)
operator2 = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=3.)
self.assertIsInstance(
operator1.matmul(operator1),
linalg_lib.LinearOperatorIdentity)
self.assertIsInstance(
operator1.matmul(operator1),
linalg_lib.LinearOperatorIdentity)
self.assertIsInstance(
operator2.matmul(operator2),
linalg_lib.LinearOperatorScaledIdentity)
operator_matmul = operator1.matmul(operator2)
self.assertIsInstance(
operator_matmul,
linalg_lib.LinearOperatorScaledIdentity)
self.assertAllClose(3., self.evaluate(operator_matmul.multiplier))
operator_matmul = operator2.matmul(operator1)
self.assertIsInstance(
operator_matmul,
linalg_lib.LinearOperatorScaledIdentity)
self.assertAllClose(3., self.evaluate(operator_matmul.multiplier))
def test_identity_solve(self):
operator1 = linalg_lib.LinearOperatorIdentity(num_rows=2)
operator2 = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=3.)
self.assertIsInstance(
operator1.solve(operator1),
linalg_lib.LinearOperatorIdentity)
self.assertIsInstance(
operator2.solve(operator2),
linalg_lib.LinearOperatorScaledIdentity)
operator_solve = operator1.solve(operator2)
self.assertIsInstance(
operator_solve,
linalg_lib.LinearOperatorScaledIdentity)
self.assertAllClose(3., self.evaluate(operator_solve.multiplier))
operator_solve = operator2.solve(operator1)
self.assertIsInstance(
operator_solve,
linalg_lib.LinearOperatorScaledIdentity)
self.assertAllClose(1. / 3., self.evaluate(operator_solve.multiplier))
def test_scaled_identity_cholesky_type(self):
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2,
multiplier=3.,
is_positive_definite=True,
is_self_adjoint=True,
)
self.assertIsInstance(
operator.cholesky(),
linalg_lib.LinearOperatorScaledIdentity)
def test_scaled_identity_inverse_type(self):
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2,
multiplier=3.,
is_non_singular=True,
)
self.assertIsInstance(
operator.inverse(),
linalg_lib.LinearOperatorScaledIdentity)
def test_ref_type_shape_args_raises(self):
with self.assertRaisesRegex(TypeError, "num_rows.*reference"):
linalg_lib.LinearOperatorScaledIdentity(
num_rows=variables_module.Variable(2), multiplier=1.23)
def test_tape_safe(self):
multiplier = variables_module.Variable(1.23)
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=multiplier)
self.check_tape_safe(operator)
def test_convert_variables_to_tensors(self):
multiplier = variables_module.Variable(1.23)
operator = linalg_lib.LinearOperatorScaledIdentity(
num_rows=2, multiplier=multiplier)
with self.cached_session() as sess:
sess.run([multiplier.initializer])
self.check_convert_variables_to_tensors(operator)
if __name__ == "__main__":
linear_operator_test_util.add_tests(LinearOperatorIdentityTest)
linear_operator_test_util.add_tests(LinearOperatorScaledIdentityTest)
test.main()
| LinearOperatorScaledIdentityTest |
python | keras-team__keras | keras/src/backend/common/keras_tensor_test.py | {
"start": 246,
"end": 16398
} | class ____(testing.TestCase):
def test_attributes(self):
x = keras_tensor.KerasTensor(shape=(3,), dtype="float32", sparse=True)
self.assertEqual(x.dtype, "float32")
self.assertEqual(x.shape, (3,))
self.assertEqual(x.sparse, True)
# Raise error if trying to set attributes
with self.assertRaisesRegex(
AttributeError, "The `shape` attribute of KerasTensor is immutable."
):
x.shape = [3, 2]
with self.assertRaisesRegex(
AttributeError, "The `dtype` attribute of KerasTensor is immutable."
):
x.dtype = "int32"
def test_attributes_sparse(self):
x = keras_tensor.KerasTensor(shape=(3,), dtype="float32", sparse=True)
self.assertEqual(x.sparse, True)
# Raise error if trying to set attributes
with self.assertRaisesRegex(
AttributeError,
"The `sparse` attribute of KerasTensor is immutable.",
):
x.sparse = False
def test_attributes_ragged(self):
x = keras_tensor.KerasTensor(shape=(3,), dtype="float32", ragged=True)
self.assertEqual(x.ragged, True)
# Raise error if trying to set attributes
with self.assertRaisesRegex(
AttributeError,
"The `ragged` attribute of KerasTensor is immutable.",
):
x.ragged = False
def test_init_sparse_ragged_raises(self):
with self.assertRaisesRegex(
ValueError, "cannot have `sparse=True` and `ragged=True`"
):
keras_tensor.KerasTensor(shape=(3,), sparse=True, ragged=True)
def test_numpy_methods(self):
x = keras_tensor.KerasTensor(shape=(3, 2), dtype="float32")
# reshape
x = x.reshape((6,))
self.assertEqual(x.shape, (6,))
# expand_dims, squeeze
x = ops.expand_dims(x, -1)
self.assertEqual(x.shape, (6, 1))
x = x.squeeze()
self.assertEqual(x.shape, (6,))
x = ops.expand_dims(x, axis=0)
self.assertEqual(x.shape, (1, 6))
x = x.squeeze(axis=0)
self.assertEqual(x.shape, (6,))
def test_invalid_usage(self):
x = keras_tensor.KerasTensor(shape=(3,), dtype="float32")
with self.assertRaisesRegex(
ValueError, "doesn't have any actual numerical value"
):
np.array(x)
if backend.backend() == "jax":
from jax import numpy as jnp
with self.assertRaisesRegex(
ValueError, "cannot be used as input to a JAX function"
):
jnp.array(x)
with self.assertRaisesRegex(
ValueError, "cannot be used as input to a TensorFlow function"
):
tf.convert_to_tensor(x)
def test_bool(self):
tensor = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
with self.assertRaisesRegex(TypeError, "cannot be used as a boolean."):
bool(tensor)
def test_representation(self):
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
self.assertIn("<KerasTensor shape=(3, 4)", repr(x))
def test_iterating(self):
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
with self.assertRaises(NotImplementedError):
iter(x)
def test_any_symbolic_tensors(self):
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = np.array([1, 2, 3])
self.assertTrue(keras_tensor.any_symbolic_tensors(args=[x, y]))
self.assertFalse(keras_tensor.any_symbolic_tensors(args=[y]))
def test_is_keras_tensor(self):
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
self.assertTrue(keras_tensor.is_keras_tensor(x))
y = np.array([1, 2, 3])
self.assertFalse(keras_tensor.is_keras_tensor(y))
@patch("keras.src.ops.Absolute.symbolic_call")
def test_abs_method(self, mock_symbolic_call):
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
abs_x = abs(x) # this will internally call x.__abs__()
mock_symbolic_call.assert_called_once_with(x)
self.assertEqual(abs_x, mock_tensor)
@patch("keras.src.ops.Negative.symbolic_call")
def test_neg_method(self, mock_method):
self._test_unary_op_method(mock_method, lambda x: -x)
@patch("keras.src.ops.Subtract.symbolic_call")
def test_sub_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x - y)
@patch("keras.src.ops.Multiply.symbolic_call")
def test_mul_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x * y)
@patch("keras.src.ops.Matmul.symbolic_call")
def test_matmul_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x @ y)
@patch("keras.src.ops.Power.symbolic_call")
def test_pow_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x**y)
@patch("keras.src.ops.Mod.symbolic_call")
def test_mod_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x % y)
@patch("keras.src.ops.Less.symbolic_call")
def test_lt_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x < y)
@patch("keras.src.ops.LogicalAnd.symbolic_call")
def test_and_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x & y)
@patch("keras.src.ops.LogicalOr.symbolic_call")
def test_or_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x | y)
@patch("keras.src.ops.GetItem.symbolic_call")
def test_getitem_method(self, mock_method):
y = Mock()
self._test_binary_op_method(mock_method, y, lambda x, y: x[y])
def _test_unary_op_method(self, mock_method, operator):
mock_tensor = Mock()
mock_method.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
result = operator(x)
mock_method.assert_called_once_with(x)
self.assertEqual(result, mock_tensor)
def _test_binary_op_method(self, mock_method, other, operator):
mock_tensor = Mock()
mock_method.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
result = operator(x, other)
mock_method.assert_called_once_with(x, other)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.Add.symbolic_call")
def test_radd_method(self, mock_symbolic_call):
"""Test __radd__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = y + x
mock_symbolic_call.assert_called_once_with(y, x)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.Subtract.symbolic_call")
def test_rsub_method(self, mock_symbolic_call):
"""Test __rsub__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = y - x
mock_symbolic_call.assert_called_once_with(y, x)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.Multiply.symbolic_call")
def test_rmul_method(self, mock_symbolic_call):
"""Test __rmul__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = y * x
mock_symbolic_call.assert_called_once_with(y, x)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.Matmul.symbolic_call")
def test_rmatmul_method(self, mock_symbolic_call):
"""Test __rmatmul__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = y @ x
mock_symbolic_call.assert_called_once_with(y, x)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.Power.symbolic_call")
def test_rpow_method(self, mock_symbolic_call):
"""Test __rpow__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = y**x
mock_symbolic_call.assert_called_once_with(y, x)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.FloorDivide.symbolic_call")
def test_floordiv_method(self, mock_symbolic_call):
"""Test __floordiv__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = x // y
mock_symbolic_call.assert_called_once_with(x, y)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.FloorDivide.symbolic_call")
def test_rfloordiv_method(self, mock_symbolic_call):
"""Test __rfloordiv__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = y // x
mock_symbolic_call.assert_called_once_with(y, x)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.Mod.symbolic_call")
def test_rmod_method(self, mock_symbolic_call):
"""Test __rmod__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = y % x
mock_symbolic_call.assert_called_once_with(y, x)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.LessEqual.symbolic_call")
def test_le_method(self, mock_symbolic_call):
"""Test __le__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = x <= y
mock_symbolic_call.assert_called_once_with(x, y)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.Greater.symbolic_call")
def test_gt_method(self, mock_symbolic_call):
"""Test __gt__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = x > y
mock_symbolic_call.assert_called_once_with(x, y)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.GreaterEqual.symbolic_call")
def test_ge_method(self, mock_symbolic_call):
"""Test __ge__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = x >= y
mock_symbolic_call.assert_called_once_with(x, y)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.NotEqual.symbolic_call")
def test_ne_method(self, mock_symbolic_call):
"""Test __ne__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = x != y
mock_symbolic_call.assert_called_once_with(x, y)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.LogicalAnd.symbolic_call")
def test_rand_method(self, mock_symbolic_call):
"""Test __rand__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="bool")
y = Mock()
result = y & x
mock_symbolic_call.assert_called_once_with(y, x)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.LogicalOr.symbolic_call")
def test_ror_method(self, mock_symbolic_call):
"""Test __ror__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="bool")
y = Mock()
result = y | x
mock_symbolic_call.assert_called_once_with(y, x)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.LogicalNot.symbolic_call")
def test_invert_method(self, mock_symbolic_call):
"""Test __invert__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="bool")
result = ~x
mock_symbolic_call.assert_called_once_with(x)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.LogicalXor.symbolic_call")
def test_xor_method(self, mock_symbolic_call):
"""Test __xor__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="bool")
y = Mock()
result = x ^ y
mock_symbolic_call.assert_called_once_with(x, y)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.LogicalXor.symbolic_call")
def test_rxor_method(self, mock_symbolic_call):
"""Test __rxor__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="bool")
y = Mock()
result = y ^ x
mock_symbolic_call.assert_called_once_with(y, x)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.TrueDivide.symbolic_call")
def test_truediv_method(self, mock_symbolic_call):
"""Test __truediv__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = x / y
mock_symbolic_call.assert_called_once_with(x, y)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.TrueDivide.symbolic_call")
def test_rtruediv_method(self, mock_symbolic_call):
"""Test __rtruediv__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = Mock()
result = y / x
mock_symbolic_call.assert_called_once_with(y, x)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.Divide.symbolic_call")
def test_div_method(self, mock_symbolic_call):
"""Test __div__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
# to ensure compatibility across Python versions
result = x.__div__(y)
mock_symbolic_call.assert_called_once_with(x, y)
self.assertEqual(result, mock_tensor)
@patch("keras.src.ops.Divide.symbolic_call")
def test_rdiv_method(self, mock_symbolic_call):
"""Test __rdiv__ method"""
mock_tensor = Mock()
mock_symbolic_call.return_value = mock_tensor
x = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
y = keras_tensor.KerasTensor(shape=(3, 4), dtype="float32")
# to ensure compatibility across Python versions
result = x.__rdiv__(y)
mock_symbolic_call.assert_called_once_with(y, x)
self.assertEqual(result, mock_tensor)
| KerasTensorTest |
python | pydantic__pydantic | pydantic/v1/networks.py | {
"start": 4554,
"end": 11890
} | class ____(str):
strip_whitespace = True
min_length = 1
max_length = 2**16
allowed_schemes: Optional[Collection[str]] = None
tld_required: bool = False
user_required: bool = False
host_required: bool = True
hidden_parts: Set[str] = set()
__slots__ = ('scheme', 'user', 'password', 'host', 'tld', 'host_type', 'port', 'path', 'query', 'fragment')
@no_type_check
def __new__(cls, url: Optional[str], **kwargs) -> object:
return str.__new__(cls, cls.build(**kwargs) if url is None else url)
def __init__(
self,
url: str,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: Optional[str] = None,
tld: Optional[str] = None,
host_type: str = 'domain',
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
) -> None:
str.__init__(url)
self.scheme = scheme
self.user = user
self.password = password
self.host = host
self.tld = tld
self.host_type = host_type
self.port = port
self.path = path
self.query = query
self.fragment = fragment
@classmethod
def build(
cls,
*,
scheme: str,
user: Optional[str] = None,
password: Optional[str] = None,
host: str,
port: Optional[str] = None,
path: Optional[str] = None,
query: Optional[str] = None,
fragment: Optional[str] = None,
**_kwargs: str,
) -> str:
parts = Parts(
scheme=scheme,
user=user,
password=password,
host=host,
port=port,
path=path,
query=query,
fragment=fragment,
**_kwargs, # type: ignore[misc]
)
url = scheme + '://'
if user:
url += user
if password:
url += ':' + password
if user or password:
url += '@'
url += host
if port and ('port' not in cls.hidden_parts or cls.get_default_parts(parts).get('port') != port):
url += ':' + port
if path:
url += path
if query:
url += '?' + query
if fragment:
url += '#' + fragment
return url
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(field_schema, minLength=cls.min_length, maxLength=cls.max_length, format='uri')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Any, field: 'ModelField', config: 'BaseConfig') -> 'AnyUrl':
if value.__class__ == cls:
return value
value = str_validator(value)
if cls.strip_whitespace:
value = value.strip()
url: str = cast(str, constr_length_validator(value, field, config))
m = cls._match_url(url)
# the regex should always match, if it doesn't please report with details of the URL tried
assert m, 'URL regex failed unexpectedly'
original_parts = cast('Parts', m.groupdict())
parts = cls.apply_default_parts(original_parts)
parts = cls.validate_parts(parts)
if m.end() != len(url):
raise errors.UrlExtraError(extra=url[m.end() :])
return cls._build_url(m, url, parts)
@classmethod
def _build_url(cls, m: Match[str], url: str, parts: 'Parts') -> 'AnyUrl':
"""
Validate hosts and build the AnyUrl object. Split from `validate` so this method
can be altered in `MultiHostDsn`.
"""
host, tld, host_type, rebuild = cls.validate_host(parts)
return cls(
None if rebuild else url,
scheme=parts['scheme'],
user=parts['user'],
password=parts['password'],
host=host,
tld=tld,
host_type=host_type,
port=parts['port'],
path=parts['path'],
query=parts['query'],
fragment=parts['fragment'],
)
@staticmethod
def _match_url(url: str) -> Optional[Match[str]]:
return url_regex().match(url)
@staticmethod
def _validate_port(port: Optional[str]) -> None:
if port is not None and int(port) > 65_535:
raise errors.UrlPortError()
@classmethod
def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts':
"""
A method used to validate parts of a URL.
Could be overridden to set default values for parts if missing
"""
scheme = parts['scheme']
if scheme is None:
raise errors.UrlSchemeError()
if cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes:
raise errors.UrlSchemePermittedError(set(cls.allowed_schemes))
if validate_port:
cls._validate_port(parts['port'])
user = parts['user']
if cls.user_required and user is None:
raise errors.UrlUserInfoError()
return parts
@classmethod
def validate_host(cls, parts: 'Parts') -> Tuple[str, Optional[str], str, bool]:
tld, host_type, rebuild = None, None, False
for f in ('domain', 'ipv4', 'ipv6'):
host = parts[f] # type: ignore[literal-required]
if host:
host_type = f
break
if host is None:
if cls.host_required:
raise errors.UrlHostError()
elif host_type == 'domain':
is_international = False
d = ascii_domain_regex().fullmatch(host)
if d is None:
d = int_domain_regex().fullmatch(host)
if d is None:
raise errors.UrlHostError()
is_international = True
tld = d.group('tld')
if tld is None and not is_international:
d = int_domain_regex().fullmatch(host)
assert d is not None
tld = d.group('tld')
is_international = True
if tld is not None:
tld = tld[1:]
elif cls.tld_required:
raise errors.UrlHostTldError()
if is_international:
host_type = 'int_domain'
rebuild = True
host = host.encode('idna').decode('ascii')
if tld is not None:
tld = tld.encode('idna').decode('ascii')
return host, tld, host_type, rebuild # type: ignore
@staticmethod
def get_default_parts(parts: 'Parts') -> 'Parts':
return {}
@classmethod
def apply_default_parts(cls, parts: 'Parts') -> 'Parts':
for key, value in cls.get_default_parts(parts).items():
if not parts[key]: # type: ignore[literal-required]
parts[key] = value # type: ignore[literal-required]
return parts
def __repr__(self) -> str:
extra = ', '.join(f'{n}={getattr(self, n)!r}' for n in self.__slots__ if getattr(self, n) is not None)
return f'{self.__class__.__name__}({super().__repr__()}, {extra})'
| AnyUrl |
python | keras-team__keras | keras/src/layers/merging/subtract.py | {
"start": 167,
"end": 2684
} | class ____(Merge):
"""Performs elementwise subtraction.
It takes as input a list of tensors of size 2 both of the
same shape, and returns a single tensor (inputs[0] - inputs[1])
of same shape.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.Subtract()([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> # equivalent to `subtracted = keras.layers.subtract([x1, x2])`
>>> subtracted = keras.layers.Subtract()([x1, x2])
>>> out = keras.layers.Dense(4)(subtracted)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
def build(self, input_shape):
super().build(input_shape)
if len(input_shape) != 2:
raise ValueError(
"A `Subtract` layer should be called on exactly 2 inputs. "
f"Received: input_shape={input_shape}"
)
def _merge_function(self, inputs):
if len(inputs) != 2:
raise ValueError(
"A `Subtract` layer should be called on exactly 2 inputs. "
f"Received: inputs={inputs}"
)
return ops.subtract(inputs[0], inputs[1])
@keras_export("keras.layers.subtract")
def subtract(inputs, **kwargs):
"""Functional interface to the `keras.layers.Subtract` layer.
Args:
inputs: A list of input tensors of size 2, each tensor of
the same shape.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor as the difference of the inputs. It has the same shape
as the inputs.
Examples:
>>> input_shape = (2, 3, 4)
>>> x1 = np.random.rand(*input_shape)
>>> x2 = np.random.rand(*input_shape)
>>> y = keras.layers.subtract([x1, x2])
Usage in a Keras model:
>>> input1 = keras.layers.Input(shape=(16,))
>>> x1 = keras.layers.Dense(8, activation='relu')(input1)
>>> input2 = keras.layers.Input(shape=(32,))
>>> x2 = keras.layers.Dense(8, activation='relu')(input2)
>>> subtracted = keras.layers.subtract([x1, x2])
>>> out = keras.layers.Dense(4)(subtracted)
>>> model = keras.models.Model(inputs=[input1, input2], outputs=out)
"""
return Subtract(**kwargs)(inputs)
| Subtract |
python | scipy__scipy | scipy/linalg/tests/test_decomp_update.py | {
"start": 47761,
"end": 66231
} | class ____(BaseQRdeltas):
def generate(self, type, mode='full', p=1):
a, q, r = super().generate(type, mode)
rng = np.random.default_rng(1234)
if p == 1:
u = rng.random(q.shape[0])
v = rng.random(r.shape[1])
else:
u = rng.random((q.shape[0], p))
v = rng.random((r.shape[1], p))
if np.iscomplexobj(self.dtype.type(1)):
b = rng.random(u.shape)
u = u + 1j * b
c = rng.random(v.shape)
v = v + 1j * c
u = u.astype(self.dtype)
v = v.astype(self.dtype)
return a, q, r, u, v
def test_sqr_rank_1(self):
a, q, r, u, v = self.generate('sqr')
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.outer(u, v.conj())
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_sqr_rank_p(self):
# test ndim = 2, rank 1 updates here too
for p in [1, 2, 3, 5]:
a, q, r, u, v = self.generate('sqr', p=p)
if p == 1:
u = u.reshape(u.size, 1)
v = v.reshape(v.size, 1)
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.dot(u, v.T.conj())
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_tall_rank_1(self):
a, q, r, u, v = self.generate('tall')
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.outer(u, v.conj())
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_tall_rank_p(self):
for p in [1, 2, 3, 5]:
a, q, r, u, v = self.generate('tall', p=p)
if p == 1:
u = u.reshape(u.size, 1)
v = v.reshape(v.size, 1)
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.dot(u, v.T.conj())
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_fat_rank_1(self):
a, q, r, u, v = self.generate('fat')
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.outer(u, v.conj())
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_fat_rank_p(self):
for p in [1, 2, 3, 5]:
a, q, r, u, v = self.generate('fat', p=p)
if p == 1:
u = u.reshape(u.size, 1)
v = v.reshape(v.size, 1)
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.dot(u, v.T.conj())
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_economic_rank_1(self):
a, q, r, u, v = self.generate('tall', 'economic')
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.outer(u, v.conj())
check_qr(q1, r1, a1, self.rtol, self.atol, False)
def test_economic_rank_p(self):
for p in [1, 2, 3, 5]:
a, q, r, u, v = self.generate('tall', 'economic', p)
if p == 1:
u = u.reshape(u.size, 1)
v = v.reshape(v.size, 1)
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.dot(u, v.T.conj())
check_qr(q1, r1, a1, self.rtol, self.atol, False)
def test_Mx1_rank_1(self):
a, q, r, u, v = self.generate('Mx1')
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.outer(u, v.conj())
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_Mx1_rank_p(self):
# when M or N == 1, only a rank 1 update is allowed. This isn't
# fundamental limitation, but the code does not support it.
a, q, r, u, v = self.generate('Mx1', p=1)
u = u.reshape(u.size, 1)
v = v.reshape(v.size, 1)
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.dot(u, v.T.conj())
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_Mx1_economic_rank_1(self):
a, q, r, u, v = self.generate('Mx1', 'economic')
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.outer(u, v.conj())
check_qr(q1, r1, a1, self.rtol, self.atol, False)
def test_Mx1_economic_rank_p(self):
# when M or N == 1, only a rank 1 update is allowed. This isn't
# fundamental limitation, but the code does not support it.
a, q, r, u, v = self.generate('Mx1', 'economic', p=1)
u = u.reshape(u.size, 1)
v = v.reshape(v.size, 1)
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.dot(u, v.T.conj())
check_qr(q1, r1, a1, self.rtol, self.atol, False)
def test_1xN_rank_1(self):
a, q, r, u, v = self.generate('1xN')
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.outer(u, v.conj())
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_1xN_rank_p(self):
# when M or N == 1, only a rank 1 update is allowed. This isn't
# fundamental limitation, but the code does not support it.
a, q, r, u, v = self.generate('1xN', p=1)
u = u.reshape(u.size, 1)
v = v.reshape(v.size, 1)
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.dot(u, v.T.conj())
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_1x1_rank_1(self):
a, q, r, u, v = self.generate('1x1')
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.outer(u, v.conj())
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_1x1_rank_p(self):
# when M or N == 1, only a rank 1 update is allowed. This isn't
# fundamental limitation, but the code does not support it.
a, q, r, u, v = self.generate('1x1', p=1)
u = u.reshape(u.size, 1)
v = v.reshape(v.size, 1)
q1, r1 = qr_update(q, r, u, v, False)
a1 = a + np.dot(u, v.T.conj())
check_qr(q1, r1, a1, self.rtol, self.atol)
def test_1x1_rank_1_scalar(self):
a, q, r, u, v = self.generate('1x1')
assert_raises(ValueError, qr_update, q[0, 0], r, u, v)
assert_raises(ValueError, qr_update, q, r[0, 0], u, v)
assert_raises(ValueError, qr_update, q, r, u[0], v)
assert_raises(ValueError, qr_update, q, r, u, v[0])
def base_non_simple_strides(self, adjust_strides, mode, p, overwriteable):
assert_sqr = False if mode == 'economic' else True
for type in ['sqr', 'tall', 'fat']:
a, q0, r0, u0, v0 = self.generate(type, mode, p)
qs, rs, us, vs = adjust_strides((q0, r0, u0, v0))
if p == 1:
aup = a + np.outer(u0, v0.conj())
else:
aup = a + np.dot(u0, v0.T.conj())
# for each variable, q, r, u, v we try with it strided and
# overwrite=False. Then we try with overwrite=True, and make
# sure that if p == 1, r and v are still overwritten.
# a strided q and u must always be copied.
q = q0.copy('F')
r = r0.copy('F')
u = u0.copy('F')
v = v0.copy('C')
q1, r1 = qr_update(qs, r, u, v, False)
check_qr(q1, r1, aup, self.rtol, self.atol, assert_sqr)
q1o, r1o = qr_update(qs, r, u, v, True)
check_qr(q1o, r1o, aup, self.rtol, self.atol, assert_sqr)
if overwriteable:
assert_allclose(r1o, r, rtol=self.rtol, atol=self.atol)
assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)
q = q0.copy('F')
r = r0.copy('F')
u = u0.copy('F')
v = v0.copy('C')
q2, r2 = qr_update(q, rs, u, v, False)
check_qr(q2, r2, aup, self.rtol, self.atol, assert_sqr)
q2o, r2o = qr_update(q, rs, u, v, True)
check_qr(q2o, r2o, aup, self.rtol, self.atol, assert_sqr)
if overwriteable:
assert_allclose(r2o, rs, rtol=self.rtol, atol=self.atol)
assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)
q = q0.copy('F')
r = r0.copy('F')
u = u0.copy('F')
v = v0.copy('C')
q3, r3 = qr_update(q, r, us, v, False)
check_qr(q3, r3, aup, self.rtol, self.atol, assert_sqr)
q3o, r3o = qr_update(q, r, us, v, True)
check_qr(q3o, r3o, aup, self.rtol, self.atol, assert_sqr)
if overwriteable:
assert_allclose(r3o, r, rtol=self.rtol, atol=self.atol)
assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)
q = q0.copy('F')
r = r0.copy('F')
u = u0.copy('F')
v = v0.copy('C')
q4, r4 = qr_update(q, r, u, vs, False)
check_qr(q4, r4, aup, self.rtol, self.atol, assert_sqr)
q4o, r4o = qr_update(q, r, u, vs, True)
check_qr(q4o, r4o, aup, self.rtol, self.atol, assert_sqr)
if overwriteable:
assert_allclose(r4o, r, rtol=self.rtol, atol=self.atol)
assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol)
q = q0.copy('F')
r = r0.copy('F')
u = u0.copy('F')
v = v0.copy('C')
# since some of these were consumed above
qs, rs, us, vs = adjust_strides((q, r, u, v))
q5, r5 = qr_update(qs, rs, us, vs, False)
check_qr(q5, r5, aup, self.rtol, self.atol, assert_sqr)
q5o, r5o = qr_update(qs, rs, us, vs, True)
check_qr(q5o, r5o, aup, self.rtol, self.atol, assert_sqr)
if overwriteable:
assert_allclose(r5o, rs, rtol=self.rtol, atol=self.atol)
assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol)
def test_non_unit_strides_rank_1(self):
self.base_non_simple_strides(make_strided, 'full', 1, True)
def test_non_unit_strides_economic_rank_1(self):
self.base_non_simple_strides(make_strided, 'economic', 1, True)
def test_non_unit_strides_rank_p(self):
self.base_non_simple_strides(make_strided, 'full', 3, False)
def test_non_unit_strides_economic_rank_p(self):
self.base_non_simple_strides(make_strided, 'economic', 3, False)
def test_neg_strides_rank_1(self):
self.base_non_simple_strides(negate_strides, 'full', 1, False)
def test_neg_strides_economic_rank_1(self):
self.base_non_simple_strides(negate_strides, 'economic', 1, False)
def test_neg_strides_rank_p(self):
self.base_non_simple_strides(negate_strides, 'full', 3, False)
def test_neg_strides_economic_rank_p(self):
self.base_non_simple_strides(negate_strides, 'economic', 3, False)
def test_non_itemsize_strides_rank_1(self):
self.base_non_simple_strides(nonitemsize_strides, 'full', 1, False)
def test_non_itemsize_strides_economic_rank_1(self):
self.base_non_simple_strides(nonitemsize_strides, 'economic', 1, False)
def test_non_itemsize_strides_rank_p(self):
self.base_non_simple_strides(nonitemsize_strides, 'full', 3, False)
def test_non_itemsize_strides_economic_rank_p(self):
self.base_non_simple_strides(nonitemsize_strides, 'economic', 3, False)
def test_non_native_byte_order_rank_1(self):
self.base_non_simple_strides(make_nonnative, 'full', 1, False)
def test_non_native_byte_order_economic_rank_1(self):
self.base_non_simple_strides(make_nonnative, 'economic', 1, False)
def test_non_native_byte_order_rank_p(self):
self.base_non_simple_strides(make_nonnative, 'full', 3, False)
def test_non_native_byte_order_economic_rank_p(self):
self.base_non_simple_strides(make_nonnative, 'economic', 3, False)
def test_overwrite_qruv_rank_1(self):
# Any positive strided q, r, u, and v can be overwritten for a rank 1
# update, only checking C and F contiguous.
a, q0, r0, u0, v0 = self.generate('sqr')
a1 = a + np.outer(u0, v0.conj())
q = q0.copy('F')
r = r0.copy('F')
u = u0.copy('F')
v = v0.copy('F')
# don't overwrite
q1, r1 = qr_update(q, r, u, v, False)
check_qr(q1, r1, a1, self.rtol, self.atol)
check_qr(q, r, a, self.rtol, self.atol)
q2, r2 = qr_update(q, r, u, v, True)
check_qr(q2, r2, a1, self.rtol, self.atol)
# verify the overwriting, no good way to check u and v.
assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)
q = q0.copy('C')
r = r0.copy('C')
u = u0.copy('C')
v = v0.copy('C')
q3, r3 = qr_update(q, r, u, v, True)
check_qr(q3, r3, a1, self.rtol, self.atol)
assert_allclose(q3, q, rtol=self.rtol, atol=self.atol)
assert_allclose(r3, r, rtol=self.rtol, atol=self.atol)
def test_overwrite_qruv_rank_1_economic(self):
# updating economic decompositions can overwrite any contiguous r,
# and positively strided r and u. V is only ever read.
# only checking C and F contiguous.
a, q0, r0, u0, v0 = self.generate('tall', 'economic')
a1 = a + np.outer(u0, v0.conj())
q = q0.copy('F')
r = r0.copy('F')
u = u0.copy('F')
v = v0.copy('F')
# don't overwrite
q1, r1 = qr_update(q, r, u, v, False)
check_qr(q1, r1, a1, self.rtol, self.atol, False)
check_qr(q, r, a, self.rtol, self.atol, False)
q2, r2 = qr_update(q, r, u, v, True)
check_qr(q2, r2, a1, self.rtol, self.atol, False)
# verify the overwriting, no good way to check u and v.
assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)
q = q0.copy('C')
r = r0.copy('C')
u = u0.copy('C')
v = v0.copy('C')
q3, r3 = qr_update(q, r, u, v, True)
check_qr(q3, r3, a1, self.rtol, self.atol, False)
assert_allclose(q3, q, rtol=self.rtol, atol=self.atol)
assert_allclose(r3, r, rtol=self.rtol, atol=self.atol)
def test_overwrite_qruv_rank_p(self):
# for rank p updates, q r must be F contiguous, v must be C (v.T --> F)
# and u can be C or F, but is only overwritten if Q is C and complex
a, q0, r0, u0, v0 = self.generate('sqr', p=3)
a1 = a + np.dot(u0, v0.T.conj())
q = q0.copy('F')
r = r0.copy('F')
u = u0.copy('F')
v = v0.copy('C')
# don't overwrite
q1, r1 = qr_update(q, r, u, v, False)
check_qr(q1, r1, a1, self.rtol, self.atol)
check_qr(q, r, a, self.rtol, self.atol)
q2, r2 = qr_update(q, r, u, v, True)
check_qr(q2, r2, a1, self.rtol, self.atol)
# verify the overwriting, no good way to check u and v.
assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)
def test_empty_inputs(self):
a, q, r, u, v = self.generate('tall')
assert_raises(ValueError, qr_update, np.array([]), r, u, v)
assert_raises(ValueError, qr_update, q, np.array([]), u, v)
assert_raises(ValueError, qr_update, q, r, np.array([]), v)
assert_raises(ValueError, qr_update, q, r, u, np.array([]))
def test_mismatched_shapes(self):
a, q, r, u, v = self.generate('tall')
assert_raises(ValueError, qr_update, q, r[1:], u, v)
assert_raises(ValueError, qr_update, q[:-2], r, u, v)
assert_raises(ValueError, qr_update, q, r, u[1:], v)
assert_raises(ValueError, qr_update, q, r, u, v[1:])
def test_unsupported_dtypes(self):
dts = ['int8', 'int16', 'int32', 'int64',
'uint8', 'uint16', 'uint32', 'uint64',
'float16', 'longdouble', 'clongdouble',
'bool']
a, q0, r0, u0, v0 = self.generate('tall')
for dtype in dts:
q = q0.real.astype(dtype)
with np.errstate(invalid="ignore"):
r = r0.real.astype(dtype)
u = u0.real.astype(dtype)
v = v0.real.astype(dtype)
assert_raises(ValueError, qr_update, q, r0, u0, v0)
assert_raises(ValueError, qr_update, q0, r, u0, v0)
assert_raises(ValueError, qr_update, q0, r0, u, v0)
assert_raises(ValueError, qr_update, q0, r0, u0, v)
def test_integer_input(self):
q = np.arange(16).reshape(4, 4)
r = q.copy() # doesn't matter
u = q[:, 0].copy()
v = r[0, :].copy()
assert_raises(ValueError, qr_update, q, r, u, v)
def test_check_finite(self):
a0, q0, r0, u0, v0 = self.generate('tall', p=3)
q = q0.copy('F')
q[1,1] = np.nan
assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0])
assert_raises(ValueError, qr_update, q, r0, u0, v0)
r = r0.copy('F')
r[1,1] = np.nan
assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0])
assert_raises(ValueError, qr_update, q0, r, u0, v0)
u = u0.copy('F')
u[0,0] = np.nan
assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0])
assert_raises(ValueError, qr_update, q0, r0, u, v0)
v = v0.copy('F')
v[0,0] = np.nan
assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0])
assert_raises(ValueError, qr_update, q0, r0, u, v)
def test_economic_check_finite(self):
a0, q0, r0, u0, v0 = self.generate('tall', mode='economic', p=3)
q = q0.copy('F')
q[1,1] = np.nan
assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0])
assert_raises(ValueError, qr_update, q, r0, u0, v0)
r = r0.copy('F')
r[1,1] = np.nan
assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0])
assert_raises(ValueError, qr_update, q0, r, u0, v0)
u = u0.copy('F')
u[0,0] = np.nan
assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0])
assert_raises(ValueError, qr_update, q0, r0, u, v0)
v = v0.copy('F')
v[0,0] = np.nan
assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0])
assert_raises(ValueError, qr_update, q0, r0, u, v)
def test_u_exactly_in_span_q(self):
q = np.array([[0, 0], [0, 0], [1, 0], [0, 1]], self.dtype)
r = np.array([[1, 0], [0, 1]], self.dtype)
u = np.array([0, 0, 0, -1], self.dtype)
v = np.array([1, 2], self.dtype)
q1, r1 = qr_update(q, r, u, v)
a1 = np.dot(q, r) + np.outer(u, v.conj())
check_qr(q1, r1, a1, self.rtol, self.atol, False)
| BaseQRupdate |
python | optuna__optuna | tests/storages_tests/rdb_tests/test_models.py | {
"start": 9086,
"end": 10716
} | class ____:
@staticmethod
def test_find_by_trial_and_key(session: Session) -> None:
study = StudyModel(study_id=1, study_name="test-study")
trial = TrialModel(study_id=study.study_id)
session.add(
TrialSystemAttributeModel(trial_id=trial.trial_id, key="sample-key", value_json="1")
)
session.commit()
attr = TrialSystemAttributeModel.find_by_trial_and_key(trial, "sample-key", session)
assert attr is not None
assert "1" == attr.value_json
assert TrialSystemAttributeModel.find_by_trial_and_key(trial, "not-found", session) is None
@staticmethod
def test_cascade_delete_on_trial(session: Session) -> None:
trial_id = 1
direction = StudyDirectionModel(direction=StudyDirection.MINIMIZE, objective=0)
study = StudyModel(study_id=1, study_name="test-study", directions=[direction])
trial = TrialModel(trial_id=trial_id, study_id=study.study_id, state=TrialState.COMPLETE)
trial.system_attributes.append(
TrialSystemAttributeModel(trial_id=trial_id, key="sample-key1", value_json="1")
)
trial.system_attributes.append(
TrialSystemAttributeModel(trial_id=trial_id, key="sample-key2", value_json="2")
)
study.trials.append(trial)
session.add(study)
session.commit()
assert 2 == len(TrialSystemAttributeModel.where_trial_id(trial_id, session))
session.delete(trial)
session.commit()
assert 0 == len(TrialSystemAttributeModel.where_trial_id(trial_id, session))
| TestTrialSystemAttributeModel |
python | getsentry__sentry | src/sentry/replays/_case_studies/INC_1184_consumer_backlog_from_increased_threads/report.py | {
"start": 978,
"end": 1242
} | class ____:
def __init__(self, step):
self.step = step
self.produced_count = 0
def submit(self):
self.step.submit(Message(Value(None, {}, None)))
self.produced_count += 1
def poll(self):
self.step.poll()
| Producer |
python | lepture__authlib | authlib/oauth2/rfc6749/errors.py | {
"start": 5307,
"end": 5752
} | class ____(OAuth2Error):
"""The resource owner or authorization server denied the request.
Used in authorization endpoint for "code" and "implicit". Defined in
`Section 4.1.2.1`_.
.. _`Section 4.1.2.1`: https://tools.ietf.org/html/rfc6749#section-4.1.2.1
"""
error = "access_denied"
description = "The resource owner or authorization server denied the request"
# -- below are extended errors -- #
| AccessDeniedError |
python | google__jax | tests/multiprocess_gpu_test.py | {
"start": 7404,
"end": 19296
} | class ____(jtu.JaxTestCase):
def sorted_devices(self):
devices = sorted(jax.devices(), key=lambda d: (d.id, d.host_id))
if len(devices) != 16:
raise unittest.SkipTest(
"Test assumes that it runs on 16 devices (2 nodes)")
return devices
def create_2d_non_contiguous_mesh(self):
devices = self.sorted_devices()
device_mesh = np.array([[devices[0], devices[2]],
[devices[4], devices[6]],
[devices[1], devices[3]],
[devices[5], devices[7]],
[devices[8], devices[10]],
[devices[12], devices[14]],
[devices[9], devices[11]],
[devices[13], devices[15]]])
# The mesh looks like this (the integers are process index):
# 0 2
# 4 6
# 1 3
# 5 7
# 8 10
# 12 14
# 9 11
# 13 15
assert [d.id for d in device_mesh.flat
] == [0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15]
return jax.sharding.Mesh(device_mesh, ("x", "y"))
def test_gpu_multi_node_initialize_and_psum(self):
# Hookup the ENV vars expected to be set already in the SLURM environment
coordinator_address = os.environ.get("SLURM_STEP_NODELIST", None)
if coordinator_address is not None and '[' in coordinator_address:
coordinator_address = coordinator_address.split('[')[0] + \
coordinator_address.split('[')[1].split(',')[0]
num_tasks = os.environ.get("SLURM_NPROCS", None)
taskid = os.environ.get("SLURM_PROCID", None)
localid = os.environ.get("SLURM_LOCALID", None)
# fixing port since it needs to be the same for all the processes
port = "54321"
print(f"coord addr:port : {coordinator_address}:{port}\nTotal tasks: "
f"{num_tasks}\ntask id: {taskid}\nlocal id: {localid}")
self.assertEqual(
coordinator_address is None or num_tasks is None or taskid is None,
False)
# os.environ["CUDA_VISIBLE_DEVICES"] = localid #WAR for Bug:12119
jax.config.update("jax_cuda_visible_devices", localid)
jax.distributed.initialize(coordinator_address=f'{coordinator_address}:{port}',
num_processes=int(num_tasks),
process_id=int(taskid))
print(f"Total devices: {jax.device_count()}, Total tasks: {int(num_tasks)}, "
f"Devices per task: {jax.local_device_count()}")
self.assertEqual(jax.device_count(),
int(num_tasks) * jax.local_device_count())
x = jnp.ones(jax.local_device_count())
y = jax.pmap(lambda x: jax.lax.psum(x, "i"), axis_name="i")(x)
self.assertEqual(y[0], jax.device_count())
print(y)
def test_gpu_multi_node_transparent_initialize_and_psum(self):
jax.distributed.initialize()
print(f"Total devices: {jax.device_count()}, "
f"Devices per task: {jax.local_device_count()}")
self.assertEqual(jax.device_count(), int(os.environ['SLURM_NTASKS']))
self.assertEqual(jax.local_device_count(), 1)
x = jnp.ones(jax.local_device_count())
y = jax.pmap(lambda x: jax.lax.psum(x, "i"), axis_name="i")(x)
self.assertEqual(y[0], jax.device_count())
print(y)
def test_pjit_gda_multi_input_multi_output(self):
jax.distributed.initialize()
global_mesh = jtu.create_mesh((8, 2), ("x", "y"))
global_input_shape = (16, 2)
global_input_data = np.arange(
util.prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
mesh_axes1 = jax.sharding.PartitionSpec("x", "y")
gda1 = jax.make_array_from_callback(
global_input_shape, jax.sharding.NamedSharding(global_mesh, mesh_axes1), cb)
mesh_axes2 = jax.sharding.PartitionSpec("x")
gda2 = jax.make_array_from_callback(
global_input_shape, jax.sharding.NamedSharding(global_mesh, mesh_axes2), cb)
mesh_axes3 = jax.sharding.PartitionSpec(("x", "y"))
gda3 = jax.make_array_from_callback(
global_input_shape, jax.sharding.NamedSharding(global_mesh, mesh_axes3), cb)
with jax.sharding.Mesh(global_mesh.devices, global_mesh.axis_names):
@functools.partial(
pjit.pjit,
out_shardings=(mesh_axes1, None, mesh_axes2))
def f(x, y, z):
return x @ x.T, y, z
out1, out2, out3 = f(gda1, gda2, gda3)
self.assertEqual(out1.shape, (16, 16))
self.assertEqual(out1.addressable_shards[0].data.shape, (2, 8))
expected_matrix_mul = global_input_data @ global_input_data.T
for s in out1.addressable_shards:
np.testing.assert_array_equal(np.asarray(s.data),
expected_matrix_mul[s.index])
self.assertEqual(out2.shape, (16, 2))
self.assertEqual(out2.addressable_shards[0].data.shape, (16, 2))
for s in out2.addressable_shards:
np.testing.assert_array_equal(np.asarray(s.data), global_input_data)
self.assertEqual(out3.shape, (16, 2))
self.assertEqual(out3.addressable_shards[0].data.shape, (2, 2))
for s in out3.addressable_shards:
np.testing.assert_array_equal(np.asarray(s.data),
global_input_data[s.index])
def test_pjit_gda_non_contiguous_mesh(self):
jax.distributed.initialize()
devices = self.sorted_devices()
mesh_devices = np.array(devices[0:8:2] + devices[1:8:2] + devices[8:16:2] +
devices[9:16:2])
# The device order in the below mesh is:
# [0, 2, 4, 6, 1, 3, 5, 7, 8, 10, 12, 14, 9, 11, 13, 15]
# each having the following process index:
# The process-gpu mapping is random: @sudhakarsingh27 to figure out why so
# and the data is:
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
global_mesh = jax.sharding.Mesh(mesh_devices, ("x",))
global_input_shape = (16,)
mesh_axes = jax.sharding.PartitionSpec("x")
global_input_data = np.arange(
util.prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda1 = jax.make_array_from_callback(
global_input_shape, jax.sharding.NamedSharding(global_mesh, mesh_axes), cb)
# device_id -> (index, replica_id)
expected_idx_rid = {
0: ((slice(0, 1),), 0),
1: ((slice(4, 5),), 0),
2: ((slice(1, 2),), 0),
3: ((slice(5, 6),), 0),
4: ((slice(2, 3),), 0),
5: ((slice(6, 7),), 0),
6: ((slice(3, 4),), 0),
7: ((slice(7, 8),), 0),
8: ((slice(8, 9),), 0),
9: ((slice(12, 13),), 0),
10: ((slice(9, 10),), 0),
11: ((slice(13, 14),), 0),
12: ((slice(10, 11),), 0),
13: ((slice(14, 15),), 0),
14: ((slice(11, 12),), 0),
15: ((slice(15, 16),), 0),
}
with jax.sharding.Mesh(global_mesh.devices, global_mesh.axis_names):
f = pjit.pjit(lambda x: x, out_shardings=mesh_axes)
out = f(gda1)
for s in out.addressable_shards:
device_id = s.device.id
expected_index = expected_idx_rid[device_id][0]
expected_replica_id = expected_idx_rid[device_id][1]
self.assertEqual(s.index, expected_index)
self.assertEqual(s.replica_id, expected_replica_id)
self.assertEqual(s.data.shape, (1,))
np.testing.assert_array_equal(np.asarray(s.data),
global_input_data[expected_index])
def test_pjit_gda_non_contiguous_mesh_2d(self):
jax.distributed.initialize()
global_mesh = self.create_2d_non_contiguous_mesh()
global_input_shape = (16, 2)
mesh_axes = jax.sharding.PartitionSpec("x", "y")
global_input_data = np.arange(
util.prod(global_input_shape)).reshape(global_input_shape)
def cb(index):
return global_input_data[index]
gda1 = jax.make_array_from_callback(
global_input_shape, jax.sharding.NamedSharding(global_mesh, mesh_axes), cb)
# device_id -> (index, replica_id)
expected_idx_rid = {
0: ((slice(0, 2), slice(0, 1)), 0),
1: ((slice(4, 6), slice(0, 1)), 0),
2: ((slice(0, 2), slice(1, 2)), 0),
3: ((slice(4, 6), slice(1, 2)), 0),
4: ((slice(2, 4), slice(0, 1)), 0),
5: ((slice(6, 8), slice(0, 1)), 0),
6: ((slice(2, 4), slice(1, 2)), 0),
7: ((slice(6, 8), slice(1, 2)), 0),
8: ((slice(8, 10), slice(0, 1)), 0),
9: ((slice(12, 14), slice(0, 1)), 0),
10: ((slice(8, 10), slice(1, 2)), 0),
11: ((slice(12, 14), slice(1, 2)), 0),
12: ((slice(10, 12), slice(0, 1)), 0),
13: ((slice(14, 16), slice(0, 1)), 0),
14: ((slice(10, 12), slice(1, 2)), 0),
15: ((slice(14, 16), slice(1, 2)), 0),
}
with global_mesh:
f = pjit.pjit(lambda x: x, out_shardings=mesh_axes)
out = f(gda1)
for s in out.addressable_shards:
device_id = s.device.id
expected_index = expected_idx_rid[device_id][0]
expected_replica_id = expected_idx_rid[device_id][1]
self.assertEqual(s.index, expected_index)
self.assertEqual(s.replica_id, expected_replica_id)
self.assertEqual(s.data.shape, (2, 1))
np.testing.assert_array_equal(np.asarray(s.data),
global_input_data[expected_index])
with global_mesh:
f = pjit.pjit(
lambda x: x,
in_shardings=jax.sharding.PartitionSpec(None),
out_shardings=mesh_axes,
)
# Fully replicated values allows a non-contiguous mesh.
out = f(global_input_data)
with global_mesh:
f = pjit.pjit(lambda x: x, in_shardings=None, out_shardings=mesh_axes)
# Fully replicated values allows a non-contiguous mesh.
out = f(global_input_data)
gda2 = jax.make_array_from_callback(
global_input_shape, jax.sharding.NamedSharding(global_mesh, jax.sharding.PartitionSpec(None)), cb)
with global_mesh:
f = pjit.pjit(
lambda x, y: (x, y),
in_shardings=(None, None),
out_shardings=(mesh_axes, mesh_axes),
)
# Fully replicated values + GDA allows a non-contiguous mesh.
out1, out2 = f(global_input_data, gda2)
def test_pjit_gda_non_contiguous_mesh_2d_aot(self):
jax.distributed.initialize()
global_mesh = self.create_2d_non_contiguous_mesh()
global_input_shape = (8, 2)
mesh_axes = jax.sharding.PartitionSpec("x", "y")
global_input_data = np.arange(
util.prod(global_input_shape)).reshape(global_input_shape)
gda1 = jax.make_array_from_callback(
global_input_shape, jax.sharding.NamedSharding(global_mesh, mesh_axes),
lambda idx: global_input_data[idx])
with global_mesh:
f = pjit.pjit(
lambda x, y: (x, y),
in_shardings=jax.sharding.PartitionSpec("x", "y"),
out_shardings=jax.sharding.PartitionSpec("x", "y"),
)
inp_aval = core.ShapedArray((8, 2), jnp.int32)
# `ShapedArray` is considered global when lowered and compiled.
# Hence it can bypass the contiguous mesh restriction.
compiled = f.lower(inp_aval, gda1).compile()
out1, out2 = compiled(gda1, gda1)
self.assertEqual(out1.shape, (8, 2))
self.assertEqual(out2.shape, (8, 2))
def test_pjit_gda_eval_shape(self):
jax.distributed.initialize()
with jtu.create_mesh((16,), ("x")):
@functools.partial(pjit.pjit,
in_shardings=jax.sharding.PartitionSpec(None),
out_shardings=jax.sharding.PartitionSpec("x"))
def f():
return jnp.zeros([32, 10])
self.assertEqual(f().shape, (32, 10))
self.assertEqual(jax.eval_shape(f).shape, (32, 10))
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| SlurmMultiNodeGpuTest |
python | eventlet__eventlet | eventlet/dagpool.py | {
"start": 273,
"end": 583
} | class ____(Exception):
"""
DAGPool raises Collision when you try to launch two greenthreads with the
same key, or post() a result for a key corresponding to a greenthread, or
post() twice for the same key. As with KeyError, str(collision) names the
key in question.
"""
pass
| Collision |
python | spack__spack | lib/spack/spack/relocate_text.py | {
"start": 1935,
"end": 3726
} | class ____:
"""Base class for applying a prefix to prefix map to a list of binaries or text files. Derived
classes implement _apply_to_file to do the actual work, which is different when it comes to
binaries and text files."""
def __init__(self, prefix_to_prefix: Dict[bytes, bytes]) -> None:
"""
Arguments:
prefix_to_prefix: An ordered mapping from prefix to prefix. The order is relevant to
support substring fallbacks, for example
``[("/first/sub", "/x"), ("/first", "/y")]`` will ensure /first/sub is matched and
replaced before /first.
"""
self.prefix_to_prefix = filter_identity_mappings(prefix_to_prefix)
@property
def is_noop(self) -> bool:
"""Returns true when the prefix to prefix map
is mapping everything to the same location (identity)
or there are no prefixes to replace."""
return not self.prefix_to_prefix
def apply(self, filenames: Iterable[str]) -> List[str]:
"""Returns a list of files that were modified"""
changed_files = []
if self.is_noop:
return []
for filename in filenames:
if self.apply_to_filename(filename):
changed_files.append(filename)
return changed_files
def apply_to_filename(self, filename: str) -> bool:
if self.is_noop:
return False
with open(filename, "rb+") as f:
return self.apply_to_file(f)
def apply_to_file(self, f: IO[bytes]) -> bool:
if self.is_noop:
return False
return self._apply_to_file(f)
def _apply_to_file(self, f: IO) -> bool:
raise NotImplementedError("Derived classes must implement this method")
| PrefixReplacer |
python | pytorch__pytorch | test/quantization/jit/test_ondevice_quantization.py | {
"start": 935,
"end": 1481
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = torch.nn.Conv2d(3, 5, 3)
weight = torch.nn.Parameter(torch.ones(5, 5))
self.weight1 = torch.nn.Parameter(torch.ones(5, 5))
self.mymod = myMod(weight)
def forward(self, x):
conv_output = self.conv(x)
y = self.mymod(conv_output)
z = torch.nn.functional.linear(y, self.weight1)
return z
def get_example_inputs(self):
return (torch.rand(1, 3, 12, 7),)
| MyConvLinearModule |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 115587,
"end": 115715
} | class ____:
xlSortColumns = 1 # from enum XlSortOrientation
xlSortRows = 2 # from enum XlSortOrientation
| SortOrientation |
python | encode__django-rest-framework | tests/test_api_client.py | {
"start": 4865,
"end": 5130
} | class ____(APIView):
parser_classes = [FileUploadParser]
def post(self, request):
return Response({
'method': request.method,
'files': _get_files(request),
'content_type': request.content_type
})
| UploadView |
python | mitmproxy__pdoc | test/testdata/top_level_reimports/_internal.py | {
"start": 0,
"end": 44
} | class ____:
class FooSub:
pass
| Foo |
python | PrefectHQ__prefect | src/prefect/exceptions.py | {
"start": 8115,
"end": 8248
} | class ____(BaseException):
"""
Base type for signal-like exceptions that should never be caught by users.
"""
| PrefectSignal |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_tp_integration.py | {
"start": 2412,
"end": 17714
} | class ____(FSDPTest):
def _get_params_and_sharding_info(
self,
model: SimpleModel,
sharded_param_names: list[str],
tensor_parallel_size: int,
) -> tuple[dict[str, int], dict[str, tuple[torch.Size, int]]]:
""" """
assert type(model) is SimpleModel, (
"Expects a `SimpleModel` since the sharding cases on the model definition"
)
param_name_to_numel = OrderedDict()
param_name_to_sharding_info = OrderedDict()
for param_name, param in model.named_parameters():
if param_name not in sharded_param_names:
param_name_to_numel[param_name] = param.numel()
else:
param_name_to_numel[param_name] = param.numel() // tensor_parallel_size
param_name_to_sharding_info[param_name] = (
param.size(),
0 if "net1" in param_name else 1,
)
return param_name_to_numel, param_name_to_sharding_info
def _get_sub_pgs(self, tensor_parallel_size: int):
"""
Generates TP and FSDP subprocess groups. ``tensor_parallel_size`` gives
the TP process group size.
For example, if the global world size is 8 and the tensor parallel size
is 2, then this creates:
- 4 TP subprocess groups: [0, 1], [2, 3], [4, 5], [6, 7]
- 2 FSDP subprocess groups: [0, 2, 4, 6], [1, 3, 5, 7]
"""
# 2-D mesh is [dp, tp]
twod_mesh = DeviceMesh(
device_type=device_type,
mesh=torch.arange(0, self.world_size).view(-1, tensor_parallel_size),
)
fsdp_pg = twod_mesh.get_group(mesh_dim=0)
tp_pg = twod_mesh.get_group(mesh_dim=1)
return twod_mesh, fsdp_pg, tp_pg
def _sync_tp_grads(
self,
tp_fsdp_model: FSDP,
tp_pg: dist.ProcessGroup,
param_name_to_numel: dict[str, int],
non_sharded_param_names: list[str],
) -> None:
"""
Syncs the tensor parallel parameters' gradients following the data
parallel paradigm where gradients are averaged over ranks (in this
case, the ones in the tensor parallel process group).
"""
tp_world_size = tp_pg.size()
fsdp_world_size = self.world_size // tp_world_size
assert (
type(tp_fsdp_model) is FSDP
and len([m for m in tp_fsdp_model.modules() if type(m) is FSDP]) == 1
), (
"The following logic assumes a single top-level-only FSDP wrapping "
"the model with TP already applied"
)
for flat_param in tp_fsdp_model.params:
splits = tuple(param_name_to_numel.values())
# Create a mask over the gradient elements to manually reduce
unsharded_size = torch.Size([flat_param.numel() * fsdp_world_size])
unsharded_zeros = torch.zeros(unsharded_size, device=flat_param.device)
per_param_masks = unsharded_zeros.split(splits)
for param_idx, param_name in enumerate(
param_name_to_numel.keys()
): # assumes fixed order
if param_name not in non_sharded_param_names:
per_param_masks[param_idx][:] = 1
unsharded_mask = (
torch.cat(per_param_masks).contiguous().type(torch.BoolTensor)
)
sharded_mask = unsharded_mask.chunk(fsdp_world_size)[
self.rank // tp_world_size
]
grad_device = flat_param.grad.device
grad = flat_param.grad.detach().clone().to(self.rank)
dist.all_reduce(grad, op=dist.ReduceOp.SUM, group=tp_pg)
grad = grad.to(grad_device)
flat_param.grad[~sharded_mask] = grad[~sharded_mask]
# Average *all* gradient elements to match the FSDP only semantics
flat_param.grad /= tp_world_size
def _get_grads_as_flattened(
self,
model: FSDP,
uses_tp: bool,
param_name_to_numel: dict[str, int],
param_name_to_sharding_info: dict[str, tuple[torch.Size, int]],
tp_pg: Optional[dist.ProcessGroup],
fsdp_pg: Optional[dist.ProcessGroup],
sharded_param_names: Optional[list[str]],
) -> torch.Tensor:
"""
Returns all unsharded gradients as a single flattened tensor. This
returns the same value on all ranks.
"""
local_grads_as_flattened = (
torch.cat(
[
(
torch.flatten(param.grad)
if param.grad is not None
else torch.zeros_like(torch.flatten(param))
)
for param in model.parameters()
]
)
.contiguous()
.to(self.rank)
)
all_grads_as_flattened = torch.cat(
[torch.empty_like(local_grads_as_flattened) for _ in range(fsdp_pg.size())]
).contiguous()
dist.all_gather_into_tensor(
all_grads_as_flattened, local_grads_as_flattened, group=fsdp_pg
)
if not uses_tp:
return all_grads_as_flattened
splits = tuple(param_name_to_numel.values())
all_grads_per_param = list(all_grads_as_flattened.split(splits))
for param_idx, param_name in enumerate(
param_name_to_numel.keys()
): # assumes fixed order
if param_name in sharded_param_names:
local_tensor_size = list(param_name_to_sharding_info[param_name][0])
sharding_dim = param_name_to_sharding_info[param_name][1]
local_tensor_size[sharding_dim] //= tp_pg.size()
local_tensor = all_grads_per_param[param_idx].view(*local_tensor_size)
local_tensors = [
torch.empty_like(local_tensor) for _ in range(tp_pg.size())
]
dist.all_gather(local_tensors, local_tensor, group=tp_pg)
all_grads_per_param[param_idx] = torch.cat(
local_tensors, dim=sharding_dim
).reshape(-1)
return torch.cat(all_grads_per_param).contiguous()
@skip_if_lt_x_gpu(4)
def test_fsdp_tp_integration(self):
self.run_subtests(
{
"cpu_offload": [
CPUOffload(offload_params=False),
CPUOffload(offload_params=True),
],
"sharding_strategy": [None, ShardingStrategy.SHARD_GRAD_OP],
"use_orig_params": [False, True],
},
self._test_fsdp_tp_integration,
)
def _test_fsdp_tp_integration(
self, cpu_offload, sharding_strategy, use_orig_params
):
"""
Tests training for TP + FSDP integration by comparing an FSDP-only
model with a TP + FSDP model.
"""
tensor_parallel_size = 2
LR = 3e-5
torch.manual_seed(0)
model = SimpleModel().to(self.rank)
tp_fsdp_model = copy.deepcopy(model)
sharded_param_names = SimpleModel.get_sharded_param_names()
non_sharded_param_names = SimpleModel.get_non_sharded_param_names()
(
param_name_to_numel,
param_name_to_sharding_info,
) = self._get_params_and_sharding_info(
model,
sharded_param_names,
tensor_parallel_size,
)
input_seed = self.rank
torch.manual_seed(input_seed + 1)
inp_size = [2, 3, 5]
inp = torch.rand(*inp_size).to(self.rank)
self.assertEqual(model(inp), tp_fsdp_model(inp)) # sanity check
mesh_1d = init_device_mesh(device_type, (self.world_size,))
fsdp_model = FSDP(
model,
cpu_offload=cpu_offload,
device_mesh=mesh_1d,
sharding_strategy=sharding_strategy,
use_orig_params=use_orig_params,
)
mesh_2d = init_device_mesh(
device_type,
(self.world_size // tensor_parallel_size, tensor_parallel_size),
mesh_dim_names=["dp", "tp"],
)
# Shard with TP and then wrap with FSDP
sequence_parallelize_plan = {
"net1": ColwiseParallel(input_layouts=Shard(0)),
"net2": RowwiseParallel(output_layouts=Shard(0)),
}
tp_fsdp_model = parallelize_module(
tp_fsdp_model,
mesh_2d["tp"],
sequence_parallelize_plan,
)
tp_pg = mesh_2d["tp"].get_group(mesh_dim=0)
assert isinstance(tp_fsdp_model.net1.weight, DTensor)
assert isinstance(tp_fsdp_model.net2.weight, DTensor)
tp_fsdp_model = FSDP(
tp_fsdp_model,
cpu_offload=cpu_offload,
device_mesh=mesh_2d["dp"],
sharding_strategy=sharding_strategy,
use_orig_params=use_orig_params,
)
fsdp_pg = mesh_2d["dp"].get_group(mesh_dim=0)
# Check the forward by checking output equality
fsdp_out = fsdp_model(inp)
tp_fsdp_out = tp_fsdp_model(inp)
self.assertEqual(fsdp_out, tp_fsdp_out)
# Check the backward by checking gradient equality
fsdp_out.sum().backward()
tp_fsdp_out.sum().backward()
self._sync_tp_grads(
tp_fsdp_model,
tp_pg,
param_name_to_numel,
non_sharded_param_names,
)
model_grads = self._get_grads_as_flattened(
fsdp_model,
False,
param_name_to_numel,
param_name_to_sharding_info,
None,
self.process_group,
None,
)
model_tp_grads = self._get_grads_as_flattened(
tp_fsdp_model,
True,
param_name_to_numel,
param_name_to_sharding_info,
tp_pg,
fsdp_pg,
sharded_param_names,
)
self.assertEqual(model_grads, model_tp_grads)
# Check the optimizer step by performing a second forward pass
fsdp_optim = torch.optim.SGD(fsdp_model.parameters(), lr=LR)
tp_fsdp_optim = torch.optim.SGD(tp_fsdp_model.parameters(), lr=LR)
fsdp_optim.step()
tp_fsdp_optim.step()
torch.manual_seed(input_seed + 16)
inp = torch.rand(*inp_size).to(self.rank)
fsdp_out = fsdp_model(inp)
tp_fsdp_out = tp_fsdp_model(inp)
self.assertEqual(fsdp_out, tp_fsdp_out)
@skip_if_lt_x_gpu(4)
def test_fsdp_tp_extension_grad(self):
"""
Tests TP + FSDP extension with correct gradient (i.e. no ACT)
"""
mesh_2d = init_device_mesh(
device_type, (self.world_size // 2, 2), mesh_dim_names=["dp", "tp"]
)
class TestModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.mlp = MLPModule(device_type)
self.mlp_norm = RMSNormPython(10)
def forward(self, x):
return self.mlp(self.mlp_norm(x))
model = TestModel().to(self.rank)
# Shard with TP and test gradient
tp_mesh = mesh_2d["tp"]
tp_model = parallelize_module(
model,
tp_mesh,
{
"mlp.net1": ColwiseParallel(input_layouts=Shard(0)),
"mlp.net2": RowwiseParallel(output_layouts=Shard(0)),
},
)
distribute_rmsnorm(tp_model.mlp_norm, tp_mesh)
fsdp_2d_model = FSDP(tp_model, device_mesh=mesh_2d["dp"])
comm_mode = CommDebugMode()
with comm_mode:
fsdp_2d_model(torch.rand(2, 10).to(self.rank)).sum().backward()
funcol = torch.ops.c10d_functional
c10d_ops = torch.ops.c10d
comm_counts = comm_mode.get_comm_counts()
self.assertEqual(comm_mode.get_total_counts(), 7)
# TP comms
self.assertEqual(comm_counts[funcol.reduce_scatter_tensor], 2)
self.assertEqual(comm_counts[funcol.all_gather_into_tensor], 2)
self.assertEqual(comm_counts[funcol.all_reduce], 1)
# FSDP comms
self.assertEqual(comm_counts[c10d_ops._allgather_base_], 1)
self.assertEqual(comm_counts[c10d_ops._reduce_scatter_base_], 1)
grads = [p.grad for p in fsdp_2d_model.parameters() if p.grad is not None]
for grad in grads:
self.assertFalse(grad.isnan().any().item())
@skip_if_lt_x_gpu(4)
def test_fsdp_tp_sync_module_state(self):
mesh_2d = init_device_mesh(
device_type, (self.world_size // 2, 2), mesh_dim_names=["dp", "tp"]
)
tp_mesh = mesh_2d["tp"]
dp_mesh = mesh_2d["dp"]
# set random seed for each rank
torch.manual_seed(mesh_2d.get_rank())
class TestModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
replicated_dt = DTensor.from_local(
torch.randn(8, 8), tp_mesh, [Replicate()], run_check=False
)
replicated_buffer_dt = DTensor.from_local(
torch.randn(8, 8), tp_mesh, [Replicate()], run_check=False
)
self.param = torch.nn.Parameter(replicated_dt)
self.buf = torch.nn.Buffer(replicated_buffer_dt)
def forward(self, x):
return self.param + self.buffer + 1
model = TestModel()
def assert_local_shard_across_ranks(local_tensor, group, check_equal=True):
gathered_tensors = [
torch.empty_like(local_tensor) for _ in range(group.size())
]
dist.all_gather(gathered_tensors, local_tensor, group=group)
# on dp mesh dim local tensor does not equal
tensor_to_compare = gathered_tensors[0]
for tensor in gathered_tensors[1:]:
if check_equal:
self.assertTrue(torch.equal(tensor, tensor_to_compare))
else:
self.assertFalse(torch.equal(tensor, tensor_to_compare))
dp_group = dp_mesh.get_group()
# check on dp mesh dim param local tensor does not equal
local_param = model.param.to_local()
assert_local_shard_across_ranks(local_param, dp_group, check_equal=False)
# check on dp mesh dim buffer local tensor does not equal
local_buf = model.buf.to_local()
assert_local_shard_across_ranks(local_buf, dp_group, check_equal=False)
# wrap with fsdp sync param should sync dp mesh dim
fsdp_mod = FSDP(model, device_mesh=dp_mesh, sync_module_states=True)
with fsdp_mod.summon_full_params(fsdp_mod):
# on dp mesh dim local param does equal after sync_module_states
local_param = fsdp_mod.param.to_local()
assert_local_shard_across_ranks(local_param, dp_group, check_equal=True)
# on dp mesh dim local buf does equal after sync_module_states
local_buf = fsdp_mod.buf.to_local()
assert_local_shard_across_ranks(local_buf, dp_group, check_equal=True)
instantiate_parametrized_tests(TestTPFSDPIntegration)
if __name__ == "__main__":
run_tests()
| TestTPFSDPIntegration |
python | readthedocs__readthedocs.org | readthedocs/aws/tests/test_security_token_service.py | {
"start": 822,
"end": 10979
} | class ____(TestCase):
def setUp(self):
self.user = get(User)
self.project = get(
Project,
slug="project",
users=[self.user],
)
self.version = self.project.versions.first()
self.build = get(
Build,
version=self.version,
project=self.project,
)
@override_settings(USING_AWS=False, DEBUG=True)
def test_get_s3_build_media_global_credentials(self):
credentials = get_s3_build_media_scoped_credentials(build=self.build)
assert credentials == AWSS3TemporaryCredentials(
access_key_id="global_access_key_id",
secret_access_key="global_secret_access_key",
session_token=None,
region_name="us-east-1",
bucket_name="readthedocs-media",
)
@mock.patch("readthedocs.aws.security_token_service.boto3.client")
def test_get_s3_build_media_scoped_credentials(self, boto3_client):
boto3_client().assume_role.return_value = {
"Credentials": {
"AccessKeyId": "access_key_id",
"SecretAccessKey": "secret_access_key",
"SessionToken": "session_token",
}
}
credentials = get_s3_build_media_scoped_credentials(build=self.build)
assert credentials == AWSS3TemporaryCredentials(
access_key_id="access_key_id",
secret_access_key="secret_access_key",
session_token="session_token",
region_name="us-east-1",
bucket_name="readthedocs-media",
)
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject",
],
"Resource": [
"arn:aws:s3:::readthedocs-media/html/project/latest/*",
"arn:aws:s3:::readthedocs-media/pdf/project/latest/*",
"arn:aws:s3:::readthedocs-media/epub/project/latest/*",
"arn:aws:s3:::readthedocs-media/htmlzip/project/latest/*",
"arn:aws:s3:::readthedocs-media/json/project/latest/*",
"arn:aws:s3:::readthedocs-media/diff/project/latest/*",
],
},
{
"Effect": "Allow",
"Action": ["s3:ListBucket"],
"Resource": [
"arn:aws:s3:::readthedocs-media",
],
"Condition": {
"StringLike": {
"s3:prefix": [
"html/project/latest/*",
"pdf/project/latest/*",
"epub/project/latest/*",
"htmlzip/project/latest/*",
"json/project/latest/*",
"diff/project/latest/*",
]
}
},
},
],
}
boto3_client().assume_role.assert_called_once_with(
RoleArn="arn:aws:iam::1234:role/RoleName",
RoleSessionName=f"rtd-{self.build.id}-project-latest",
Policy=json.dumps(policy),
DurationSeconds=15 * 60,
)
@mock.patch("readthedocs.aws.security_token_service.boto3.client")
def test_get_s3_build_media_scoped_credentials_external_version(self, boto3_client):
self.version.type = EXTERNAL
self.version.save()
boto3_client().assume_role.return_value = {
"Credentials": {
"AccessKeyId": "access_key_id",
"SecretAccessKey": "secret_access_key",
"SessionToken": "session_token",
}
}
credentials = get_s3_build_media_scoped_credentials(build=self.build)
assert credentials == AWSS3TemporaryCredentials(
access_key_id="access_key_id",
secret_access_key="secret_access_key",
session_token="session_token",
region_name="us-east-1",
bucket_name="readthedocs-media",
)
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:PutObject",
"s3:DeleteObject",
],
"Resource": [
"arn:aws:s3:::readthedocs-media/external/html/project/latest/*",
"arn:aws:s3:::readthedocs-media/external/pdf/project/latest/*",
"arn:aws:s3:::readthedocs-media/external/epub/project/latest/*",
"arn:aws:s3:::readthedocs-media/external/htmlzip/project/latest/*",
"arn:aws:s3:::readthedocs-media/external/json/project/latest/*",
"arn:aws:s3:::readthedocs-media/external/diff/project/latest/*",
],
},
{
"Effect": "Allow",
"Action": ["s3:ListBucket"],
"Resource": [
"arn:aws:s3:::readthedocs-media",
],
"Condition": {
"StringLike": {
"s3:prefix": [
"external/html/project/latest/*",
"external/pdf/project/latest/*",
"external/epub/project/latest/*",
"external/htmlzip/project/latest/*",
"external/json/project/latest/*",
"external/diff/project/latest/*",
]
}
},
},
],
}
boto3_client().assume_role.assert_called_once_with(
RoleArn="arn:aws:iam::1234:role/RoleName",
RoleSessionName=f"rtd-{self.build.id}-project-latest",
Policy=json.dumps(policy),
DurationSeconds=15 * 60,
)
@override_settings(USING_AWS=False, DEBUG=True)
def test_get_s3_build_tools_global_credentials(self):
credentials = get_s3_build_tools_scoped_credentials(build=self.build)
assert credentials == AWSS3TemporaryCredentials(
access_key_id="global_access_key_id",
secret_access_key="global_secret_access_key",
session_token=None,
region_name="us-east-1",
bucket_name="readthedocs-build-tools",
)
@mock.patch("readthedocs.aws.security_token_service.boto3.client")
def test_get_s3_build_tools_scoped_credentials(self, boto3_client):
boto3_client().assume_role.return_value = {
"Credentials": {
"AccessKeyId": "access_key_id",
"SecretAccessKey": "secret_access_key",
"SessionToken": "session_token",
}
}
credentials = get_s3_build_tools_scoped_credentials(build=self.build)
assert credentials == AWSS3TemporaryCredentials(
access_key_id="access_key_id",
secret_access_key="secret_access_key",
session_token="session_token",
region_name="us-east-1",
bucket_name="readthedocs-build-tools",
)
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:ListBucket",
],
"Resource": [
"arn:aws:s3:::readthedocs-build-tools",
"arn:aws:s3:::readthedocs-build-tools/*",
],
},
],
}
boto3_client().assume_role.assert_called_once_with(
RoleArn="arn:aws:iam::1234:role/RoleName",
RoleSessionName=f"rtd-{self.build.id}-project-latest",
Policy=json.dumps(policy),
DurationSeconds=15 * 60,
)
@mock.patch("readthedocs.aws.security_token_service.boto3.client")
def test_get_s3_build_tools_scoped_credentials_external_version(self, boto3_client):
self.version.type = EXTERNAL
self.version.save()
boto3_client().assume_role.return_value = {
"Credentials": {
"AccessKeyId": "access_key_id",
"SecretAccessKey": "secret_access_key",
"SessionToken": "session_token",
}
}
credentials = get_s3_build_tools_scoped_credentials(build=self.build)
assert credentials == AWSS3TemporaryCredentials(
access_key_id="access_key_id",
secret_access_key="secret_access_key",
session_token="session_token",
region_name="us-east-1",
bucket_name="readthedocs-build-tools",
)
policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:GetObject",
"s3:ListBucket",
],
"Resource": [
"arn:aws:s3:::readthedocs-build-tools",
"arn:aws:s3:::readthedocs-build-tools/*",
],
},
],
}
boto3_client().assume_role.assert_called_once_with(
RoleArn="arn:aws:iam::1234:role/RoleName",
RoleSessionName=f"rtd-{self.build.id}-project-latest",
Policy=json.dumps(policy),
DurationSeconds=15 * 60,
)
| TestSecurityTokenService |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/hooks/test_kubernetes.py | {
"start": 2978,
"end": 33532
} | class ____:
# TODO: Potential performance issue, converted setup_class to a setup_connections function level fixture
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
"""Create test connections for Kubernetes hook tests."""
import json
connections = [
("in_cluster", {"in_cluster": True}),
("in_cluster_empty", {"in_cluster": ""}),
("kube_config", {"kube_config": '{"test": "kube"}'}),
("kube_config_dict", {"kube_config": {"test": "kube"}}),
("kube_config_path", {"kube_config_path": "path/to/file"}),
("kube_config_empty", {"kube_config": ""}),
("kube_config_path_empty", {"kube_config_path": ""}),
("kube_config_empty", {"kube_config": ""}),
("kube_config_path_empty", {"kube_config_path": ""}),
("context_empty", {"cluster_context": ""}),
("context", {"cluster_context": "my-context"}),
("with_namespace", {"namespace": "mock_namespace"}),
("default_kube_config", {}),
("disable_verify_ssl", {"disable_verify_ssl": True}),
("disable_verify_ssl_empty", {"disable_verify_ssl": ""}),
("disable_tcp_keepalive", {"disable_tcp_keepalive": True}),
("disable_tcp_keepalive_empty", {"disable_tcp_keepalive": ""}),
("sidecar_container_image", {"xcom_sidecar_container_image": "private.repo.com/alpine:3.16"}),
("sidecar_container_image_empty", {"xcom_sidecar_container_image": ""}),
(
"sidecar_container_resources",
{
"xcom_sidecar_container_resources": json.dumps(
{
"requests": {"cpu": "1m", "memory": "10Mi"},
"limits": {"cpu": "1m", "memory": "50Mi"},
}
),
},
),
("sidecar_container_resources_empty", {"xcom_sidecar_container_resources": ""}),
]
for conn_id, extra in connections:
create_connection_without_db(
Connection(conn_type="kubernetes", conn_id=conn_id, extra=json.dumps(extra))
)
@classmethod
def teardown_class(cls) -> None:
clear_test_connections()
@pytest.mark.parametrize(
("in_cluster_param", "conn_id", "in_cluster_called"),
(
(True, None, True),
(None, None, False),
(False, None, False),
(None, "in_cluster", True),
(True, "in_cluster", True),
(False, "in_cluster", False),
(None, "in_cluster_empty", False),
(True, "in_cluster_empty", True),
(False, "in_cluster_empty", False),
),
)
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
@patch("kubernetes.config.incluster_config.InClusterConfigLoader")
@patch(f"{HOOK_MODULE}.KubernetesHook._get_default_client")
def test_in_cluster_connection(
self,
mock_get_default_client,
mock_in_cluster_loader,
mock_merger,
mock_loader,
in_cluster_param,
conn_id,
in_cluster_called,
):
"""
Verifies whether in_cluster is called depending on combination of hook param and connection extra.
Hook param should beat extra.
"""
kubernetes_hook = KubernetesHook(conn_id=conn_id, in_cluster=in_cluster_param)
mock_get_default_client.return_value = kubernetes.client.api_client.ApiClient()
api_conn = kubernetes_hook.get_conn()
if in_cluster_called:
mock_in_cluster_loader.assert_called_once()
mock_merger.assert_not_called()
mock_loader.assert_not_called()
else:
mock_get_default_client.assert_called()
assert isinstance(api_conn, kubernetes.client.api_client.ApiClient)
if not mock_get_default_client.called:
# get_default_client is mocked, so only check is_in_cluster if it isn't called
assert kubernetes_hook.is_in_cluster is in_cluster_called
@pytest.mark.parametrize("in_cluster_fails", [True, False])
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
@patch("kubernetes.config.incluster_config.InClusterConfigLoader")
def test_get_default_client(
self,
mock_incluster,
mock_merger,
mock_loader,
in_cluster_fails,
):
"""
Verifies the behavior of the ``_get_default_client`` function. It should try the "in cluster"
loader first but if that fails, try to use the default kubeconfig file.
"""
if in_cluster_fails:
mock_incluster.side_effect = ConfigException("any")
kubernetes_hook = KubernetesHook()
api_conn = kubernetes_hook._get_default_client()
if in_cluster_fails:
mock_incluster.assert_called_once()
mock_merger.assert_called_once_with(KUBE_CONFIG_PATH)
mock_loader.assert_called_once()
assert kubernetes_hook.is_in_cluster is False
else:
mock_incluster.assert_called_once()
mock_merger.assert_not_called()
mock_loader.assert_not_called()
assert kubernetes_hook.is_in_cluster is True
assert isinstance(api_conn, kubernetes.client.api_client.ApiClient)
@pytest.mark.parametrize(
("disable_verify_ssl", "conn_id", "disable_called"),
(
(True, None, True),
(None, None, False),
(False, None, False),
(None, "disable_verify_ssl", True),
(True, "disable_verify_ssl", True),
(False, "disable_verify_ssl", False),
(None, "disable_verify_ssl_empty", False),
(True, "disable_verify_ssl_empty", True),
(False, "disable_verify_ssl_empty", False),
),
)
@patch("kubernetes.config.incluster_config.InClusterConfigLoader", new=MagicMock())
@patch(f"{HOOK_MODULE}._disable_verify_ssl")
def test_disable_verify_ssl(
self,
mock_disable,
disable_verify_ssl,
conn_id,
disable_called,
):
"""
Verifies whether disable verify ssl is called depending on combination of hook param and
connection extra. Hook param should beat extra.
"""
kubernetes_hook = KubernetesHook(conn_id=conn_id, disable_verify_ssl=disable_verify_ssl)
api_conn = kubernetes_hook.get_conn()
assert mock_disable.called is disable_called
assert isinstance(api_conn, kubernetes.client.api_client.ApiClient)
@pytest.mark.parametrize(
("disable_tcp_keepalive", "conn_id", "expected"),
(
(True, None, False),
(None, None, True),
(False, None, True),
(None, "disable_tcp_keepalive", False),
(True, "disable_tcp_keepalive", False),
(False, "disable_tcp_keepalive", True),
(None, "disable_tcp_keepalive_empty", True),
(True, "disable_tcp_keepalive_empty", False),
(False, "disable_tcp_keepalive_empty", True),
),
)
@patch("kubernetes.config.incluster_config.InClusterConfigLoader", new=MagicMock())
@patch(f"{HOOK_MODULE}._enable_tcp_keepalive")
def test_disable_tcp_keepalive(
self,
mock_enable,
disable_tcp_keepalive,
conn_id,
expected,
):
"""
Verifies whether enable tcp keepalive is called depending on combination of hook
param and connection extra. Hook param should beat extra.
"""
kubernetes_hook = KubernetesHook(conn_id=conn_id, disable_tcp_keepalive=disable_tcp_keepalive)
api_conn = kubernetes_hook.get_conn()
assert mock_enable.called is expected
assert isinstance(api_conn, kubernetes.client.api_client.ApiClient)
@pytest.mark.parametrize(
("config_path_param", "conn_id", "call_path"),
(
(None, None, KUBE_CONFIG_PATH),
("/my/path/override", None, "/my/path/override"),
(None, "kube_config_path", "path/to/file"),
("/my/path/override", "kube_config_path", "/my/path/override"),
(None, "kube_config_path_empty", KUBE_CONFIG_PATH),
("/my/path/override", "kube_config_path_empty", "/my/path/override"),
),
)
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
def test_kube_config_path(
self, mock_kube_config_merger, mock_kube_config_loader, config_path_param, conn_id, call_path
):
"""
Verifies kube config path depending on combination of hook param and connection extra.
Hook param should beat extra.
"""
kubernetes_hook = KubernetesHook(conn_id=conn_id, config_file=config_path_param)
api_conn = kubernetes_hook.get_conn()
mock_kube_config_merger.assert_called_once_with(call_path)
mock_kube_config_loader.assert_called_once()
assert isinstance(api_conn, kubernetes.client.api_client.ApiClient)
@pytest.mark.parametrize(
("conn_id", "has_config"),
(
(None, False),
("kube_config", True),
("kube_config_dict", True),
("kube_config_empty", False),
),
)
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
@patch.object(tempfile, "NamedTemporaryFile")
def test_kube_config_connection(
self, mock_tempfile, mock_kube_config_merger, mock_kube_config_loader, conn_id, has_config
):
"""
Verifies whether temporary kube config file is created.
"""
mock_tempfile.return_value.__enter__.return_value.name = "fake-temp-file"
mock_kube_config_merger.return_value.config = {"fake_config": "value"}
kubernetes_hook = KubernetesHook(conn_id=conn_id)
api_conn = kubernetes_hook.get_conn()
if has_config:
mock_tempfile.is_called_once()
mock_kube_config_loader.assert_called_once()
mock_kube_config_merger.assert_called_once_with("fake-temp-file")
else:
mock_tempfile.assert_not_called()
mock_kube_config_loader.assert_called_once()
mock_kube_config_merger.assert_called_once_with(KUBE_CONFIG_PATH)
assert isinstance(api_conn, kubernetes.client.api_client.ApiClient)
@pytest.mark.parametrize(
("context_param", "conn_id", "expected_context"),
(
("param-context", None, "param-context"),
(None, None, None),
("param-context", "context", "param-context"),
(None, "context", "my-context"),
("param-context", "context_empty", "param-context"),
(None, "context_empty", None),
),
)
@patch("kubernetes.config.load_kube_config")
def test_cluster_context(self, mock_load_kube_config, context_param, conn_id, expected_context):
"""
Verifies cluster context depending on combination of hook param and connection extra.
Hook param should beat extra.
"""
kubernetes_hook = KubernetesHook(conn_id=conn_id, cluster_context=context_param)
kubernetes_hook.get_conn()
mock_load_kube_config.assert_called_with(client_configuration=None, context=expected_context)
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
@patch("kubernetes.config.kube_config.KUBE_CONFIG_DEFAULT_LOCATION", "/mock/config")
def test_default_kube_config_connection(self, mock_kube_config_merger, mock_kube_config_loader):
kubernetes_hook = KubernetesHook(conn_id="default_kube_config")
api_conn = kubernetes_hook.get_conn()
mock_kube_config_merger.assert_called_once_with("/mock/config")
mock_kube_config_loader.assert_called_once()
assert isinstance(api_conn, kubernetes.client.api_client.ApiClient)
@pytest.mark.parametrize(
("conn_id", "expected"),
(
pytest.param(None, None, id="no-conn-id"),
pytest.param("with_namespace", "mock_namespace", id="conn-with-namespace"),
pytest.param("default_kube_config", None, id="conn-without-namespace"),
),
)
def test_get_namespace(self, conn_id, expected):
hook = KubernetesHook(conn_id=conn_id)
assert hook.get_namespace() == expected
if get_provider_min_airflow_version("apache-airflow-providers-cncf-kubernetes") >= (6, 0):
raise DeprecationRemovalRequired(
"You must update get_namespace so that if namespace not set "
"in the connection, then None is returned. To do so, remove get_namespace "
"and rename _get_namespace to get_namespace."
)
@pytest.mark.parametrize(
("conn_id", "expected"),
(
pytest.param("sidecar_container_image", "private.repo.com/alpine:3.16", id="sidecar-with-image"),
pytest.param("sidecar_container_image_empty", None, id="sidecar-without-image"),
),
)
def test_get_xcom_sidecar_container_image(self, conn_id, expected):
hook = KubernetesHook(conn_id=conn_id)
assert hook.get_xcom_sidecar_container_image() == expected
@pytest.mark.parametrize(
("conn_id", "expected"),
(
pytest.param(
"sidecar_container_resources",
{
"requests": {"cpu": "1m", "memory": "10Mi"},
"limits": {
"cpu": "1m",
"memory": "50Mi",
},
},
id="sidecar-with-resources",
),
pytest.param("sidecar_container_resources_empty", None, id="sidecar-without-resources"),
),
)
def test_get_xcom_sidecar_container_resources(self, conn_id, expected):
hook = KubernetesHook(conn_id=conn_id)
assert hook.get_xcom_sidecar_container_resources() == expected
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
def test_client_types(self, mock_kube_config_merger, mock_kube_config_loader):
hook = KubernetesHook(None)
assert isinstance(hook.core_v1_client, kubernetes.client.CoreV1Api)
assert isinstance(hook.api_client, kubernetes.client.ApiClient)
assert isinstance(hook.get_conn(), kubernetes.client.ApiClient)
@patch(f"{HOOK_MODULE}.KubernetesHook._get_default_client")
def test_prefixed_names_still_work(self, mock_get_client):
conn_uri = "kubernetes://?extra__kubernetes__cluster_context=test&extra__kubernetes__namespace=test"
with mock.patch.dict("os.environ", AIRFLOW_CONN_KUBERNETES_DEFAULT=conn_uri):
kubernetes_hook = KubernetesHook(conn_id="kubernetes_default")
kubernetes_hook.get_conn()
mock_get_client.assert_called_with(cluster_context="test")
assert kubernetes_hook.get_namespace() == "test"
def test_missing_default_connection_is_ok(self, remove_default_conn, sdk_connection_not_found):
# prove to ourselves that the default conn doesn't exist
k8s_conn_exists = os.environ.get(f"AIRFLOW_CONN_{DEFAULT_CONN_ID.upper()}")
assert k8s_conn_exists is None
# verify K8sHook still works
hook = KubernetesHook()
assert hook.conn_extras == {}
# meanwhile, asking for non-default should still fail if it doesn't exist
hook = KubernetesHook("some_conn")
with pytest.raises(AirflowNotFoundException, match="The conn_id `some_conn` isn't defined"):
hook.conn_extras
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
@patch(f"{HOOK_MODULE}.client.CustomObjectsApi")
def test_delete_custom_object(
self, mock_custom_object_api, mock_kube_config_merger, mock_kube_config_loader
):
hook = KubernetesHook()
hook.delete_custom_object(
group="group",
version="version",
plural="plural",
name="name",
namespace="namespace",
_preload_content="_preload_content",
)
mock_custom_object_api.return_value.delete_namespaced_custom_object.assert_called_once_with(
group="group",
version="version",
plural="plural",
name="name",
namespace="namespace",
_preload_content="_preload_content",
)
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
@patch(f"{HOOK_MODULE}.KubernetesHook.batch_v1_client")
def test_get_job_status(self, mock_client, mock_kube_config_merger, mock_kube_config_loader):
job_expected = mock_client.read_namespaced_job_status.return_value
hook = KubernetesHook()
job_actual = hook.get_job_status(job_name=JOB_NAME, namespace=NAMESPACE)
mock_client.read_namespaced_job_status.assert_called_once_with(
name=JOB_NAME, namespace=NAMESPACE, pretty=True
)
assert job_actual == job_expected
@pytest.mark.parametrize(
("conditions", "expected_result"),
[
(None, False),
([], False),
([mock.MagicMock(type="Complete", status=True)], False),
([mock.MagicMock(type="Complete", status=False)], False),
([mock.MagicMock(type="Failed", status=False)], False),
([mock.MagicMock(type="Failed", status=True, reason="test reason 1")], "test reason 1"),
(
[
mock.MagicMock(type="Complete", status=False),
mock.MagicMock(type="Failed", status=True, reason="test reason 2"),
],
"test reason 2",
),
(
[
mock.MagicMock(type="Complete", status=True),
mock.MagicMock(type="Failed", status=True, reason="test reason 3"),
],
"test reason 3",
),
],
)
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
def test_is_job_failed(self, mock_merger, mock_loader, conditions, expected_result):
mock_job = mock.MagicMock()
mock_job.status.conditions = conditions
hook = KubernetesHook()
actual_result = hook.is_job_failed(mock_job)
assert actual_result == expected_result
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
def test_is_job_failed_no_status(self, mock_merger, mock_loader):
mock_job = mock.MagicMock()
mock_job.status = None
hook = KubernetesHook()
job_failed = hook.is_job_failed(mock_job)
assert not job_failed
@pytest.mark.parametrize(
("condition_type", "status", "expected_result"),
[
("Complete", False, False),
("Complete", True, True),
("Failed", False, False),
("Failed", True, False),
("Suspended", False, False),
("Suspended", True, False),
("Unknown", False, False),
("Unknown", True, False),
],
)
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
def test_is_job_successful(self, mock_merger, mock_loader, condition_type, status, expected_result):
mock_job = mock.MagicMock()
mock_job.status.conditions = [mock.MagicMock(type=condition_type, status=status)]
hook = KubernetesHook()
actual_result = hook.is_job_successful(mock_job)
assert actual_result == expected_result
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
def test_is_job_successful_no_status(self, mock_merger, mock_loader):
mock_job = mock.MagicMock()
mock_job.status = None
hook = KubernetesHook()
job_successful = hook.is_job_successful(mock_job)
assert not job_successful
@pytest.mark.parametrize(
("condition_type", "status", "expected_result"),
[
("Complete", False, False),
("Complete", True, True),
("Failed", False, False),
("Failed", True, True),
("Suspended", False, False),
("Suspended", True, False),
("Unknown", False, False),
("Unknown", True, False),
],
)
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
def test_is_job_complete(self, mock_merger, mock_loader, condition_type, status, expected_result):
mock_job = mock.MagicMock()
mock_job.status.conditions = [mock.MagicMock(type=condition_type, status=status)]
hook = KubernetesHook()
actual_result = hook.is_job_complete(mock_job)
assert actual_result == expected_result
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
def test_is_job_complete_no_status(self, mock_merger, mock_loader):
mock_job = mock.MagicMock()
mock_job.status = None
hook = KubernetesHook()
job_complete = hook.is_job_complete(mock_job)
assert not job_complete
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
@patch(f"{HOOK_MODULE}.KubernetesHook.get_job_status")
def test_wait_until_job_complete(self, mock_job_status, mock_kube_config_merger, mock_kube_config_loader):
job_expected = mock.MagicMock(
status=mock.MagicMock(
conditions=[
mock.MagicMock(type="TestType1"),
mock.MagicMock(type="TestType2"),
mock.MagicMock(type="Complete", status=True),
]
)
)
mock_job_status.side_effect = [
mock.MagicMock(status=mock.MagicMock(conditions=None)),
mock.MagicMock(status=mock.MagicMock(conditions=[mock.MagicMock(type="TestType")])),
mock.MagicMock(
status=mock.MagicMock(
conditions=[
mock.MagicMock(type="TestType1"),
mock.MagicMock(type="TestType2"),
]
)
),
mock.MagicMock(
status=mock.MagicMock(
conditions=[
mock.MagicMock(type="TestType1"),
mock.MagicMock(type="TestType2"),
mock.MagicMock(type="Complete", status=False),
]
)
),
job_expected,
]
hook = KubernetesHook()
with patch(f"{HOOK_MODULE}.sleep", return_value=None) as mock_sleep:
job_actual = hook.wait_until_job_complete(
job_name=JOB_NAME, namespace=NAMESPACE, job_poll_interval=POLL_INTERVAL
)
mock_job_status.assert_has_calls([mock.call(job_name=JOB_NAME, namespace=NAMESPACE)] * 5)
mock_sleep.assert_has_calls([mock.call(POLL_INTERVAL)] * 4)
assert job_actual == job_expected
@patch(f"{HOOK_MODULE}.json.dumps")
@patch(f"{HOOK_MODULE}.KubernetesHook.batch_v1_client")
def test_create_job_retries_on_500_error(self, mock_client, mock_json_dumps):
mock_client.create_namespaced_job.side_effect = [
ApiException(status=500),
MagicMock(),
]
hook = KubernetesHook()
hook.create_job(job=mock.MagicMock())
assert mock_client.create_namespaced_job.call_count == 2
@patch(f"{HOOK_MODULE}.json.dumps")
@patch(f"{HOOK_MODULE}.KubernetesHook.batch_v1_client")
def test_create_job_fails_on_other_exception(self, mock_client, mock_json_dumps):
mock_client.create_namespaced_job.side_effect = [ApiException(status=404)]
hook = KubernetesHook()
with pytest.raises(ApiException):
hook.create_job(job=mock.MagicMock())
@patch(f"{HOOK_MODULE}.json.dumps")
@patch(f"{HOOK_MODULE}.KubernetesHook.batch_v1_client")
def test_create_job_retries_five_times(self, mock_client, mock_json_dumps):
mock_client.create_namespaced_job.side_effect = [
ApiException(status=500),
ApiException(status=500),
ApiException(status=500),
ApiException(status=500),
ApiException(status=500),
]
hook = KubernetesHook()
with pytest.raises(ApiException):
hook.create_job(job=mock.MagicMock())
assert mock_client.create_namespaced_job.call_count == 5
@pytest.mark.parametrize(
("given_namespace", "expected_namespace"),
[
(None, "default-namespace"),
("given-namespace", "given-namespace"),
],
)
@pytest.mark.parametrize(
("given_client", "expected_client"),
[
(None, mock.MagicMock()),
(mock_client := mock.MagicMock(), mock_client), # type: ignore[name-defined]
],
)
@patch(f"{HOOK_MODULE}.utils.create_from_yaml")
@patch(f"{HOOK_MODULE}.KubernetesHook.get_namespace")
@patch(f"{HOOK_MODULE}.KubernetesHook.api_client", new_callable=PropertyMock)
def test_apply_from_yaml_file(
self,
mock_api_client,
mock_get_namespace,
mock_create_from_yaml,
given_client,
expected_client,
given_namespace,
expected_namespace,
):
initial_kwargs = dict(
api_client=given_client,
yaml_objects=mock.MagicMock(),
yaml_file=mock.MagicMock(),
verbose=mock.MagicMock(),
namespace=given_namespace,
)
expected_kwargs = dict(
k8s_client=expected_client,
yaml_objects=initial_kwargs["yaml_objects"],
yaml_file=initial_kwargs["yaml_file"],
verbose=initial_kwargs["verbose"],
namespace=expected_namespace,
)
mock_api_client.return_value = expected_client
mock_get_namespace.return_value = expected_namespace
KubernetesHook().apply_from_yaml_file(**initial_kwargs)
mock_create_from_yaml.assert_called_once_with(**expected_kwargs)
if given_client is None:
mock_api_client.assert_called_once()
if given_namespace is None:
mock_get_namespace.assert_called_once()
@mock.patch(HOOK_MODULE + ".sleep")
@mock.patch(HOOK_MODULE + ".KubernetesHook.log")
@mock.patch(HOOK_MODULE + ".KubernetesHook.get_deployment_status")
def test_check_kueue_deployment_running(self, mock_get_deployment_status, mock_log, mock_sleep):
mock_get_deployment_status.side_effect = [
NOT_READY_DEPLOYMENT,
READY_DEPLOYMENT,
]
KubernetesHook().check_kueue_deployment_running(name=DEPLOYMENT_NAME, namespace=NAMESPACE)
mock_log.info.assert_called_once_with("Waiting until Deployment will be ready...")
mock_sleep.assert_called_once_with(2.0)
@mock.patch(HOOK_MODULE + ".KubernetesHook.log")
@mock.patch(HOOK_MODULE + ".KubernetesHook.get_deployment_status")
def test_check_kueue_deployment_raise_exception(self, mock_get_deployment_status, mock_log):
mock_get_deployment_status.side_effect = ValueError
with pytest.raises(ValueError, match="Exception occurred while checking for Deployment status"):
KubernetesHook().check_kueue_deployment_running(name=DEPLOYMENT_NAME, namespace=NAMESPACE)
mock_log.exception.assert_called_once_with("Exception occurred while checking for Deployment status.")
@mock.patch(f"{HOOK_MODULE}.yaml")
@mock.patch(f"{HOOK_MODULE}.requests")
def test_get_yaml_content_from_file(self, mock_requests, mock_yaml):
mock_get = mock_requests.get
mock_response = mock_get.return_value
expected_response_text = "test response text"
mock_response.text = expected_response_text
mock_response.status_code = 200
expected_result = list(mock_yaml.safe_load_all.return_value)
result = KubernetesHook().get_yaml_content_from_file(YAML_URL)
mock_get.assert_called_with(YAML_URL, allow_redirects=True)
mock_yaml.safe_load_all.assert_called_with(expected_response_text)
assert result == expected_result
@mock.patch(f"{HOOK_MODULE}.yaml")
@mock.patch(f"{HOOK_MODULE}.requests")
def test_get_yaml_content_from_file_error(self, mock_requests, mock_yaml):
mock_get = mock_requests.get
mock_response = mock_get.return_value
mock_response.status_code = 500
expected_error_message = "Was not able to read the yaml file from given URL"
with pytest.raises(AirflowException, match=expected_error_message):
KubernetesHook().get_yaml_content_from_file(YAML_URL)
mock_get.assert_called_with(YAML_URL, allow_redirects=True)
mock_yaml.safe_load_all.assert_not_called()
@patch("kubernetes.config.kube_config.KubeConfigLoader")
@patch("kubernetes.config.incluster_config.InClusterConfigLoader")
@patch("kubernetes.config.kube_config.KubeConfigMerger")
def test_load_config_with_config_dict(self, kube_config_merger, incluster_config, kube_config_loader):
hook = KubernetesHook(
conn_id=None,
in_cluster=False,
config_dict={"a": "b"},
cluster_context=None,
)
api_conn = hook.get_conn()
assert not incluster_config.called
assert hook._is_in_cluster is False
kube_config_loader.assert_called_once()
assert isinstance(api_conn, kubernetes.client.api_client.ApiClient)
| TestKubernetesHook |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-zendesk-support/unit_tests/integrations/zs_requests/users_request_builder.py | {
"start": 300,
"end": 1546
} | class ____(ZendeskSupportBaseRequestBuilder):
@classmethod
def endpoint(cls, authenticator: Authenticator) -> "UsersRequestBuilder":
return cls("d3v-airbyte", "incremental/users/cursor.json").with_authenticator(authenticator)
def __init__(self, subdomain: str, resource: str) -> None:
super().__init__(subdomain, resource)
self._start_time: Optional[str] = None
self._cursor: Optional[str] = None
self._include: Optional[str] = None
@property
def query_params(self):
params = super().query_params or {}
if self._start_time:
params["start_time"] = self._start_time
if self._cursor:
params["cursor"] = self._cursor
if self._include:
params["include"] = self._include
return params
def with_start_time(self, start_time: AirbyteDateTime) -> "UsersRequestBuilder":
self._start_time = str(calendar.timegm(start_time.timetuple()))
return self
def with_cursor(self, cursor: str) -> "UsersRequestBuilder":
self._cursor = cursor
return self
def with_include(self, include: str) -> "UsersRequestBuilder":
self._include = include
return self
| UsersRequestBuilder |
python | getsentry__sentry | src/sentry/api/endpoints/project_rule_details.py | {
"start": 2221,
"end": 4656
} | class ____(serializers.Serializer):
name = serializers.CharField(max_length=256, help_text="The name for the rule.")
actionMatch = serializers.ChoiceField(
choices=(
("all", "All conditions must evaluate to true."),
("any", "At least one of the conditions must evaluate to true."),
("none", "All conditions must evaluate to false."),
),
help_text="A string determining which of the conditions need to be true before any filters are evaluated.",
)
conditions = serializers.ListField(
child=RuleNodeField(type="condition/event"),
help_text="A list of triggers that determine when the rule fires. See [Create an Issue Alert Rule](/api/alerts/create-an-issue-alert-rule-for-a-project) for valid conditions.",
)
actions = serializers.ListField(
child=RuleNodeField(type="action/event"),
help_text="A list of actions that take place when all required conditions and filters for the rule are met. See [Create an Issue Alert Rule](/api/alerts/create-an-issue-alert-rule-for-a-project) for valid actions.",
)
frequency = serializers.IntegerField(
min_value=5,
max_value=60 * 24 * 30,
help_text="How often to perform the actions once for an issue, in minutes. The valid range is `5` to `43200`.",
)
environment = serializers.CharField(
required=False, allow_null=True, help_text="The name of the environment to filter by."
)
filterMatch = serializers.ChoiceField(
choices=(
("all", "All filters must evaluate to true."),
("any", "At least one of the filters must evaluate to true."),
("none", "All filters must evaluate to false."),
),
required=False,
help_text="A string determining which filters need to be true before any actions take place.",
)
filters = serializers.ListField(
child=RuleNodeField(type="filter/event"),
required=False,
help_text="A list of filters that determine if a rule fires after the necessary conditions have been met. See [Create an Issue Alert Rule](/api/alerts/create-an-issue-alert-rule-for-a-project) for valid filters.",
)
owner = ActorField(
required=False, allow_null=True, help_text="The ID of the team or user that owns the rule."
)
@extend_schema(tags=["Alerts"])
@region_silo_endpoint
| ProjectRuleDetailsPutSerializer |
python | PrefectHQ__prefect | src/prefect/client/orchestration/_artifacts/client.py | {
"start": 8414,
"end": 9725
} | class ____(BaseClient):
def read_latest_artifacts(
self, **kwargs: Unpack["ArtifactCollectionReadParams"]
) -> list["ArtifactCollection"]:
response = self.request(
"POST",
"/artifacts/latest/filter",
json={
"artifacts": (
artifact_filter.model_dump(mode="json", exclude_unset=True)
if (artifact_filter := kwargs.get("artifact_filter"))
else None
),
"flow_runs": (
flow_run_filter.model_dump(mode="json", exclude_unset=True)
if (flow_run_filter := kwargs.get("flow_run_filter"))
else None
),
"task_runs": (
task_run_filter.model_dump(mode="json", exclude_unset=True)
if (task_run_filter := kwargs.get("task_run_filter"))
else None
),
"limit": kwargs.get("limit", None),
"offset": kwargs.get("offset", 0),
"sort": kwargs.get("sort", None),
},
)
from prefect.client.schemas.objects import ArtifactCollection
return ArtifactCollection.model_validate_list(response.json())
| ArtifactCollectionClient |
python | huggingface__transformers | tests/models/perception_lm/test_modeling_perception_lm.py | {
"start": 5886,
"end": 14540
} | class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
"""
Model tester for `PerceptionLMForConditionalGeneration`.
"""
all_model_classes = (
(
PerceptionLMModel,
PerceptionLMForConditionalGeneration,
)
if is_torch_available()
else ()
)
_is_composite = True
def setUp(self):
self.model_tester = PerceptionLMVisionText2TextModelTester(self)
common_properties = [
"image_token_id",
"video_token_id",
]
self.config_tester = ConfigTester(
self,
config_class=PerceptionLMConfig,
has_text_modality=False,
common_properties=common_properties,
)
def test_config(self):
self.config_tester.run_common_tests()
# overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
del inputs["pixel_values_videos"]
wte = model.get_input_embeddings()
inputs["inputs_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)
# overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
# while some other models require pixel_values to be present
def test_inputs_embeds_matches_input_ids(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
del inputs["pixel_values_videos"]
inputs_embeds = model.get_input_embeddings()(input_ids)
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
torch.testing.assert_close(out_embeds, out_ids)
def test_mismatching_num_image_tokens(self):
"""
Tests that VLMs through an error with explicit message saying what is wrong
when number of images doesn't match number of image tokens in the text.
Also we need to test multi-image cases when one prompr has multiple image tokens.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
if model_class == PerceptionLMModel:
continue
model = model_class(config).to(torch_device)
model.eval()
_ = model(**input_dict) # successful forward with no modifications
# remove one image but leave the image token in text
input_dict["pixel_values"] = input_dict["pixel_values"][-1:, ...]
with self.assertRaises(ValueError):
_ = model(**input_dict)
# simulate multi-image case by concatenating inputs where each has exactly one image/image-token
input_ids = input_dict["input_ids"][:1]
pixel_values = input_dict["pixel_values"][:1]
input_ids = torch.cat([input_ids, input_ids], dim=0)
# one image and two image tokens raise an error
with self.assertRaises(ValueError):
_ = model(input_ids=input_ids, pixel_values=pixel_values)
# two images and two image tokens don't raise an error
pixel_values = torch.cat([pixel_values, pixel_values], dim=0)
_ = model(input_ids=input_ids, pixel_values=pixel_values)
def test_training(self):
self.all_model_classes = (PerceptionLMForConditionalGeneration,) if is_torch_available() else ()
super().test_training()
def test_training_gradient_checkpointing(self):
self.all_model_classes = (PerceptionLMForConditionalGeneration,) if is_torch_available() else ()
super().test_training_gradient_checkpointing()
def test_training_gradient_checkpointing_use_reentrant(self):
self.all_model_classes = (PerceptionLMForConditionalGeneration,) if is_torch_available() else ()
super().test_training_gradient_checkpointing_use_reentrant()
def test_training_gradient_checkpointing_use_reentrant_false(self):
self.all_model_classes = (PerceptionLMForConditionalGeneration,) if is_torch_available() else ()
super().test_training_gradient_checkpointing_use_reentrant_false()
@unittest.skip(reason="Timm Eva (PE) weights cannot be fully constructed in _init_weights")
def test_can_init_all_missing_weights(self):
pass
@unittest.skip(
reason="PE/TIMM's attention implementation is self configured and won't raise ValueError on global attention implementation."
)
def test_flash_attn_2_can_dispatch_composite_models(self):
pass
@unittest.skip(
"VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. Can be tested as part of LLM test"
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
pass
@unittest.skip("ViT PE / TimmWrapperModel cannot be tested with meta device")
def test_can_be_initialized_on_meta(self):
pass
@unittest.skip("Specifying both inputs_embeds and pixel_values are not supported for PerceptionLM")
def test_generate_from_inputs_embeds_0_greedy(self):
pass
@unittest.skip("Specifying both inputs_embeds and pixel_values are not supported for PerceptionLM")
def test_generate_from_inputs_embeds_1_beam_search(self):
pass
@unittest.skip("Specifying both inputs_embeds and pixel_values are not supported for PerceptionLM")
def test_generate_from_inputs_embeds_with_static_cache(self):
pass
## Skip flash attention releated tests below
## correct configuration:
## from_pretrained(model_id, attn_implementation={"text_config": "flash_attention_2", "vision_config": "eager"}
@unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
def test_eager_matches_fa2_generate(self):
pass
@unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
def test_flash_attn_2_fp32_ln(self):
pass
@unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
def test_flash_attn_2_from_config(self):
pass
@unittest.skip("SDPA test is not configured correctly as we need to configure vision/timm model to 'eager'.")
def test_eager_matches_sdpa_generate_with_dynamic_cache(self):
pass
@unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
def test_flash_attn_2_inference_equivalence_right_padding(self):
pass
@unittest.skip("SDPA test is not configured correctly as we need to configure vision/timm model to 'eager'.")
def test_eager_matches_sdpa_generate(self):
pass
@unittest.skip("Flash attn test is not configured correctly as we need to configure vision/timm model to 'eager'.")
def test_flash_attn_2_inference_equivalence(self):
pass
@unittest.skip(
"PerceptionLMForConditionalGeneration does not have language_model, vision_tower, multi_modal_projector."
)
def test_sdpa_can_dispatch_composite_models(self):
pass
@unittest.skip("Cannot set `output_attentions` for timm models.")
def test_attention_outputs(self):
pass
@unittest.skip("Cannot set `output_attentions` for timm models.")
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip("Cannot set `output_attentions` for timm models.")
def test_generate_compilation_all_outputs(self):
pass
TEST_MODEL_PATH = "facebook/Perception-LM-1B"
@require_torch
@require_bitsandbytes
@slow
@require_read_token
| PerceptionLMForConditionalGenerationModelTest |
python | doocs__leetcode | solution/0000-0099/0094.Binary Tree Inorder Traversal/Solution2.py | {
"start": 192,
"end": 560
} | class ____:
def inorderTraversal(self, root: Optional[TreeNode]) -> List[int]:
ans, stk = [], []
while root or stk:
if root:
stk.append(root)
root = root.left
else:
root = stk.pop()
ans.append(root.val)
root = root.right
return ans
| Solution |
python | pypa__setuptools | setuptools/command/install_scripts.py | {
"start": 205,
"end": 2490
} | class ____(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
distribution: Distribution # override distutils.dist.Distribution with setuptools.dist.Distribution
def initialize_options(self) -> None:
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self) -> None:
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles: list[str] = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
self._install_ep_scripts()
def _install_ep_scripts(self):
# Delay import side-effects
from .. import _scripts
from .._importlib import metadata
ei_cmd = self.get_finalized_command("egg_info")
dist = metadata.Distribution.at(path=ei_cmd.egg_info)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
writer = _scripts.ScriptWriter
if exec_param == sys.executable:
# In case the path to the Python executable contains a space, wrap
# it so it's not split up.
exec_param = [exec_param]
# resolve the writer to the environment
writer = writer.best()
cmd = writer.command_spec_class.best().from_param(exec_param)
for args in writer.get_args(dist, cmd.as_header()):
self.write_script(*args)
def write_script(self, script_name, contents, mode: str = "t", *ignored) -> None:
"""Write an executable file to the scripts directory"""
from .._shutil import attempt_chmod_verbose as chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
encoding = None if "b" in mode else "utf-8"
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
with open(target, "w" + mode, encoding=encoding) as f:
f.write(contents)
chmod(target, 0o777 - mask)
| install_scripts |
python | davidhalter__jedi | jedi/inference/context.py | {
"start": 13203,
"end": 13350
} | class ____(ValueContext):
def get_filters(self, until_position=None, origin_scope=None):
return self._value.get_filters()
| CompiledContext |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 12753,
"end": 12939
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = ("COMMENTS", "CREATED_AT", "UPDATED_AT")
| IssueOrderField |
python | pallets__markupsafe | tests/test_escape.py | {
"start": 1044,
"end": 1635
} | class ____:
def __init__(self, value: t.Any) -> None:
self.__value = value
@property # type: ignore[misc]
def __class__(self) -> type[t.Any]:
# Make o.__class__ and isinstance(o, str) see the proxied object.
return self.__value.__class__ # type: ignore[no-any-return]
def __str__(self) -> str:
return str(self.__value)
def test_proxy() -> None:
"""Handle a proxy object that pretends its __class__ is str."""
p = Proxy("test")
assert p.__class__ is str
assert isinstance(p, str)
assert escape(p) == Markup("test")
| Proxy |
python | doocs__leetcode | solution/1600-1699/1626.Best Team With No Conflicts/Solution2.py | {
"start": 0,
"end": 361
} | class ____:
def __init__(self, n):
self.n = n
self.c = [0] * (n + 1)
def update(self, x, val):
while x <= self.n:
self.c[x] = max(self.c[x], val)
x += x & -x
def query(self, x):
s = 0
while x:
s = max(s, self.c[x])
x -= x & -x
return s
| BinaryIndexedTree |
python | kamyu104__LeetCode-Solutions | Python/design-front-middle-back-queue.py | {
"start": 50,
"end": 1493
} | class ____(object):
def __init__(self):
self.__left, self.__right = collections.deque(), collections.deque()
def pushFront(self, val):
"""
:type val: int
:rtype: None
"""
self.__left.appendleft(val)
self.__balance()
def pushMiddle(self, val):
"""
:type val: int
:rtype: None
"""
if len(self.__left) > len(self.__right):
self.__right.appendleft(self.__left.pop())
self.__left.append(val)
def pushBack(self, val):
"""
:type val: int
:rtype: None
"""
self.__right.append(val)
self.__balance()
def popFront(self):
"""
:rtype: int
"""
val = (self.__left or collections.deque([-1])).popleft()
self.__balance()
return val
def popMiddle(self):
"""
:rtype: int
"""
val = (self.__left or [-1]).pop()
self.__balance()
return val
def popBack(self):
"""
:rtype: int
"""
val = (self.__right or self.__left or [-1]).pop()
self.__balance()
return val
def __balance(self):
if len(self.__left) > len(self.__right)+1:
self.__right.appendleft(self.__left.pop())
elif len(self.__left) < len(self.__right):
self.__left.append(self.__right.popleft())
| FrontMiddleBackQueue |
python | geekcomputers__Python | PongPong_Game/pongpong.py | {
"start": 523,
"end": 1833
} | class ____(pyglet.window.Window):
def __init__(self, *args, **kwargs):
super(PongPongWindow, self).__init__(*args, **kwargs)
self.win_size = (WIDTH, HEIGHT)
self.paddle_pos = (WIDTH / 2 - PWIDTH / 2, 0)
self.main_batch = pyglet.graphics.Batch()
self.walls = load.load_rectangles(self.win_size, BORDER, batch=self.main_batch)
self.balls = load.load_balls(
self.win_size, RADIUS, speed=ballspeed, batch=self.main_batch
)
self.paddles = load.load_paddles(
self.paddle_pos, PWIDTH, PHEIGHT, acc=paddleacc, batch=self.main_batch
)
def on_draw(self):
self.clear()
self.main_batch.draw()
game_window = PongPongWindow(width=WIDTH, height=HEIGHT, caption="PongPong")
game_objects = game_window.balls + game_window.paddles
for paddle in game_window.paddles:
for handler in paddle.event_handlers:
game_window.push_handlers(handler)
def update(dt):
global game_objects, game_window
for obj1 in game_objects:
for obj2 in game_objects:
if obj1 is obj2:
continue
obj1.update(game_window.win_size, BORDER, obj2, dt)
if __name__ == "__main__":
pyglet.clock.schedule_interval(update, 1 / 120.0)
pyglet.app.run()
| PongPongWindow |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/ragged_constant_value_op_test.py | {
"start": 1114,
"end": 13214
} | class ____(test_util.TensorFlowTestCase,
parameterized.TestCase):
@parameterized.parameters(
#=========================================================================
# 0-dimensional tensors.
dict(pylist='x', expected_shape=()),
#=========================================================================
# 1-dimensional tensors.
dict(pylist=[1, 2, 3], expected_shape=(3,)),
#=========================================================================
# 2-dimensional tensors.
dict(pylist=[[1, 2, 3], [4], [5, 6]], expected_shape=(3, None)),
dict(pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]], expected_shape=(3, None)),
#=========================================================================
# 3-dimensional tensors.
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
expected_shape=(3, None, None)),
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
inner_shape=(2,),
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], [3, 4]], [], [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
inner_shape=(2,),
expected_shape=(3, None, 2)),
# 3-dimensional tensors with numpy arrays
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
expected_shape=(3, None, None)),
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
expected_shape=(3, None, 2)),
dict(
pylist=[[np.array([3, np.array(4)]), [1, 2]],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
inner_shape=(2,),
expected_shape=(3, None, 2)),
dict(
pylist=[[[1, 2], np.array([3, np.array(4)])],
np.array([]), [[5, 6], [7, 8], [9, 0]]],
ragged_rank=1,
inner_shape=(2,),
expected_shape=(3, None, 2)),
#=========================================================================
# 4-dimensional tensors.
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
expected_shape=(2, None, None, None)),
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
ragged_rank=1,
expected_shape=(2, None, 2, 2)),
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
inner_shape=(2,),
expected_shape=(2, None, None, 2)),
dict(
pylist=[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
[[[2, 4], [6, 8]], [[1, 5], [7, 9]]]],
inner_shape=(2, 2),
expected_shape=(2, None, 2, 2)),
# 4-dimensional tensors with numpy arrays
dict(
pylist=np.array([[[np.array([1, 2]), [3, 4]], [[5, 6], [7, 8]]],
np.array([[[2, 4], [6, 8]], [[1, 5], [7, 9]]])]),
expected_shape=(2, None, None, None)),
#=========================================================================
# Empty tensors (no scalar values) w/ default ragged_rank and inner_shape
dict(pylist=[], expected_shape=(0,)),
dict(pylist=[[], [], np.array([])], expected_shape=(3, None)),
dict(
pylist=[[[], []], [], [[], [[]]]],
expected_shape=(3, None, None, None)),
dict(
pylist=np.array([np.array([[], []]),
np.array([]), [[], [[]]]], dtype=object),
expected_shape=(3, None, None, None)),
#=========================================================================
# Empty tensors (no scalar values) w/ explicit ragged_rank or inner_shape
dict(pylist=[], ragged_rank=1, expected_shape=(0, None)),
dict(pylist=[], ragged_rank=2, expected_shape=(0, None, None)),
dict(pylist=[], inner_shape=(0, 100, 20), expected_shape=(0, 100, 20)),
dict(
pylist=[],
ragged_rank=1,
inner_shape=(100, 20),
expected_shape=(0, None, 100, 20)),
dict(
pylist=[],
ragged_rank=2,
inner_shape=(100, 20),
expected_shape=(0, None, None, 100, 20)),
dict(pylist=[[], [], []], ragged_rank=2, expected_shape=(3, None, None)),
dict(pylist=[], inner_shape=(0,), expected_shape=(0,)),
dict(pylist=[[]], inner_shape=(1, 0), expected_shape=(1, 0)),
dict(
pylist=np.array([]),
ragged_rank=1,
inner_shape=(100, 20),
expected_shape=(0, None, 100, 20)),
#=========================================================================
# default/inferred dtypes.
#
# Note: numpy has different default/inferred types than tensorflow.
# Since we are using values, not tensors, we get the default numpy types
# here.
dict(pylist=[], expected_dtype=np.float64),
dict(pylist=[[[], [[[]], []]]], expected_dtype=np.float64),
dict(pylist=[[1, 2], [3], [4, 5, 6]], expected_dtype=np.int64),
dict(pylist=[[1., 2.], [], [4., 5., 6.]], expected_dtype=np.float64),
dict(pylist=[[1, 2], [3.], [4, 5, 6]], expected_dtype=np.float64),
dict(pylist=[[b'a', b'b'], [b'c']], expected_dtype=np.dtype('S1')),
dict(pylist=[[True]], expected_dtype=np.bool_),
dict(
pylist=[np.array([1, 2]), np.array([3.]), [4, 5, 6]],
expected_dtype=np.float64),
#=========================================================================
# explicit dtypes
dict(pylist=[], dtype=np.float32),
dict(pylist=[], dtype=np.dtype('S1')),
dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.int64),
dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.int32),
dict(pylist=[[1, 2], [3], [4, 5, 6]], dtype=np.float32),
dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=np.float16),
dict(pylist=[[1., 2.], [3.], [4., 5., 6.]], dtype=np.float32),
dict(
pylist=[[b'a', b'b'], [b'c'], [b'd', b'e', b'f']],
dtype=np.dtype('S1')),
dict(pylist=[], dtype=dtypes.float32, expected_dtype=np.float32),
dict(pylist=[], dtype=dtypes.int32, expected_dtype=np.int32),
)
def testRaggedValues(self,
pylist,
dtype=None,
ragged_rank=None,
inner_shape=None,
expected_shape=None,
expected_dtype=None):
"""Tests that `ragged_value(pylist).to_list() == pylist`."""
rt = ragged_factory_ops.constant_value(
pylist, dtype=dtype, ragged_rank=ragged_rank, inner_shape=inner_shape)
# Normalize the pylist, i.e., convert all np.arrays to list.
# E.g., [np.array((1,2))] --> [[1,2]]
pylist = _normalize_pylist(pylist)
# If dtype was explicitly specified, check it.
if expected_dtype is not None:
self.assertEqual(rt.dtype, expected_dtype)
elif dtype is not None:
self.assertEqual(rt.dtype, dtype)
# If ragged_rank was explicitly specified, check it.
if ragged_rank is not None:
if isinstance(rt, ragged_tensor_value.RaggedTensorValue):
self.assertEqual(rt.ragged_rank, ragged_rank)
else:
self.assertEqual(0, ragged_rank)
# If inner_shape was explicitly specified, check it.
if inner_shape is not None:
if isinstance(rt, ragged_tensor_value.RaggedTensorValue):
self.assertEqual(rt.flat_values.shape[1:], inner_shape)
else:
self.assertEqual(rt.shape, inner_shape)
if expected_shape is not None:
self.assertEqual(tuple(rt.shape), expected_shape)
if rt.shape:
if isinstance(rt, ragged_tensor_value.RaggedTensorValue):
self.assertEqual(rt.to_list(), pylist)
else:
self.assertEqual(rt.tolist(), pylist)
if expected_shape is not None:
self.assertEqual(rt.shape, expected_shape)
else:
self.assertEqual(rt, pylist)
if expected_shape is not None:
self.assertEqual((), expected_shape)
@parameterized.parameters(
dict(
pylist=12,
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=12: incompatible with ragged_rank=1'),
dict(
pylist=np.array(12),
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=array\\(12\\): incompatible with '
'ragged_rank=1'),
dict(
pylist=12,
inner_shape=(1,),
exception=ValueError,
message='Invalid pylist=12: incompatible with '
'dim\\(inner_shape\\)=1'),
dict(
pylist=[[[1], [2]]],
ragged_rank=-1,
exception=ValueError,
message='Invalid ragged_rank=-1: must be nonnegative'),
dict(
pylist=[[1, [2]]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[[1]], [[[2]]]],
exception=ValueError,
message='all scalar values must have the same nesting depth'),
dict(
pylist=[[1], [[]]],
exception=ValueError,
message='Invalid pylist=.*: empty list nesting is greater '
'than scalar value nesting'),
dict(
pylist=[1, 2, 3],
ragged_rank=1,
exception=ValueError,
message='pylist has scalar values depth 1, but ragged_rank=1 '
'requires scalar value depth greater than 1'),
dict(
pylist=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
ragged_rank=2,
exception=ValueError,
message='pylist has scalar values depth 2, but ragged_rank=2 '
'requires scalar value depth greater than 2'),
dict(
pylist=[1, 2, 3],
inner_shape=(1, 1),
exception=ValueError,
message='cannot reshape array'),
dict(
pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8]]],
inner_shape=(2, 2),
ragged_rank=1,
exception=ValueError,
message='Invalid pylist=.*: incompatible with ragged_rank=1 and '
'dim\\(inner_shape\\)=2'),
dict(
pylist=[[[1, 2], [3, 4]], [[5, 6], [7, 8, 9]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
dict(
pylist=[[[], [[]]]],
ragged_rank=1,
exception=ValueError,
message='inner values have inconsistent shape'),
)
def testRaggedValuesError(self,
pylist,
dtype=None,
ragged_rank=None,
inner_shape=None,
exception=None,
message=None):
"""Tests that `constant_value()` raises an expected exception."""
self.assertRaisesRegex(
exception,
message,
ragged_factory_ops.constant_value,
pylist,
dtype=dtype,
ragged_rank=ragged_rank,
inner_shape=inner_shape)
def _normalize_pylist(item):
"""Convert all (possibly nested) np.arrays contained in item to list."""
# convert np.arrays in current level to list
if not isinstance(item, (list, np.ndarray)):
return item
level = (x.tolist() if isinstance(x, np.ndarray) else x for x in item)
return [_normalize_pylist(el) if isinstance(item, (list, np.ndarray))
else el for el in level]
if __name__ == '__main__':
googletest.main()
| RaggedConstantValueOpTest |
python | tensorflow__tensorflow | tensorflow/python/framework/ops.py | {
"start": 62180,
"end": 65601
} | class ____(object):
"""A decorator for registering the gradient function for an op type.
This decorator is only used when defining a new op type. For an op
with `m` inputs and `n` outputs, the gradient function is a function
that takes the original `Operation` and `n` `Tensor` objects
(representing the gradients with respect to each output of the op),
and returns `m` `Tensor` objects (representing the partial gradients
with respect to each input of the op).
For example, assuming that operations of type `"Sub"` take two
inputs `x` and `y`, and return a single output `x - y`, the
following gradient function would be registered:
```python
@tf.RegisterGradient("Sub")
def _sub_grad(unused_op, grad):
return grad, tf.negative(grad)
```
The decorator argument `op_type` is the string type of an
operation. This corresponds to the `OpDef.name` field for the proto
that defines the operation.
"""
__slots__ = ["_op_type"]
def __init__(self, op_type):
"""Creates a new decorator with `op_type` as the Operation type.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not string.
"""
if not isinstance(op_type, str):
raise TypeError("op_type must be a string")
self._op_type = op_type
def __call__(self, f: _T) -> _T:
"""Registers the function `f` as gradient function for `op_type`."""
gradient_registry.register(f, self._op_type)
return f
@deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient")
@tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"])
def no_gradient(op_type: str) -> None:
"""Specifies that ops of type `op_type` is not differentiable.
This function should *not* be used for operations that have a
well-defined gradient that is not yet implemented.
This function is only used when defining a new op type. It may be
used for ops such as `tf.size()` that are not differentiable. For
example:
```python
tf.no_gradient("Size")
```
The gradient computed for 'op_type' will then propagate zeros.
For ops that have a well-defined gradient but are not yet implemented,
no declaration should be made, and an error *must* be thrown if
an attempt to request its gradient is made.
Args:
op_type: The string type of an operation. This corresponds to the
`OpDef.name` field for the proto that defines the operation.
Raises:
TypeError: If `op_type` is not a string.
"""
if not isinstance(op_type, str):
raise TypeError("op_type must be a string")
gradient_registry.register(None, op_type)
# Aliases for the old names, will be eventually removed.
NoGradient: Callable[[str], None] = no_gradient
NotDifferentiable: Callable[[str], None] = no_gradient
def get_gradient_function(op):
"""Returns the function that computes gradients for "op"."""
if not op.inputs:
return None
gradient_function = op._gradient_function # pylint: disable=protected-access
if gradient_function:
return gradient_function
try:
op_type = op.get_attr("_gradient_op_type")
except ValueError:
op_type = op.type
return gradient_registry.lookup(op_type)
def set_shape_and_handle_data_for_outputs(_) -> None:
"""No op. TODO(b/74620627): Remove this."""
pass
| RegisterGradient |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_column05.py | {
"start": 315,
"end": 1343
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_column05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet("Foo")
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [47292800, 47295104]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Foo!$A$1:$A$5"})
chart.add_series({"values": "=Foo!$B$1:$B$5"})
chart.add_series({"values": "=Foo!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | src/transformers/models/deepseek_v3/modular_deepseek_v3.py | {
"start": 13369,
"end": 13472
} | class ____(LlamaModel):
_keys_to_ignore_on_load_unexpected = [r"model\.layers\.61.*"]
| DeepseekV3Model |
python | ray-project__ray | python/ray/util/client/common.py | {
"start": 25777,
"end": 31367
} | class ____:
"""
Cache for blocking method calls. Needed to prevent retried requests from
being applied multiple times on the server, for example when the client
disconnects. This is used to cache requests/responses sent through
unary-unary RPCs to the RayletServicer.
Note that no clean up logic is used, the last response for each thread
will always be remembered, so at most the cache will hold N entries,
where N is the number of threads on the client side. This relies on the
assumption that a thread will not make a new blocking request until it has
received a response for a previous one, at which point it's safe to
overwrite the old response.
The high level logic is:
1. Before making a call, check the cache for the current thread.
2. If present in the cache, check the request id of the cached
response.
a. If it matches the current request_id, then the request has been
received before and we shouldn't re-attempt the logic. Wait for
the response to become available in the cache, and then return it
b. If it doesn't match, then this is a new request and we can
proceed with calling the real stub. While the response is still
being generated, temporarily keep (req_id, None) in the cache.
Once the call is finished, update the cache entry with the
new (req_id, response) pair. Notify other threads that may
have been waiting for the response to be prepared.
"""
def __init__(self):
self.cv = threading.Condition()
self.cache: Dict[int, Tuple[int, Any]] = {}
def check_cache(self, thread_id: int, request_id: int) -> Optional[Any]:
"""
Check the cache for a given thread, and see if the entry in the cache
matches the current request_id. Returns None if the request_id has
not been seen yet, otherwise returns the cached result.
Throws an error if the placeholder in the cache doesn't match the
request_id -- this means that a new request evicted the old value in
the cache, and that the RPC for `request_id` is redundant and the
result can be discarded, i.e.:
1. Request A is sent (A1)
2. Channel disconnects
3. Request A is resent (A2)
4. A1 is received
5. A2 is received, waits for A1 to finish
6. A1 finishes and is sent back to client
7. Request B is sent
8. Request B overwrites cache entry
9. A2 wakes up extremely late, but cache is now invalid
In practice this is VERY unlikely to happen, but the error can at
least serve as a sanity check or catch invalid request id's.
"""
with self.cv:
if thread_id in self.cache:
cached_request_id, cached_resp = self.cache[thread_id]
if cached_request_id == request_id:
while cached_resp is None:
# The call was started, but the response hasn't yet
# been added to the cache. Let go of the lock and
# wait until the response is ready.
self.cv.wait()
cached_request_id, cached_resp = self.cache[thread_id]
if cached_request_id != request_id:
raise RuntimeError(
"Cached response doesn't match the id of the "
"original request. This might happen if this "
"request was received out of order. The "
"result of the caller is no longer needed. "
f"({request_id} != {cached_request_id})"
)
return cached_resp
if not _id_is_newer(request_id, cached_request_id):
raise RuntimeError(
"Attempting to replace newer cache entry with older "
"one. This might happen if this request was received "
"out of order. The result of the caller is no "
f"longer needed. ({request_id} != {cached_request_id}"
)
self.cache[thread_id] = (request_id, None)
return None
def update_cache(self, thread_id: int, request_id: int, response: Any) -> None:
"""
Inserts `response` into the cache for `request_id`.
"""
with self.cv:
cached_request_id, cached_resp = self.cache[thread_id]
if cached_request_id != request_id or cached_resp is not None:
# The cache was overwritten by a newer requester between
# our call to check_cache and our call to update it.
# This can't happen if the assumption that the cached requests
# are all blocking on the client side, so if you encounter
# this, check if any async requests are being cached.
raise RuntimeError(
"Attempting to update the cache, but placeholder's "
"do not match the current request_id. This might happen "
"if this request was received out of order. The result "
f"of the caller is no longer needed. ({request_id} != "
f"{cached_request_id})"
)
self.cache[thread_id] = (request_id, response)
self.cv.notify_all()
| ResponseCache |
python | neetcode-gh__leetcode | python/0621-task-scheduler.py | {
"start": 0,
"end": 640
} | class ____:
def leastInterval(self, tasks: List[str], n: int) -> int:
count = Counter(tasks)
maxHeap = [-cnt for cnt in count.values()]
heapq.heapify(maxHeap)
time = 0
q = deque() # pairs of [-cnt, idleTime]
while maxHeap or q:
time += 1
if not maxHeap:
time = q[0][1]
else:
cnt = 1 + heapq.heappop(maxHeap)
if cnt:
q.append([cnt, time + n])
if q and q[0][1] == time:
heapq.heappush(maxHeap, q.popleft()[0])
return time
# Greedy algorithm
| Solution |
python | buildout__buildout | src/zc/buildout/testing.py | {
"start": 7042,
"end": 7250
} | class ____(zc.buildout.buildout.Options):
def __init__(self, *args):
zc.buildout.buildout.Options.__init__(self, *args)
self._created = []
def initialize(self):
pass
| TestOptions |
python | doocs__leetcode | solution/0300-0399/0320.Generalized Abbreviation/Solution2.py | {
"start": 0,
"end": 550
} | class ____:
def generateAbbreviations(self, word: str) -> List[str]:
n = len(word)
ans = []
for i in range(1 << n):
cnt = 0
s = []
for j in range(n):
if i >> j & 1:
cnt += 1
else:
if cnt:
s.append(str(cnt))
cnt = 0
s.append(word[j])
if cnt:
s.append(str(cnt))
ans.append("".join(s))
return ans
| Solution |
python | huggingface__transformers | src/transformers/models/esm/modeling_esm.py | {
"start": 16646,
"end": 17010
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = gelu(hidden_states)
return hidden_states
| EsmIntermediate |
python | allegroai__clearml | clearml/backend_api/services/v2_23/frames.py | {
"start": 111307,
"end": 114649
} | class ____(Request):
"""
Get a specific frame for a dataset version using the frame's id. Random Access API.
:param dataset: Dataset id
:type dataset: str
:param version: Version id
:type version: str
:param frame: Frame id
:type frame: str
:param projection: Used to select which parts of the frame will be returned.
Each string represents a field or sub-field (using dot-separated notation). In
order to specify a specific array element, use array index as a field name. To
specify all array elements, use '*'.
:type projection: Sequence[str]
"""
_service = "frames"
_action = "get_by_id"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"dataset": {"description": "Dataset id", "type": "string"},
"frame": {"description": "Frame id", "type": "string"},
"projection": {
"description": (
"Used to select which parts of the frame will be returned. Each string represents a\n "
" field or sub-field (using dot-separated notation). In order to specify a specific array"
" element,\n use array index as a field name. To specify all array elements, use"
" '*'."
),
"items": {"type": "string"},
"type": "array",
},
"version": {"description": "Version id", "type": "string"},
},
"required": ["dataset", "version", "frame"],
"type": "object",
}
def __init__(self, dataset, version, frame, projection=None, **kwargs):
super(GetByIdRequest, self).__init__(**kwargs)
self.dataset = dataset
self.version = version
self.frame = frame
self.projection = projection
@schema_property("dataset")
def dataset(self):
return self._property_dataset
@dataset.setter
def dataset(self, value):
if value is None:
self._property_dataset = None
return
self.assert_isinstance(value, "dataset", six.string_types)
self._property_dataset = value
@schema_property("version")
def version(self):
return self._property_version
@version.setter
def version(self, value):
if value is None:
self._property_version = None
return
self.assert_isinstance(value, "version", six.string_types)
self._property_version = value
@schema_property("frame")
def frame(self):
return self._property_frame
@frame.setter
def frame(self, value):
if value is None:
self._property_frame = None
return
self.assert_isinstance(value, "frame", six.string_types)
self._property_frame = value
@schema_property("projection")
def projection(self):
return self._property_projection
@projection.setter
def projection(self, value):
if value is None:
self._property_projection = None
return
self.assert_isinstance(value, "projection", (list, tuple))
self.assert_isinstance(value, "projection", six.string_types, is_array=True)
self._property_projection = value
| GetByIdRequest |
python | charliermarsh__ruff | crates/ruff_python_parser/resources/valid/statement/class.py | {
"start": 171,
"end": 331
} | class ____(A, B):
def __init__(self):
pass
def method_with_default(self, arg='default'):
pass
# Class with generic types:
# TypeVar
| Test |
python | pypa__virtualenv | src/virtualenv/create/describe.py | {
"start": 2884,
"end": 3154
} | class ____(Describe, ABC):
@classmethod
def can_describe(cls, interpreter):
return interpreter.os == "nt" and super().can_describe(interpreter)
__all__ = [
"Describe",
"PosixSupports",
"Python3Supports",
"WindowsSupports",
]
| WindowsSupports |
python | tensorflow__tensorflow | tensorflow/python/framework/convert_to_constants.py | {
"start": 27144,
"end": 29514
} | class ____(object):
"""Container for constant conversion supporting data.
The data includes the graph being converted, and the pre-converted
tensors. This class will be specialized for ConcreteFunction and Session-based
conversions, as the means to obtain that data is different for each case.
"""
def __init__(self,
graph_def,
variable_names_allowlist=None,
variable_names_denylist=None):
self._graph_def = graph_def
self._tensor_data = {}
self._build_node_defs_list()
self._variable_names_allowlist = variable_names_allowlist
self._variable_names_denylist = variable_names_denylist
@property
def graph_def(self):
"""The graph to be converted."""
return self._graph_def
@property
def node_defs(self):
"""All the node defs in the graph to be converted.
Returns:
A map from node name to the NodeDef for all NodeDefs in the graph, as well
as all control flow NodeDefs in the functions.
"""
return self._node_defs
@property
def tensor_data(self):
"""A map from tensor name to its converted _TensorData."""
return self._tensor_data
def _should_convert(self, name):
"""Checks whether to convert the given variable name to a constant."""
return (self._variable_names_allowlist is None or
name in self._variable_names_allowlist) and (
self._variable_names_denylist is None or
name not in self._variable_names_denylist)
def _build_node_defs_list(self):
"""Builds the list of NodeDefs in the GraphDef.
This list consists of all NodeDefs in the main graph as well as all control
flow NodeDefs in the functions.
The remaining NodeDefs in the functions are not included because the op
names
are not unique and the variables are handled differently than the main
graph.
The control flow ops need to be extracted because they are need their
attributes to be updated similar to the control flow ops in the main graph.
"""
self._node_defs = {node.name: node for node in self._graph_def.node}
if self._graph_def.library:
for func in self._graph_def.library.function:
self._node_defs.update({
node.name: node
for node in func.node_def
if node.op in _CONTROL_FLOW_OPS
})
| _ConverterData |
python | astropy__astropy | astropy/io/ascii/html.py | {
"start": 462,
"end": 697
} | class ____(str):
"""
Allows for strings to hold BeautifulSoup data.
"""
def __new__(cls, *args, **kwargs):
return str.__new__(cls, *args, **kwargs)
def __init__(self, val):
self.soup = val
| SoupString |
python | faif__python-patterns | patterns/other/blackboard.py | {
"start": 1369,
"end": 2023
} | class ____:
"""The controller that manages the blackboard system."""
def __init__(self, blackboard: Blackboard) -> None:
self.blackboard = blackboard
def run_loop(self):
"""
This function is a loop that runs until the progress reaches 100.
It checks if an expert is eager to contribute and then calls its contribute method.
"""
while self.blackboard.common_state["progress"] < 100:
for expert in self.blackboard.experts:
if expert.is_eager_to_contribute:
expert.contribute()
return self.blackboard.common_state["contributions"]
| Controller |
python | huggingface__transformers | src/transformers/models/qwen3_next/modular_qwen3_next.py | {
"start": 27453,
"end": 27497
} | class ____(Qwen3MoeMLP):
pass
| Qwen3NextMLP |
python | conda__conda | conda/core/path_actions.py | {
"start": 26370,
"end": 27318
} | class ____(CompileMultiPycAction):
"""Bunch up all of our compile actions, so that they all get carried out at once.
This avoids clobbering and is faster when we have several individual packages requiring
compilation.
"""
def __init__(self, *individuals, **kw):
transaction_context = individuals[0].transaction_context
# not used; doesn't matter
package_info = individuals[0].package_info
target_prefix = individuals[0].target_prefix
source_short_paths = set()
target_short_paths = set()
for individual in individuals:
source_short_paths.update(individual.source_short_paths)
target_short_paths.update(individual.target_short_paths)
super().__init__(
transaction_context,
package_info,
target_prefix,
source_short_paths,
target_short_paths,
)
| AggregateCompileMultiPycAction |
python | pypa__pipenv | pipenv/patched/pip/_vendor/distlib/locators.py | {
"start": 35003,
"end": 36706
} | class ____(Locator):
"""
This locator uses special extended metadata (not available on PyPI) and is
the basis of performant dependency resolution in distlib. Other locators
require archive downloads before dependencies can be determined! As you
might imagine, that can be slow.
"""
def get_distribution_names(self):
"""
Return all the distribution names known to this locator.
"""
raise NotImplementedError('Not available from this locator')
def _get_project(self, name):
result = {'urls': {}, 'digests': {}}
data = get_project_data(name)
if data:
for info in data.get('files', []):
if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
continue
# We don't store summary in project metadata as it makes
# the data bigger for no benefit during dependency
# resolution
dist = make_dist(data['name'],
info['version'],
summary=data.get('summary', 'Placeholder for summary'),
scheme=self.scheme)
md = dist.metadata
md.source_url = info['url']
# TODO SHA256 digest
if 'digest' in info and info['digest']:
dist.digest = ('md5', info['digest'])
md.dependencies = info.get('requirements', {})
dist.exports = info.get('exports', {})
result[dist.version] = dist
result['urls'].setdefault(dist.version, set()).add(info['url'])
return result
| JSONLocator |
python | PrefectHQ__prefect | src/prefect/infrastructure/provisioners/ecs.py | {
"start": 29146,
"end": 34757
} | class ____:
def __init__(self, work_pool_name: str, repository_name: str = "prefect-flows"):
self._ecr_client = boto3.client("ecr")
self._repository_name = repository_name
self._requires_provisioning = None
self._work_pool_name = work_pool_name
self._next_steps: list[str | Panel] = []
async def get_task_count(self) -> int:
"""
Returns the number of tasks that will be executed to provision this resource.
Returns:
int: The number of tasks to be provisioned.
"""
return 3 if await self.requires_provisioning() else 0
async def _get_prefect_created_registry(self):
try:
registries = await anyio.to_thread.run_sync(
partial(
self._ecr_client.describe_repositories,
repositoryNames=[self._repository_name],
)
)
return next(iter(registries), None)
except self._ecr_client.exceptions.RepositoryNotFoundException:
return None
async def requires_provisioning(self) -> bool:
"""
Check if this resource requires provisioning.
Returns:
bool: True if provisioning is required, False otherwise.
"""
if self._requires_provisioning is not None:
return self._requires_provisioning
if await self._get_prefect_created_registry() is not None:
self._requires_provisioning = False
return False
self._requires_provisioning = True
return True
async def get_planned_actions(self) -> List[str]:
"""
Returns a description of the planned actions for provisioning this resource.
Returns:
Optional[str]: A description of the planned actions for provisioning the resource,
or None if provisioning is not required.
"""
if await self.requires_provisioning():
return [
"Creating an ECR repository for storing Prefect images:"
f" [blue]{self._repository_name}[/]"
]
return []
async def provision(
self,
base_job_template: dict[str, Any],
advance: Callable[[], None],
) -> None:
"""
Provisions an ECR repository.
Args:
base_job_template: The base job template of the work pool to provision
infrastructure for.
advance: A callback function to indicate progress.
"""
if await self.requires_provisioning():
console = current_console.get()
console.print("Provisioning ECR repository")
response = await anyio.to_thread.run_sync(
partial(
self._ecr_client.create_repository,
repositoryName=self._repository_name,
)
)
advance()
console.print("Authenticating with ECR")
auth_token = self._ecr_client.get_authorization_token()
user, passwd = (
base64.b64decode(
auth_token["authorizationData"][0]["authorizationToken"]
)
.decode()
.split(":")
)
proxy_endpoint = auth_token["authorizationData"][0]["proxyEndpoint"]
await run_process(f"docker login -u {user} -p {passwd} {proxy_endpoint}")
advance()
console.print("Setting default Docker build namespace")
namespace = response["repository"]["repositoryUri"].split("/")[0]
update_current_profile({PREFECT_DEFAULT_DOCKER_BUILD_NAMESPACE: namespace})
self._update_next_steps(namespace)
advance()
def _update_next_steps(self, repository_uri: str):
self._next_steps.extend(
[
dedent(
f"""\
Your default Docker build namespace has been set to [blue]{repository_uri!r}[/].
To build and push a Docker image to your newly created repository, use [blue]{self._repository_name!r}[/] as your image name:
"""
),
Panel(
Syntax(
dedent(
f"""\
from prefect import flow
from prefect.docker import DockerImage
@flow(log_prints=True)
def my_flow(name: str = "world"):
print(f"Hello {{name}}! I'm a flow running on ECS!")
if __name__ == "__main__":
my_flow.deploy(
name="my-deployment",
work_pool_name="{self._work_pool_name}",
image=DockerImage(
name="{self._repository_name}:latest",
platform="linux/amd64",
)
)"""
),
"python",
background_color="default",
),
title="example_deploy_script.py",
expand=False,
),
]
)
@property
def next_steps(self) -> list[str | Panel]:
return self._next_steps
| ContainerRepositoryResource |
python | encode__django-rest-framework | tests/schemas/test_coreapi.py | {
"start": 18292,
"end": 18598
} | class ____(APIView):
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
def get(self, *args, **kwargs):
pass
@unittest.skipUnless(coreapi, 'coreapi is not installed')
@override_settings(REST_FRAMEWORK={'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.AutoSchema'})
| ExampleDetailView |
python | Textualize__textual | src/textual/app.py | {
"start": 6523,
"end": 6635
} | class ____(ScreenError):
"""Raised when trying to manipulate the screen stack incorrectly."""
| ScreenStackError |
python | pandas-dev__pandas | pandas/tests/arrays/test_ndarray_backed.py | {
"start": 260,
"end": 2332
} | class ____:
def test_empty_categorical(self):
ci = CategoricalIndex(["a", "b", "c"], ordered=True)
dtype = ci.dtype
# case with int8 codes
shape = (4,)
result = Categorical._empty(shape, dtype=dtype)
assert isinstance(result, Categorical)
assert result.shape == shape
assert result._ndarray.dtype == np.int8
# case where repr would segfault if we didn't override base implementation
result = Categorical._empty((4096,), dtype=dtype)
assert isinstance(result, Categorical)
assert result.shape == (4096,)
assert result._ndarray.dtype == np.int8
repr(result)
# case with int16 codes
ci = CategoricalIndex(list(range(512)) * 4, ordered=False)
dtype = ci.dtype
result = Categorical._empty(shape, dtype=dtype)
assert isinstance(result, Categorical)
assert result.shape == shape
assert result._ndarray.dtype == np.int16
def test_empty_dt64tz(self):
dti = date_range("2016-01-01", periods=2, tz="Asia/Tokyo")
dtype = dti.dtype
shape = (0,)
result = DatetimeArray._empty(shape, dtype=dtype)
assert result.dtype == dtype
assert isinstance(result, DatetimeArray)
assert result.shape == shape
def test_empty_dt64(self):
shape = (3, 9)
result = DatetimeArray._empty(shape, dtype="datetime64[ns]")
assert isinstance(result, DatetimeArray)
assert result.shape == shape
def test_empty_td64(self):
shape = (3, 9)
result = TimedeltaArray._empty(shape, dtype="m8[ns]")
assert isinstance(result, TimedeltaArray)
assert result.shape == shape
def test_empty_pandas_array(self):
arr = NumpyExtensionArray(np.array([1, 2]))
dtype = arr.dtype
shape = (3, 9)
result = NumpyExtensionArray._empty(shape, dtype=dtype)
assert isinstance(result, NumpyExtensionArray)
assert result.dtype == dtype
assert result.shape == shape
| TestEmpty |
python | pennersr__django-allauth | allauth/mfa/recovery_codes/views.py | {
"start": 766,
"end": 1997
} | class ____(FormView):
form_class = GenerateRecoveryCodesForm
template_name = "mfa/recovery_codes/generate." + account_settings.TEMPLATE_EXTENSION
success_url = reverse_lazy("mfa_view_recovery_codes")
def form_valid(self, form):
flows.generate_recovery_codes(self.request)
return super().form_valid(form)
def get_context_data(self, **kwargs):
ret = super().get_context_data(**kwargs)
unused_codes = []
authenticator = Authenticator.objects.filter(
user=self.request.user, type=Authenticator.Type.RECOVERY_CODES
).first()
if authenticator:
unused_codes = authenticator.wrap().get_unused_codes()
ret["unused_code_count"] = len(unused_codes)
return ret
def get_form_kwargs(self):
ret = super().get_form_kwargs()
ret["user"] = self.request.user
return ret
def get_form_class(self):
return get_form_class(
app_settings.FORMS, "generate_recovery_codes", self.form_class
)
generate_recovery_codes = GenerateRecoveryCodesView.as_view()
@method_decorator(login_required, name="dispatch")
@method_decorator(never_cache, name="dispatch")
| GenerateRecoveryCodesView |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-directory/source_google_directory/source.py | {
"start": 159,
"end": 226
} | class ____(BaseSource):
client_class = Client
| SourceGoogleDirectory |
python | simonw__datasette | datasette/views/special.py | {
"start": 21673,
"end": 27054
} | class ____(BaseView):
name = "create_token"
has_json_alternate = False
def check_permission(self, request):
if not self.ds.setting("allow_signed_tokens"):
raise Forbidden("Signed tokens are not enabled for this Datasette instance")
if not request.actor:
raise Forbidden("You must be logged in to create a token")
if not request.actor.get("id"):
raise Forbidden(
"You must be logged in as an actor with an ID to create a token"
)
if request.actor.get("token"):
raise Forbidden(
"Token authentication cannot be used to create additional tokens"
)
async def shared(self, request):
self.check_permission(request)
# Build list of databases and tables the user has permission to view
db_page = await self.ds.allowed_resources("view-database", request.actor)
allowed_databases = [r async for r in db_page.all()]
table_page = await self.ds.allowed_resources("view-table", request.actor)
allowed_tables = [r async for r in table_page.all()]
# Build database -> tables mapping
database_with_tables = []
for db_resource in allowed_databases:
database_name = db_resource.parent
if database_name == "_memory":
continue
# Find tables for this database
tables = []
for table_resource in allowed_tables:
if table_resource.parent == database_name:
tables.append(
{
"name": table_resource.child,
"encoded": tilde_encode(table_resource.child),
}
)
database_with_tables.append(
{
"name": database_name,
"encoded": tilde_encode(database_name),
"tables": tables,
}
)
return {
"actor": request.actor,
"all_actions": self.ds.actions.keys(),
"database_actions": [
key for key, value in self.ds.actions.items() if value.takes_parent
],
"child_actions": [
key for key, value in self.ds.actions.items() if value.takes_child
],
"database_with_tables": database_with_tables,
}
async def get(self, request):
self.check_permission(request)
return await self.render(
["create_token.html"], request, await self.shared(request)
)
async def post(self, request):
self.check_permission(request)
post = await request.post_vars()
errors = []
expires_after = None
if post.get("expire_type"):
duration_string = post.get("expire_duration")
if (
not duration_string
or not duration_string.isdigit()
or not int(duration_string) > 0
):
errors.append("Invalid expire duration")
else:
unit = post["expire_type"]
if unit == "minutes":
expires_after = int(duration_string) * 60
elif unit == "hours":
expires_after = int(duration_string) * 60 * 60
elif unit == "days":
expires_after = int(duration_string) * 60 * 60 * 24
else:
errors.append("Invalid expire duration unit")
# Are there any restrictions?
restrict_all = []
restrict_database = {}
restrict_resource = {}
for key in post:
if key.startswith("all:") and key.count(":") == 1:
restrict_all.append(key.split(":")[1])
elif key.startswith("database:") and key.count(":") == 2:
bits = key.split(":")
database = tilde_decode(bits[1])
action = bits[2]
restrict_database.setdefault(database, []).append(action)
elif key.startswith("resource:") and key.count(":") == 3:
bits = key.split(":")
database = tilde_decode(bits[1])
resource = tilde_decode(bits[2])
action = bits[3]
restrict_resource.setdefault(database, {}).setdefault(
resource, []
).append(action)
token = self.ds.create_token(
request.actor["id"],
expires_after=expires_after,
restrict_all=restrict_all,
restrict_database=restrict_database,
restrict_resource=restrict_resource,
)
token_bits = self.ds.unsign(token[len("dstok_") :], namespace="token")
await self.ds.track_event(
CreateTokenEvent(
actor=request.actor,
expires_after=expires_after,
restrict_all=restrict_all,
restrict_database=restrict_database,
restrict_resource=restrict_resource,
)
)
context = await self.shared(request)
context.update({"errors": errors, "token": token, "token_bits": token_bits})
return await self.render(["create_token.html"], request, context)
| CreateTokenView |
python | scrapy__scrapy | tests/test_dupefilters.py | {
"start": 756,
"end": 954
} | class ____(RFPDupeFilter):
@classmethod
def from_crawler(cls, crawler):
df = super().from_crawler(crawler)
df.method = "from_crawler"
return df
| FromCrawlerRFPDupeFilter |
python | huggingface__transformers | src/transformers/models/cohere/modeling_cohere.py | {
"start": 17772,
"end": 20923
} | class ____(CoherePreTrainedModel):
def __init__(self, config: CohereConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
self.layers = nn.ModuleList(
[CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
)
self.norm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps)
self.rotary_emb = CohereRotaryEmbedding(config=config)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = DynamicCache(config=self.config)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position: torch.Tensor = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
hidden_states = inputs_embeds
position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
hidden_states = decoder_layer(
hidden_states,
attention_mask=causal_mask,
position_embeddings=position_embeddings,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.norm(hidden_states)
return BaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
@auto_docstring
| CohereModel |
python | pytorch__pytorch | torch/_lazy/extract_compiled_graph.py | {
"start": 1853,
"end": 8453
} | class ____:
r"""
When ltc_sync_multi is called on multi tensors, the compiled graph
will contain output only for unique tensors - if a tensor appears multiple
times in the input to _ltc_sync_multi, only the first occurrence matters.
However from python level, we still expect multi tensors returned with duplication
even if the TS graph dedup the output. e.g. for method:
def forward(self, a):
return a, a
the TS graph captured by LTC will return a single tensor, but Python method expects 2.
This class dedup the lazy tensors first to get the index that will be used
to duplicate the eager tensors later.
"""
def __init__(self, lazy_out_list):
self.index: list[list[int]] = []
self.total_count = len(lazy_out_list)
tensor_id_to_idx: dict[int, int] = {}
for dup_idx, lazy_tensor in enumerate(lazy_out_list):
uniq_idx = tensor_id_to_idx.get(id(lazy_tensor), None)
if uniq_idx is not None:
self.index[uniq_idx].append(dup_idx)
else:
uniq_idx = len(self.index)
self.index.append([dup_idx])
tensor_id_to_idx[id(lazy_tensor)] = uniq_idx
def duplicate_eager_tensors(self, eager_tensor_list):
duplicated_list = [None] * self.total_count
assert len(eager_tensor_list) == len(self.index)
for uniq_idx, eager_tensor in enumerate(eager_tensor_list):
for dup_idx in self.index[uniq_idx]:
duplicated_list[dup_idx] = eager_tensor
return duplicated_list
def force_lazy_device(model: fx.GraphModule):
"""
Factory methods in a Fx graph may create tensors for a specific eager devices.
If we take no actions, those eager tensors will be mixed with lazy tensors and
cause crash. This method overwrite those eager device to lazy device.
"""
def tolazydevice(dev):
if isinstance(dev, torch.device):
return torch.device("lazy", index=dev.index)
return dev
def hasDeviceArg(args, kwargs):
return any(
isinstance(arg, torch.device)
for arg in itertools.chain(args, kwargs.values())
)
for nd in model.graph.nodes:
nd.args = tuple(tolazydevice(arg) for arg in nd.args)
nd.kwargs = {k: tolazydevice(v) for k, v in nd.kwargs.items()}
# For torchbench like yolov3, hf_Bart, dynamo generates Fx graph that return
# eager tensors on the default device
# (check https://gist.github.com/shunting314/eabdf6c769c59bc384469717b8f9bb7f for yolove,
# and https://gist.github.com/shunting314/8d5e2d9348a3258959d3954186c48814 for hf_Bart).
# To force those tensors on the lazy device, we can not simply override
# the device argument since there is no explicit device argument.
# What we are doing here is, for the list of covered tensor factory methods
# we add a lazy device argument explicitly.
#
# TODO: This solution is no ideal since we may miss some factory methods. In future
# when we support lazy mode, this method can be replaced by that.
if nd.target in tensor_factory_functions and not hasDeviceArg(
nd.args, nd.kwargs
):
kwargs = dict(nd.kwargs) # nd.kwargs is immutable. make a mutable copy.
kwargs["device"] = torch.device("lazy")
nd.kwargs = kwargs
model.recompile()
def get_fallback_ops():
fallback_ops = []
for opname in metrics.counter_names():
if "aten::" not in opname:
continue
val = int(metrics.counter_value(opname))
if val > 0:
fallback_ops.append(f"{opname}={val}")
return fallback_ops
def extract_compiled_graph(model: fx.GraphModule, example_inputs) -> Callable:
"""
Optimize an eager model with LTC and returns a wrapper to execute the
compiled graph directly without retracing. It depends on other mechanisms
like TorchDynamo guards to guarantee the returned wrapper is only called
when it's safe.
"""
lazy_args = [arg.to(device="lazy") for arg in example_inputs]
args_tensor_ids = [lazy.get_tensor_id(lazy_arg) for lazy_arg in lazy_args]
tensor_id_to_arg_idx = {tensor_id: i for i, tensor_id in enumerate(args_tensor_ids)}
lazy_model = copy.deepcopy(model).to(device=torch.device("lazy"))
force_lazy_device(lazy_model)
# This line executes lazy tracing and enable us extracting compiled graph later
metrics.reset()
lazy_out = lazy_model(*lazy_args)
fallback_ops = get_fallback_ops()
metrics.reset()
if len(fallback_ops) > 0:
raise RuntimeError(
f"Fail to extract the compiled graph because of fallback: {','.join(fallback_ops)}"
)
if not isinstance(lazy_out, (tuple, list)):
lazy_out = (lazy_out,)
args_and_out = tuple(lazy_args) + tuple(lazy_out)
return_value_handler = ReturnValueHandler(args_and_out)
if debug:
print("Fx code:\n", model.code)
print("LTC IR:", lazy_debug.dump_ir(args_and_out, "text"))
# TODO: this part is TS backend specific for now and will be generalized to
# support XLA
(
graph_input_tensor_ids,
graph_input_ivalues,
) = computation.get_tensors_ts_device_data_node(args_and_out)
assert len(graph_input_tensor_ids) == len(graph_input_ivalues)
graph_input_matcher = GraphInputMatcher(
tensor_id_to_arg_idx, graph_input_tensor_ids, graph_input_ivalues
)
graph_hash = computation.get_graph_hash(args_and_out)
if debug:
print("graph_hash", graph_hash)
print(f"args_tensor_ids {args_tensor_ids}")
print("tensor ids from device data:", graph_input_tensor_ids)
# sync the list of output tensors so the computation graph for these
# tensors will be cached. Those computation graphs can be retrieved
# by graph hash later.
lazy.sync_multi(args_and_out, [])
def optimized_mod(*args):
if len(args_and_out) == 0:
return ()
graph_input = graph_input_matcher(args)
res = return_value_handler.duplicate_eager_tensors(
computation.run_cached_graph(graph_hash, graph_input)
)
assert len(res) == len(args_and_out)
for i, arg in enumerate(args):
# only copy those tensors that get inplace updated
if arg is not res[i]:
arg.copy_(res[i])
# skip the args
return res[len(args) :]
return optimized_mod
| ReturnValueHandler |
python | scipy__scipy | benchmarks/benchmarks/signal_filtering.py | {
"start": 278,
"end": 799
} | class ____(Benchmark):
param_names = ['q', 'ftype', 'zero_phase']
params = [
[2, 10, 30],
['iir', 'fir'],
[True, False]
]
def setup(self, q, ftype, zero_phase):
np.random.seed(123456)
sample_rate = 10000.
t = np.arange(int(1e6), dtype=np.float64) / sample_rate
self.sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*4e3*t)
def time_decimate(self, q, ftype, zero_phase):
decimate(self.sig, q, ftype=ftype, zero_phase=zero_phase)
| Decimate |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1248721,
"end": 1248970
} | class ____(sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData):
"""Audit log entry for a org.config.enable_collaborators_only event."""
__schema__ = github_schema
__field_names__ = ()
| OrgConfigEnableCollaboratorsOnlyAuditEntry |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.