Dataset schema (column, type, value stats):

  language      stringclasses    1 value
  repo          stringclasses    346 values
  path          stringlengths    6 to 201
  class_span    dict
  source        stringlengths    21 to 2.38M
  target        stringlengths    1 to 96
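Each row below pairs a Python source snippet whose class name is masked with `____` (the `source` column) with the original class name (the `target` column); `class_span` appears to hold the character offsets of the class within the original file. A minimal sketch of consuming a row under those assumed semantics (the helper name `restore_class_name` and the abbreviated example row are illustrative, not part of the dataset):

def restore_class_name(row: dict) -> str:
    """Substitute the masked class name back into the source snippet."""
    # Only the first occurrence of the mask is the class-name slot.
    return row["source"].replace("____", row["target"], 1)

row = {
    "language": "python",
    "repo": "tornadoweb__tornado",
    "path": "demos/helloworld/helloworld.py",
    "class_span": {"start": 744, "end": 1155},
    "source": "class ____(tornado.web.RequestHandler): ...",
    "target": "MainHandler",
}
print(restore_class_name(row))  # class MainHandler(tornado.web.RequestHandler): ...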
python
tornadoweb__tornado
demos/helloworld/helloworld.py
{ "start": 744, "end": 1155 }
class ____(tornado.web.RequestHandler):
    def get(self):
        self.write("Hello, world")


async def main():
    tornado.options.parse_command_line()
    application = tornado.web.Application([(r"/", MainHandler)])
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(options.port)
    await asyncio.Event().wait()


if __name__ == "__main__":
    asyncio.run(main())
MainHandler
python
tiangolo__fastapi
docs_src/path_operation_configuration/tutorial004.py
{ "start": 109, "end": 681 }
class ____(BaseModel):
    name: str
    description: Union[str, None] = None
    price: float
    tax: Union[float, None] = None
    tags: Set[str] = set()


@app.post("/items/", response_model=Item, summary="Create an item")
async def create_item(item: Item):
    """
    Create an item with all the information:

    - **name**: each item must have a name
    - **description**: a long description
    - **price**: required
    - **tax**: if the item doesn't have tax, you can omit this
    - **tags**: a set of unique tag strings for this item
    """
    return item
Item
python
django__django
django/db/backends/ddl_references.py
{ "start": 162, "end": 1314 }
class ____: """Base class that defines the reference interface.""" def references_table(self, table): """ Return whether or not this instance references the specified table. """ return False def references_column(self, table, column): """ Return whether or not this instance references the specified column. """ return False def references_index(self, table, index): """ Return whether or not this instance references the specified index. """ return False def rename_table_references(self, old_table, new_table): """ Rename all references to the old_name to the new_table. """ pass def rename_column_references(self, table, old_column, new_column): """ Rename all references to the old_column to the new_column. """ pass def __repr__(self): return "<%s %r>" % (self.__class__.__name__, str(self)) def __str__(self): raise NotImplementedError( "Subclasses must define how they should be converted to string." )
Reference
python
jmcnamara__XlsxWriter
xlsxwriter/test/comparison/test_chart_bar22.py
{ "start": 315, "end": 2167 }
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename("chart_bar22.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({"type": "bar"})

        chart.axis_ids = [43706240, 43727104]

        headers = ["Series 1", "Series 2", "Series 3"]
        data = [
            ["Category 1", "Category 2", "Category 3", "Category 4"],
            [4.3, 2.5, 3.5, 4.5],
            [2.4, 4.5, 1.8, 2.8],
            [2, 2, 3, 5],
        ]

        worksheet.set_column("A:D", 11)

        worksheet.write_row("B1", headers)
        worksheet.write_column("A2", data[0])
        worksheet.write_column("B2", data[1])
        worksheet.write_column("C2", data[2])
        worksheet.write_column("D2", data[3])

        chart.add_series(
            {
                "categories": "=Sheet1!$A$2:$A$5",
                "values": "=Sheet1!$B$2:$B$5",
                "categories_data": data[0],
                "values_data": data[1],
            }
        )
        chart.add_series(
            {
                "categories": "=Sheet1!$A$2:$A$5",
                "values": "=Sheet1!$C$2:$C$5",
                "categories_data": data[0],
                "values_data": data[2],
            }
        )
        chart.add_series(
            {
                "categories": "=Sheet1!$A$2:$A$5",
                "values": "=Sheet1!$D$2:$D$5",
                "categories_data": data[0],
                "values_data": data[3],
            }
        )

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
TestCompareXLSXFiles
python
PrefectHQ__prefect
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
{ "start": 632970, "end": 633289 }
class ____(sgqlc.types.Type):
    """
    See source code for more info.
    """

    __schema__ = graphql_schema
    __field_names__ = ("cursor", "node")
    cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
    node = sgqlc.types.Field("SponsorsTier", graphql_name="node")
SponsorsTierEdge
python
pyca__cryptography
tests/hazmat/primitives/test_serialization.py
{ "start": 21897, "end": 52143 }
class ____:
    @pytest.mark.parametrize(
        ("key_file", "password"),
        [
            (["PEM_Serialization", "rsa_private_key.pem"], b"123456"),
            (["PKCS8", "unenc-rsa-pkcs8.pem"], None),
            (["PKCS8", "enc-rsa-pkcs8.pem"], b"foobar"),
            (["PKCS8", "enc2-rsa-pkcs8.pem"], b"baz"),
            (["PKCS8", "pkcs12_s2k_pem-X_9607.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9671.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9925.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9926.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9927.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9928.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9929.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9930.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9931.pem"], b"123456"),
            (["PKCS8", "pkcs12_s2k_pem-X_9932.pem"], b"123456"),
            (["Traditional_OpenSSL_Serialization", "key1.pem"], b"123456"),
            (["Traditional_OpenSSL_Serialization", "key2.pem"], b"a123456"),
            (["Traditional_OpenSSL_Serialization", "testrsa.pem"], None),
            (
                ["Traditional_OpenSSL_Serialization", "testrsa-encrypted.pem"],
                b"password",
            ),
        ],
    )
    def test_load_pem_rsa_private_key(self, key_file, password, backend):
        _skip_fips_format(key_file, password, backend)
        key = load_vectors_from_file(
            os.path.join("asymmetric", *key_file),
            lambda pemfile: load_pem_private_key(
                pemfile.read().encode(),
                password,
                unsafe_skip_rsa_key_validation=True,
            ),
        )
        assert key
        assert isinstance(key, rsa.RSAPrivateKey)
        _check_rsa_private_numbers(key.private_numbers())

    @pytest.mark.supported(
        only_if=lambda backend: backend.dsa_supported(),
        skip_message="Does not support DSA.",
    )
    @pytest.mark.parametrize(
        ("key_path", "password"),
        [
            (["Traditional_OpenSSL_Serialization", "dsa.1024.pem"], None),
            (["Traditional_OpenSSL_Serialization", "dsa.2048.pem"], None),
            (["Traditional_OpenSSL_Serialization", "dsa.3072.pem"], None),
            (["PKCS8", "unenc-dsa-pkcs8.pem"], None),
            (["PEM_Serialization", "dsa_private_key.pem"], b"123456"),
        ],
    )
    def test_load_dsa_private_key(self, key_path, password, backend):
        _skip_fips_format(key_path, password, backend)
        key = load_vectors_from_file(
            os.path.join("asymmetric", *key_path),
            lambda pemfile: load_pem_private_key(
                pemfile.read().encode(), password, backend
            ),
        )
        assert key
        assert isinstance(key, dsa.DSAPrivateKey)
        _check_dsa_private_numbers(key.private_numbers())

    @pytest.mark.parametrize(
        ("key_path", "password"),
        [
            (["PKCS8", "ec_private_key.pem"], None),
            (["PKCS8", "ec_private_key_encrypted.pem"], b"123456"),
            (["PEM_Serialization", "ec_private_key.pem"], None),
            (["PEM_Serialization", "ec_private_key_encrypted.pem"], b"123456"),
        ],
    )
    def test_load_pem_ec_private_key(self, key_path, password, backend):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        _skip_fips_format(key_path, password, backend)
        key = load_vectors_from_file(
            os.path.join("asymmetric", *key_path),
            lambda pemfile: load_pem_private_key(
                pemfile.read().encode(), password, backend
            ),
        )
        assert key
        assert isinstance(key, ec.EllipticCurvePrivateKey)
        assert key.curve.name == "secp256r1"
        assert key.curve.key_size == 256

    @pytest.mark.parametrize(
        ("key_file"),
        [
            os.path.join("asymmetric", "PKCS8", "unenc-rsa-pkcs8.pub.pem"),
            os.path.join(
                "asymmetric", "PEM_Serialization", "rsa_public_key.pem"
            ),
            os.path.join("asymmetric", "public", "PKCS1", "rsa.pub.pem"),
            os.path.join(
                "asymmetric",
                "PEM_Serialization",
                "rsa_wrong_delimiter_public_key.pem",
            ),
        ],
    )
    def test_load_pem_rsa_public_key(self, key_file, backend):
        key = load_vectors_from_file(
            key_file,
            lambda pemfile: load_pem_public_key(
                pemfile.read().encode(), backend
            ),
        )
        assert key
        assert isinstance(key, rsa.RSAPublicKey)
        numbers = key.public_numbers()
        assert numbers.e == 65537

    def test_load_pem_public_fails_with_ec_key_with_rsa_delimiter(self):
        with pytest.raises(ValueError):
            load_vectors_from_file(
                os.path.join(
                    "asymmetric",
                    "PEM_Serialization",
                    "ec_public_key_rsa_delimiter.pem",
                ),
                lambda pemfile: load_pem_public_key(pemfile.read().encode()),
            )

    def test_load_priv_key_with_public_key_api_fails(
        self, rsa_key_2048, backend
    ):
        # In OpenSSL 3.0.x the PEM_read_bio_PUBKEY function will invoke
        # the default password callback if you pass an encrypted private
        # key. This is very, very, very bad as the default callback can
        # trigger an interactive console prompt, which will hang the
        # Python process. This test makes sure we don't do that.
        priv_key_serialized = rsa_key_2048.private_bytes(
            Encoding.PEM,
            PrivateFormat.PKCS8,
            BestAvailableEncryption(b"password"),
        )
        with pytest.raises(ValueError):
            load_pem_public_key(priv_key_serialized)

    @pytest.mark.supported(
        only_if=lambda backend: backend.dsa_supported(),
        skip_message="Does not support DSA.",
    )
    @pytest.mark.parametrize(
        ("key_file"),
        [
            os.path.join("asymmetric", "PKCS8", "unenc-dsa-pkcs8.pub.pem"),
            os.path.join(
                "asymmetric", "PEM_Serialization", "dsa_public_key.pem"
            ),
        ],
    )
    def test_load_pem_dsa_public_key(self, key_file, backend):
        key = load_vectors_from_file(
            key_file,
            lambda pemfile: load_pem_public_key(
                pemfile.read().encode(), backend
            ),
        )
        assert key
        assert isinstance(key, dsa.DSAPublicKey)

    def test_load_ec_public_key(self, backend):
        _skip_curve_unsupported(backend, ec.SECP256R1())
        key = load_vectors_from_file(
            os.path.join(
                "asymmetric", "PEM_Serialization", "ec_public_key.pem"
            ),
            lambda pemfile: load_pem_public_key(
                pemfile.read().encode(), backend
            ),
        )
        assert key
        assert isinstance(key, ec.EllipticCurvePublicKey)
        assert key.curve.name == "secp256r1"
        assert key.curve.key_size == 256

    @pytest.mark.skip_fips(
        reason="Traditional OpenSSL format blocked in FIPS mode"
    )
    def test_rsa_traditional_encrypted_values(self, backend):
        pkey = load_vectors_from_file(
            os.path.join(
                "asymmetric", "Traditional_OpenSSL_Serialization", "key1.pem"
            ),
            lambda pemfile: load_pem_private_key(
                pemfile.read().encode(),
                b"123456",
                unsafe_skip_rsa_key_validation=True,
            ),
        )
        assert isinstance(pkey, rsa.RSAPrivateKey)

        numbers = pkey.private_numbers()
        assert numbers.p == int(
            "f8337fbcd4b54e14d4226889725d9dc713e40c87e62ce1886a517c729b3d133d"
            "c519bfb026081788509d2b503bc0966bdc67c45771e41f9844cee1be968b3263"
            "735d6c47d981dacfde1fe2110c4acbfe656599890b8f131c20d246891959f45d"
            "06d4fadf205f94f9ea050c661efdc760d7471a1963bf16333837ef6dc4f8dbaf",
            16,
        )
        assert numbers.q == int(
            "bf8c2ad54acf67f8b687849f91ece4761901e8abc8b0bc8604f55e64ad413a62"
            "02dbb28eac0463f87811c1ca826b0eeafb53d115b50de5a775f74c5e9cf8161b"
            "fc030f5e402664388ea1ef7d0ade85559e4e68cef519cb4f582ec41f994249d8"
            "b860a7433f0612322827a87b3cc0d785075811b76bccbc90ff153a11592fa307",
            16,
        )
        assert numbers.d == int(
            "09a768d21f58866d690aeb78f0d92732aa03fa843f960b0799dfc31e7d73f1e6"
            "503953c582becd4de92d293b3a86a42b2837531fdfc54db75e0d30701801a85c"
            "120e997bce2b19290234710e2fd4cbe750d3fdaab65893c539057a21b8a2201b"
            "4e418b6dff47423905a8e0b17fdd14bd3b0834ccb0a7c203d8e62e6ab4c6552d"
            "9b777847c874e743ac15942a21816bb177919215ee235064fb0a7b3baaafac14"
            "92e29b2fc80dc16b633525d83eed73fa47a55a9894148a50358eb94c62b19e84"
            "f3d7daf866cd6a606920d54ba41d7aa648e777d5269fe00b12a8cf5ccf823f62"
            "c1e8dc442ec3a7e3356913f444919baa4a5c7299345817543b4add5f9c1a477f",
            16,
        )
        assert numbers.dmp1 == int(
            "e0cdcc51dd1b0648c9470d0608e710040359179c73778d2300a123a5ae43a84c"
            "d75c1609d6b8978fe8ec2211febcd5c186151a79d57738c2b2f7eaf1b3eb09cd"
            "97ed3328f4b1afdd7ca3c61f88d1aa6895b06b5afc742f6bd7b27d1eaa2e96ad"
            "3785ea5ff4337e7cc9609f3553b6aa42655a4a225afcf57f98d8d8ecc46e5e93",
            16,
        )
        assert numbers.dmq1 == int(
            "904aeda559429e870c315025c88e9497a644fada154795ecbb657f6305e4c22f"
            "3d09f51b66d7b3db63cfb49571e3660c7ba16b3b17f5cd0f765d0189b0636e7c"
            "4c3e9de0192112944c560e8bba996005dc4822c9ec772ee1a9832938c881d811"
            "4aeb7c74bad03efacba6fc5341b3df6695deb111e44209b68c819809a38eb017",
            16,
        )
        assert numbers.iqmp == int(
            "378a3ae1978c381dce3b486b038601cf06dfa77687fdcd2d56732380bff4f32e"
            "ec20027034bcd53be80162e4054ab7fefdbc3e5fe923aa8130d2c9ab01d6a70f"
            "da3615f066886ea610e06c29cf5c2e0649a40ca936f290b779cd9e2bc3b87095"
            "26667f75a1016e268ae3b9501ae4696ec8c1af09dc567804151fdeb1486ee512",
            16,
        )
        assert numbers.public_numbers.e == 65537
        assert numbers.public_numbers.n == int(
            "b9b651fefc4dd4c9b1c0312ee69f0803990d5a539785dd14f1f6880d9198ee1f"
            "71b3babb1ebe977786b30bea170f24b7a0e7b116f2c6908cf374923984924187"
            "86de9d4e0f5f3e56d7be9eb971d3f8a4f812057cf9f9053b829d1c54d1a340fe"
            "5c90a6e228a5871da900770141b4c6e6f298409718cb16467a4f5ff63882b204"
            "255028f49745dedc7ca4b5cba6d78acf32b650f06bf81862eda0856a14e8767e"
            "d4086342284a6f9752e96435f7119a05cc3220a954774a931dbebe1f1ab0df9d"
            "aeaedb132741c3b5c48e1a1426ccd954fb9b5140c14daec9a79be9c7c8e50610"
            "dfb489c7539999cfc14ac75765bab4ae8a8df5d96c3de34c12435b1a02cf6ec9",
            16,
        )

    @pytest.mark.parametrize(
        "key_path",
        [
            ["Traditional_OpenSSL_Serialization", "testrsa.pem"],
            ["PKCS8", "unenc-rsa-pkcs8.pem"],
        ],
    )
    def test_unused_password(self, key_path, backend):
        key_file = os.path.join("asymmetric", *key_path)
        password = b"this password will not be used"
        with pytest.raises(TypeError):
            load_vectors_from_file(
                key_file,
                lambda pemfile: load_pem_private_key(
                    pemfile.read().encode(), password, backend
                ),
            )

    def test_invalid_encoding_with_traditional(self, backend):
        key_file = os.path.join(
            "asymmetric", "Traditional_OpenSSL_Serialization", "testrsa.pem"
        )
        key = load_vectors_from_file(
            key_file,
            lambda pemfile: load_pem_private_key(
                pemfile.read(), None, unsafe_skip_rsa_key_validation=True
            ),
            mode="rb",
        )
        for enc in (Encoding.OpenSSH, Encoding.Raw, Encoding.X962):
            with pytest.raises(ValueError):
                key.private_bytes(
                    enc, PrivateFormat.TraditionalOpenSSL, NoEncryption()
                )

    @pytest.mark.parametrize(
        "key_path",
        [
            ["Traditional_OpenSSL_Serialization", "testrsa-encrypted.pem"],
            ["PKCS8", "enc-rsa-pkcs8.pem"],
        ],
    )
    def test_password_not_bytes(self, key_path, backend):
        key_file = os.path.join("asymmetric", *key_path)
        password = "this password is not bytes"
        with pytest.raises(TypeError):
            load_vectors_from_file(
                key_file,
                lambda pemfile: load_pem_private_key(
                    pemfile.read().encode(),
                    password,  # type:ignore[arg-type]
                    backend,
                ),
            )

    @pytest.mark.parametrize(
        "key_path",
        [
            ["Traditional_OpenSSL_Serialization", "testrsa-encrypted.pem"],
            ["PKCS8", "enc-rsa-pkcs8.pem"],
        ],
    )
    def test_wrong_password(self, key_path, backend):
        key_file = os.path.join("asymmetric", *key_path)
        password = b"this password is wrong"
        with pytest.raises(ValueError):
            load_vectors_from_file(
                key_file,
                lambda pemfile: load_pem_private_key(
                    pemfile.read().encode(), password, backend
                ),
            )

    @pytest.mark.parametrize(
        ("key_path", "password"),
        itertools.product(
            [
                ["Traditional_OpenSSL_Serialization", "testrsa-encrypted.pem"],
                ["PKCS8", "enc-rsa-pkcs8.pem"],
            ],
            [b"", None],
        ),
    )
    def test_missing_password(self, key_path, password, backend):
        key_file = os.path.join("asymmetric", *key_path)
        with pytest.raises(TypeError):
            load_vectors_from_file(
                key_file,
                lambda pemfile: load_pem_private_key(
                    pemfile.read().encode(), password, backend
                ),
            )

    def test_wrong_private_format(self, backend):
        key_data = b"---- NOT A KEY ----\n"
        with pytest.raises(ValueError):
            load_pem_private_key(key_data, None, backend)

        with pytest.raises(ValueError):
            load_pem_private_key(
                key_data, b"this password will not be used", backend
            )

    def test_wrong_public_format(self, backend):
        key_data = b"---- NOT A KEY ----\n"
        with pytest.raises(ValueError):
            load_pem_public_key(key_data, backend)

    @pytest.mark.supported(
        only_if=lambda backend: backend.dh_supported(),
        skip_message="DH not supported",
    )
    def test_wrong_parameters_format(self, backend):
        param_data = b"---- NOT A KEY ----\n"
        with pytest.raises(ValueError):
            load_pem_parameters(param_data, backend)

    def test_corrupt_traditional_format(self, backend):
        # privkey.pem with a bunch of data missing.
        key_data = textwrap.dedent(
            """\
        -----BEGIN RSA PRIVATE KEY-----
        MIIBPAIBAAJBAKrbeqkuRk8VcRmWFmtP+LviMB3+6dizWW3DwaffznyHGAFwUJ/I
        Tv0XtbsCyl3QoyKGhrOAy3RvPK5M38iuXT0CAwEAAQJAZ3cnzaHXM/bxGaR5CR1R
        rD1qFBAVfoQFiOH9uPJgMaoAuoQEisPHVcZDKcOv4wEg6/TInAIXBnEigtqvRzuy
        mvcpHZwQJdmdHHkGKAs37Dfxi67HbkUCIQCeZGliHXFa071Fp06ZeWlR2ADonTZz
        rJBhdTe0v5pCeQIhAIZfkiGgGBX4cIuuckzEm43g9WMUjxP/0GlK39vIyihxAiEA
        mymehFRT0MvqW5xAKAx7Pgkt8HVKwVhc2LwGKHE0DZM=
        -----END RSA PRIVATE KEY-----
        """
        ).encode()
        with pytest.raises(ValueError):
            load_pem_private_key(key_data, None, backend)

        with pytest.raises(ValueError):
            load_pem_private_key(
                key_data, b"this password will not be used", backend
            )

    def test_traditional_encrypted_corrupt_format(self, backend):
        # privkey.pem with a single bit flipped
        key_data = textwrap.dedent(
            """\
        -----BEGIN RSA PRIVATE KEY-----
        Proc-Type: <,ENCRYPTED
        DEK-Info: AES-128-CBC,5E22A2BD85A653FB7A3ED20DE84F54CD

        hAqtb5ZkTMGcs4BBDQ1SKZzdQThWRDzEDxM3qBfjvYa35KxZ54aic013mW/lwj2I
        v5bbpOjrHYHNAiZYZ7RNb+ztbF6F/g5PA5g7mFwEq+LFBY0InIplYBSv9QtE+lot
        Dy4AlZa/+NzJwgdKDb+JVfk5SddyD4ywnyeORnMPy4xXKvjXwmW+iLibZVKsjIgw
        H8hSxcD+FhWyJm9h9uLtmpuqhQo0jTUYpnTezZx2xeVPB53Ev7YCxR9Nsgj5GsVf
        9Z/hqLB7IFgM3pa0z3PQeUIZF/cEf72fISWIOBwwkzVrPUkXWfbuWeJXQXSs3amE
        5A295jD9BQp9CY0nNFSsy+qiXWToq2xT3y5zVNEStmN0SCGNaIlUnJzL9IHW+oMI
        kPmXZMnAYBWeeCF1gf3J3aE5lZInegHNfEI0+J0LazC2aNU5Dg/BNqrmRqKWEIo/
        -----END RSA PRIVATE KEY-----
        """
        ).encode()

        password = b"this password is wrong"

        with pytest.raises(ValueError):
            load_pem_private_key(key_data, None, backend)

        with pytest.raises(ValueError):
            load_pem_private_key(key_data, password, backend)

    def test_unsupported_key_encryption(self, backend):
        key_data = textwrap.dedent(
            """\
        -----BEGIN RSA PRIVATE KEY-----
        Proc-Type: 4,ENCRYPTED
        DEK-Info: FAKE-123,5E22A2BD85A653FB7A3ED20DE84F54CD

        hAqtb5ZkTMGcs4BBDQ1SKZzdQThWRDzEDxM3qBfjvYa35KxZ54aic013mW/lwj2I
        v5bbpOjrHYHNAiZYZ7RNb+ztbF6F/g5PA5g7mFwEq+LFBY0InIplYBSv9QtE+lot
        Dy4AlZa/+NzJwgdKDb+JVfk5SddyD4ywnyeORnMPy4xXKvjXwmW+iLibZVKsjIgw
        H8hSxcD+FhWyJm9h9uLtmpuqhQo0jTUYpnTezZx2xeVPB53Ev7YCxR9Nsgj5GsVf
        9Z/hqLB7IFgM3pa0z3PQeUIZF/cEf72fISWIOBwwkzVrPUkXWfbuWeJXQXSs3amE
        5A295jD9BQp9CY0nNFSsy+qiXWToq2xT3y5zVNEStmN0SCGNaIlUnJzL9IHW+oMI
        kPmXZMnAYBWeeCF1gf3J3aE5lZInegHNfEI0+J0LazC2aNU5Dg/BNqrmRqKWEIo/
        -----END RSA PRIVATE KEY-----
        """
        ).encode()

        password = b"password"

        with pytest.raises(ValueError):
            load_pem_private_key(key_data, password, backend)

    def test_corrupt_pkcs8_format(self, backend):
        # unenc-rsa-pkcs8.pem with a bunch of data missing.
        key_data = textwrap.dedent(
            """\
        -----BEGIN PRIVATE KEY-----
        MIICdQIBADALBgkqhkiG9w0BAQEEggJhMIICXQIBAAKBgQC7JHoJfg6yNzLMOWet
        8Z49a4KD0dCspMAYvo2YAMB7/wdEycocujbhJ2n/seONi+5XqTqqFkM5VBl8rmkk
        FPZk/7x0xmdsTPECSWnHK+HhoaNDFPR3j8jQhVo1laxiqcEhAHegi5cwtFosuJAv
        FiRC0Cgz+frQPFQEBsAV9RuasyQxqzxrR0Ow0qncBeGBWbYE6WZhqtcLAI895b+i
        +F4lbB4iD7T9QeIDMU/aIMXA81UO4cns1z4qDAHKeyLLrPQrJ/B4X7XC+egUWm5+
        hr1qmyAMusyXIBECQQDJWZ8piluf4yrYfsJAn6hF5T4RjTztbqvO0GVG2McHY7Uj
        NPSffhzHx/ll0fQEQji+OgydCCX8o3HZrgw5YfSJAkEA7e+rqdU5nO5ZG//PSEQb
        tjLnRiTzBH/elQhtdZ5nF7pcpNTi4k13zutmKcWW4GK75azcRGJUhu1kDM7QYAOd
        SQJAVNkYcifkvna7GmooL5VYEsQsqLbM4v0NF2TIGNfG3z1MGp75KrC5LhL97MNR
        we2p/bd2k0HYyCKUGnf2nMPDiQJBAI75pwittSoE240EobUGIDTSz8CJsXIxuDmL
        z+KOpdpPRR5TQmbEMEspjsFpFymMiuYPgmihQbO2cJl1qScY5OkCQQCJ6m5tcN8l
        Xxg/SNpjEIv+qAyUD96XVlOJlOIeLHQ8kYE0C6ZA+MsqYIzgAreJk88Yn0lU/X0/
        mu/UpE/BRZmR
        -----END PRIVATE KEY-----
        """
        ).encode()
        with pytest.raises(ValueError):
            load_pem_private_key(key_data, None, backend)

        with pytest.raises(ValueError):
            load_pem_private_key(
                key_data, b"this password will not be used", backend
            )

    def test_pks8_encrypted_corrupt_format(self, backend):
        # enc-rsa-pkcs8.pem with some bits flipped.
        key_data = textwrap.dedent(
            """\
        -----BEGIN ENCRYPTED PRIVATE KEY-----
        MIICojAcBgoqhkiG9w0BDAEDMA4ECHK0M0+QuEL9AgIBIcSCAoDRq+KRY+0XP0tO
        lwBTzViiXSXoyNnKAZKt5r5K/fGNntv22g/1s/ZNCetrqsJDC5eMUPPacz06jFq/
        Ipsep4/OgjQ9UAOzXNrWEoNyrHnWDo7usgD3CW0mKyqER4+wG0adVMbt3N+CJHGB
        85jzRmQTfkdx1rSWeSx+XyswHn8ER4+hQ+omKWMVm7AFkjjmP/KnhUnLT98J8rhU
        ArQoFPHz/6HVkypFccNaPPNg6IA4aS2A+TU9vJYOaXSVfFB2yf99hfYYzC+ukmuU
        5Lun0cysK5s/5uSwDueUmDQKspnaNyiaMGDxvw8hilJc7vg0fGObfnbIpizhxJwq
        gKBfR7Zt0Hv8OYi1He4MehfMGdbHskztF+yQ40LplBGXQrvAqpU4zShga1BoQ98T
        0ekbBmqj7hg47VFsppXR7DKhx7G7rpMmdKbFhAZVCjae7rRGpUtD52cpFdPhMyAX
        huhMkoczwUW8B/rM4272lkHo6Br0yk/TQfTEGkvryflNVu6lniPTV151WV5U1M3o
        3G3a44eDyt7Ln+WSOpWtbPQMTrpKhur6WXgJvrpa/m02oOGdvOlDsoOCgavgQMWg
        7xKKL7620pHl7p7f/8tlE8q6vLXVvyNtAOgt/JAr2rgvrHaZSzDE0DwgCjBXEm+7
        cVMVNkHod7bLQefVanVtWqPzbmr8f7gKeuGwWSG9oew/lN2hxcLEPJHAQlnLgx3P
        0GdGjK9NvwA0EP2gYIeE4+UtSder7xQ7bVh25VB20R4TTIIs4aXXCVOoQPagnzaT
        6JLgl8FrvdfjHwIvmSOO1YMNmILBq000Q8WDqyErBDs4hsvtO6VQ4LeqJj6gClX3
        qeJNaJFu
        -----END ENCRYPTED PRIVATE KEY-----
        """
        ).encode()

        password = b"this password is wrong"

        with pytest.raises(ValueError):
            load_pem_private_key(key_data, None, backend)

        with pytest.raises(ValueError):
            load_pem_private_key(key_data, password, backend)

    @pytest.mark.skip_fips(reason="non-FIPS parameters")
    def test_rsa_pkcs8_encrypted_values(self, backend):
        pkey = load_vectors_from_file(
            os.path.join("asymmetric", "PKCS8", "enc-rsa-pkcs8.pem"),
            lambda pemfile: load_pem_private_key(
                pemfile.read().encode(),
                b"foobar",
                unsafe_skip_rsa_key_validation=True,
            ),
        )
        assert isinstance(pkey, rsa.RSAPrivateKey)

        numbers = pkey.private_numbers()
        assert numbers.public_numbers.n == int(
            "00beec64d6db5760ac2fd4c971145641b9bd7f5c56558ece608795c79807"
            "376a7fe5b19f95b35ca358ea5c8abd7ae051d49cd2f1e45969a1ae945460"
            "3c14b278664a0e414ebc8913acb6203626985525e17a600611b028542dd0"
            "562aad787fb4f1650aa318cdcff751e1b187cbf6785fbe164e9809491b95"
            "dd68480567c99b1a57",
            16,
        )
        assert numbers.public_numbers.e == 65537
        assert numbers.d == int(
            "0cfe316e9dc6b8817f4fcfd5ae38a0886f68f773b8a6db4c9e6d8703c599"
            "f3d9785c3a2c09e4c8090909fb3721e19a3009ec21221523a729265707a5"
            "8f13063671c42a4096cad378ef2510cb59e23071489d8893ac4934dd149f"
            "34f2d094bea57f1c8027c3a77248ac9b91218737d0c3c3dfa7d7829e6977"
            "cf7d995688c86c81",
            16,
        )
        assert numbers.p == int(
            "00db122ac857b2c0437d7616daa98e597bb75ca9ad3a47a70bec10c10036"
            "03328794b225c8e3eee6ffd3fd6d2253d28e071fe27d629ab072faa14377"
            "ce6118cb67",
            16,
        )
        assert numbers.q == int(
            "00df1b8aa8506fcbbbb9d00257f2975e38b33d2698fd0f37e82d7ef38c56"
            "f21b6ced63c825383782a7115cfcc093300987dbd2853b518d1c8f26382a"
            "2d2586d391",
            16,
        )
        assert numbers.dmp1 == int(
            "00be18aca13e60712fdf5daa85421eb10d86d654b269e1255656194fb0c4"
            "2dd01a1070ea12c19f5c39e09587af02f7b1a1030d016a9ffabf3b36d699"
            "ceaf38d9bf",
            16,
        )
        assert numbers.dmq1 == int(
            "71aa8978f90a0c050744b77cf1263725b203ac9f730606d8ae1d289dce4a"
            "28b8d534e9ea347aeb808c73107e583eb80c546d2bddadcdb3c82693a4c1"
            "3d863451",
            16,
        )
        assert numbers.iqmp == int(
            "136b7b1afac6e6279f71b24217b7083485a5e827d156024609dae39d48a6"
            "bdb55af2f062cc4a3b077434e6fffad5faa29a2b5dba2bed3e4621e478c0"
            "97ccfe7f",
            16,
        )

    @pytest.mark.supported(
        only_if=lambda backend: backend.dsa_supported(),
        skip_message="Does not support DSA.",
    )
    def test_load_pem_dsa_private_key(self, backend):
        key = load_vectors_from_file(
            os.path.join("asymmetric", "PKCS8", "unenc-dsa-pkcs8.pem"),
            lambda pemfile: load_pem_private_key(
                pemfile.read().encode(), None, backend
            ),
        )
        assert key
        assert isinstance(key, dsa.DSAPrivateKey)

        params = key.parameters()
        assert isinstance(params, dsa.DSAParameters)

        num = key.private_numbers()
        pub = num.public_numbers
        parameter_numbers = pub.parameter_numbers
        assert num.x == int("00a535a8e1d0d91beafc8bee1d9b2a3a8de3311203", 16)
        assert pub.y == int(
            "2b260ea97dc6a12ae932c640e7df3d8ff04a8a05a0324f8d5f1b23f15fa1"
            "70ff3f42061124eff2586cb11b49a82dcdc1b90fc6a84fb10109cb67db5d"
            "2da971aeaf17be5e37284563e4c64d9e5fc8480258b319f0de29d54d8350"
            "70d9e287914d77df81491f4423b62da984eb3f45eb2a29fcea5dae525ac6"
            "ab6bcce04bfdf5b6",
            16,
        )

        assert parameter_numbers.p == int(
            "00aa0930cc145825221caffa28ac2894196a27833de5ec21270791689420"
            "7774a2e7b238b0d36f1b2499a2c2585083eb01432924418d867faa212dd1"
            "071d4dceb2782794ad393cc08a4d4ada7f68d6e839a5fcd34b4e402d82cb"
            "8a8cb40fec31911bf9bd360b034caacb4c5e947992573c9e90099c1b0f05"
            "940cabe5d2de49a167",
            16,
        )

        assert parameter_numbers.q == int(
            "00adc0e869b36f0ac013a681fdf4d4899d69820451", 16
        )

        assert parameter_numbers.g == int(
            "008c6b4589afa53a4d1048bfc346d1f386ca75521ccf72ddaa251286880e"
            "e13201ff48890bbfc33d79bacaec71e7a778507bd5f1a66422e39415be03"
            "e71141ba324f5b93131929182c88a9fa4062836066cebe74b5c6690c7d10"
            "1106c240ab7ebd54e4e3301fd086ce6adac922fb2713a2b0887cba13b9bc"
            "68ce5cfff241cd3246",
            16,
        )

    @pytest.mark.parametrize(
        ("key_file", "password"), [("bad-oid-dsa-key.pem", None)]
    )
    def test_load_bad_oid_key(self, key_file, password, backend):
        with pytest.raises(UnsupportedAlgorithm):
            load_vectors_from_file(
                os.path.join("asymmetric", "PKCS8", key_file),
                lambda pemfile: load_pem_private_key(
                    pemfile.read().encode(), password, backend
                ),
            )

    @pytest.mark.parametrize(
        ("key_file", "password"), [("bad-encryption-oid.pem", b"password")]
    )
    def test_load_bad_encryption_oid_key(self, key_file, password, backend):
        with pytest.raises(ValueError):
            load_vectors_from_file(
                os.path.join("asymmetric", "PKCS8", key_file),
                lambda pemfile: load_pem_private_key(
                    pemfile.read().encode(), password, backend
                ),
            )

    def test_encrypted_pkcs8_non_utf_password(self):
        data = load_vectors_from_file(
            os.path.join("asymmetric", "PKCS8", "enc-rsa-pkcs8.pem"),
            lambda f: f.read(),
            mode="rb",
        )
        with pytest.raises(ValueError):
            load_pem_private_key(data, password=b"\xff")

    def test_rsa_private_key_invalid_version(self):
        data = load_vectors_from_file(
            os.path.join(
                "asymmetric",
                "Traditional_OpenSSL_Serialization",
                "rsa-wrong-version.pem",
            ),
            lambda f: f.read(),
            mode="rb",
        )
        with pytest.raises(ValueError):
            load_pem_private_key(data, password=None)

    def test_dsa_private_key_invalid_version(self):
        data = load_vectors_from_file(
            os.path.join(
                "asymmetric",
                "Traditional_OpenSSL_Serialization",
                "dsa-wrong-version.pem",
            ),
            lambda f: f.read(),
            mode="rb",
        )
        with pytest.raises(ValueError):
            load_pem_private_key(data, password=None)

    def test_pem_encryption_missing_dek_info(self):
        data = load_vectors_from_file(
            os.path.join(
                "asymmetric",
                "Traditional_OpenSSL_Serialization",
                "key1-no-dek-info.pem",
            ),
            lambda f: f.read(),
            mode="rb",
        )
        with pytest.raises(ValueError):
            load_pem_private_key(data, password=b"password")

    def test_pem_encryption_malformed_dek_info(self):
        data = load_vectors_from_file(
            os.path.join(
                "asymmetric",
                "Traditional_OpenSSL_Serialization",
                "key1-malformed-dek-info.pem",
            ),
            lambda f: f.read(),
            mode="rb",
        )
        with pytest.raises(ValueError):
            load_pem_private_key(data, password=b"password")

    def test_pem_encryption_malformed_iv(self):
        data = load_vectors_from_file(
            os.path.join(
                "asymmetric",
                "Traditional_OpenSSL_Serialization",
                "key1-malformed-iv.pem",
            ),
            lambda f: f.read(),
            mode="rb",
        )
        with pytest.raises(ValueError):
            load_pem_private_key(data, password=b"password")

    def test_pem_encryption_short_iv(self):
        data = load_vectors_from_file(
            os.path.join(
                "asymmetric",
                "Traditional_OpenSSL_Serialization",
                "key1-short-iv.pem",
            ),
            lambda f: f.read(),
            mode="rb",
        )
        with pytest.raises(ValueError):
            load_pem_private_key(data, password=b"password")

    def test_pkcs8_key_with_wrong_pem_delimiter(self):
        data = load_vectors_from_file(
            os.path.join(
                "asymmetric",
                "PKCS8",
                "wrong-pem-delimiter-rsa.pem",
            ),
            lambda f: f.read(),
            mode="rb",
        )
        with pytest.raises(ValueError) as exc_info:
            load_pem_private_key(data, password=None)

        if sys.version_info >= (3, 11):
            assert len(exc_info.value.__notes__) == 1
            assert "PKCS#8 format" in exc_info.value.__notes__[0]
TestPEMSerialization
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/solver19.py
{ "start": 143, "end": 198 }
class ____(list[str]):
    pass


_T1 = TypeVar("_T1")
LS
python
mlflow__mlflow
tests/sklearn/test_sklearn_autolog.py
{ "start": 64444, "end": 67608 }
class ____(sklearn.cluster.KMeans):
    def __init__(self, n_clusters=8):
        super().__init__(n_clusters)
        self.generator = (i for i in range(3))

    # Ignore parameter validation added in scikit-learn > 1.1.0
    def _validate_params(self):
        pass


def test_autolog_print_warning_if_custom_estimator_pickling_raise_error():
    mlflow.sklearn.autolog()

    with mlflow.start_run() as run, mock.patch("mlflow.sklearn._logger.warning") as mock_warning:
        unpicklable_kmeans = UnpicklableKmeans()
        with pytest.raises(TypeError, match=r"(can't|cannot) pickle.+generator"):
            pickle.dumps(unpicklable_kmeans)

        unpicklable_kmeans.fit(*get_iris())
        assert any(
            call_args[0][0].startswith("Pickling custom sklearn model UnpicklableKmeans failed")
            for call_args in mock_warning.call_args_list
        )

    run_id = run.info.run_id
    params, metrics, tags, artifacts = get_run_data(run_id)
    assert len(params) > 0
    assert len(metrics) > 0
    assert len(tags) > 0
    assert artifacts == (["estimator.html"] if _is_estimator_html_repr_supported() else [])


def test_autolog_registering_model():
    registered_model_name = "test_autolog_registered_model"
    mlflow.sklearn.autolog(registered_model_name=registered_model_name)

    with mlflow.start_run():
        sklearn.cluster.KMeans().fit(*get_iris())

        registered_model = MlflowClient().get_registered_model(registered_model_name)
        assert registered_model.name == registered_model_name


def test_autolog_pos_label_used_for_training_metric():
    mlflow.sklearn.autolog(pos_label=1)

    import sklearn.ensemble

    model = sklearn.ensemble.RandomForestClassifier(max_depth=2, random_state=0, n_estimators=10)
    X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)

    with mlflow.start_run() as run:
        model = fit_model(model, X, y, "fit")
        _, training_metrics, _, _ = get_run_data(run.info.run_id)
        with MlflowAutologgingQueueingClient() as autologging_client:
            expected_training_metrics = _log_estimator_content(
                autologging_client=autologging_client,
                estimator=model,
                run_id=run.info.run_id,
                prefix="training_",
                X=X,
                y_true=y,
                sample_weight=None,
                pos_label=1,
            )
        assert training_metrics == expected_training_metrics


def test_autolog_emits_warning_message_when_pos_label_used_for_multilabel():
    mlflow.sklearn.autolog(pos_label=1)

    model = sklearn.svm.SVC()
    X, y = get_iris()

    with mlflow.start_run(), mock.patch("mlflow.sklearn.utils._logger.warning") as mock_warning:
        model.fit(X, y)
        assert mock_warning.call_count == 3  # for precision, recall and f1_score
        mock_warning.assert_any_call(
            "precision_score failed. The metric training_precision_score will not be recorded. "
            "Metric error: Target is multiclass but average='binary'. Please choose another "
            "average setting, one of [None, 'micro', 'macro', 'weighted']."
        )
UnpicklableKmeans
python
walkccc__LeetCode
solutions/652. Find Duplicate Subtrees/652.py
{ "start": 0, "end": 485 }
class ____:
    def findDuplicateSubtrees(self, root: TreeNode | None) -> list[TreeNode | None]:
        ans = []
        count = collections.Counter()

        def encode(root: TreeNode | None) -> str:
            if not root:
                return ''
            encoded = (str(root.val) + '#' +
                       encode(root.left) + '#' +
                       encode(root.right))
            count[encoded] += 1
            if count[encoded] == 2:
                ans.append(root)
            return encoded

        encode(root)
        return ans
Solution
python
getsentry__sentry
src/sentry/sentry_metrics/querying/data/execution.py
{ "start": 19072, "end": 20412 }
class ____: """ Represents a partial query result which contains all the queries that are linearly dependent and their results. This result is stored in the array of results for each ScheduledQuery that has a next parameter. Attributes: previous_queries: All the previous queries that have been executed as part of a single list of chained queries, defined via the next parameter of ScheduledQuery. """ previous_queries: list[tuple[ScheduledQuery, Mapping[str, Any], bool]] def to_query_result(self) -> QueryResult: """ Transforms a PartialQueryResult in a QueryResult by taking the last query that was executed in the list. Returns: A QueryResult which contains the data of the last query executed as part of this PartialQueryResult. """ # For now, we naively return the first scheduled query and result, but this is just because # we currently support only the chaining of at most two queries, meaning that a partial result # can accumulate only one query. last_scheduled_query, last_query_result, has_more = self.previous_queries[0] return QueryResult.from_scheduled_query( scheduled_query=last_scheduled_query, query_result=last_query_result, has_more=has_more )
PartialQueryResult
python
python-excel__xlwt
xlwt/antlr.py
{ "start": 51033, "end": 52917 }
class ____(object):
    def __init__(self, stream):
        self.input = stream
        self.nMarkers = 0
        self.markerOffset = 0
        self.numToConsume = 0
        self.queue = Queue()

    def reset(self):
        self.nMarkers = 0
        self.markerOffset = 0
        self.numToConsume = 0
        self.queue.reset()

    def consume(self):
        self.numToConsume += 1

    def fill(self, amount):
        self.syncConsume()
        while self.queue.length() < (amount + self.markerOffset):
            self.queue.append(self.input.nextToken())

    def getInput(self):
        return self.input

    def LA(self, k):
        self.fill(k)
        return self.queue.elementAt(self.markerOffset + k - 1).type

    def LT(self, k):
        self.fill(k)
        return self.queue.elementAt(self.markerOffset + k - 1)

    def mark(self):
        self.syncConsume()
        self.nMarkers += 1
        return self.markerOffset

    def rewind(self, mark):
        self.syncConsume()
        self.markerOffset = mark
        self.nMarkers -= 1

    def syncConsume(self):
        while self.numToConsume > 0:
            if self.nMarkers > 0:
                # guess mode -- leave leading characters and bump offset.
                self.markerOffset += 1
            else:
                # normal mode -- remove first character
                self.queue.removeFirst()
            self.numToConsume -= 1

    def __str__(self):
        return "(%s,%s,%s,%s,%s)" % (
            self.input, self.nMarkers, self.markerOffset,
            self.numToConsume, self.queue)

    def __repr__(self):
        return str(self)

###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
###                     ParserSharedInputState                      ###
###xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx###
TokenBuffer
python
google__flatbuffers
tests/monster_test_generated.py
{ "start": 15353, "end": 16809 }
class ____(object):

    # StructOfStructsT
    def __init__(
        self,
        a = None,
        b = None,
        c = None,
    ):
        self.a = a  # type: Optional[AbilityT]
        self.b = b  # type: Optional[TestT]
        self.c = c  # type: Optional[AbilityT]

    @classmethod
    def InitFromBuf(cls, buf, pos):
        structOfStructs = StructOfStructs()
        structOfStructs.Init(buf, pos)
        return cls.InitFromObj(structOfStructs)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, structOfStructs):
        x = StructOfStructsT()
        x._UnPack(structOfStructs)
        return x

    # StructOfStructsT
    def _UnPack(self, structOfStructs):
        if structOfStructs is None:
            return
        if structOfStructs.A(Ability()) is not None:
            self.a = AbilityT.InitFromObj(structOfStructs.A(Ability()))
        if structOfStructs.B(Test()) is not None:
            self.b = TestT.InitFromObj(structOfStructs.B(Test()))
        if structOfStructs.C(Ability()) is not None:
            self.c = AbilityT.InitFromObj(structOfStructs.C(Ability()))

    # StructOfStructsT
    def Pack(self, builder):
        return CreateStructOfStructs(builder, self.a.id, self.a.distance, self.b.a, self.b.b, self.c.id, self.c.distance)
StructOfStructsT
python
wandb__wandb
wandb/automations/_generated/fragments.py
{ "start": 922, "end": 1155 }
class ____(GQLResult):
    typename__: Typename[Literal["GenericWebhookIntegration"]] = (
        "GenericWebhookIntegration"
    )
    id: GQLId
    name: str
    url_endpoint: str = Field(alias="urlEndpoint")
WebhookIntegrationFields
python
django__django
tests/db_functions/comparison/test_cast.py
{ "start": 313, "end": 7604 }
class ____(TestCase):
    @classmethod
    def setUpTestData(cls):
        Author.objects.create(name="Bob", age=1, alias="1")

    def test_cast_from_value(self):
        numbers = Author.objects.annotate(
            cast_integer=Cast(models.Value("0"), models.IntegerField())
        )
        self.assertEqual(numbers.get().cast_integer, 0)

    def test_cast_from_field(self):
        numbers = Author.objects.annotate(
            cast_string=Cast("age", models.CharField(max_length=255)),
        )
        self.assertEqual(numbers.get().cast_string, "1")

    def test_cast_to_char_field_without_max_length(self):
        numbers = Author.objects.annotate(cast_string=Cast("age", models.CharField()))
        self.assertEqual(numbers.get().cast_string, "1")

    # Silence "Truncated incorrect CHAR(1) value: 'Bob'".
    @ignore_warnings(module="django.db.backends.mysql.base")
    @skipUnlessDBFeature("supports_cast_with_precision")
    def test_cast_to_char_field_with_max_length(self):
        names = Author.objects.annotate(
            cast_string=Cast("name", models.CharField(max_length=1))
        )
        self.assertEqual(names.get().cast_string, "B")

    @skipUnlessDBFeature("supports_cast_with_precision")
    def test_cast_to_decimal_field(self):
        FloatModel.objects.create(f1=-1.934, f2=3.467)
        float_obj = FloatModel.objects.annotate(
            cast_f1_decimal=Cast(
                "f1", models.DecimalField(max_digits=8, decimal_places=2)
            ),
            cast_f2_decimal=Cast(
                "f2", models.DecimalField(max_digits=8, decimal_places=1)
            ),
        ).get()
        self.assertEqual(float_obj.cast_f1_decimal, decimal.Decimal("-1.93"))
        expected = "3.4" if connection.features.rounds_to_even else "3.5"
        self.assertEqual(float_obj.cast_f2_decimal, decimal.Decimal(expected))
        author_obj = Author.objects.annotate(
            cast_alias_decimal=Cast(
                "alias", models.DecimalField(max_digits=8, decimal_places=2)
            ),
        ).get()
        self.assertEqual(author_obj.cast_alias_decimal, decimal.Decimal("1"))

    def test_cast_to_integer(self):
        for field_class in (
            models.AutoField,
            models.BigAutoField,
            models.SmallAutoField,
            models.IntegerField,
            models.BigIntegerField,
            models.SmallIntegerField,
            models.PositiveBigIntegerField,
            models.PositiveIntegerField,
            models.PositiveSmallIntegerField,
        ):
            with self.subTest(field_class=field_class):
                numbers = Author.objects.annotate(cast_int=Cast("alias", field_class()))
                self.assertEqual(numbers.get().cast_int, 1)

    def test_cast_to_integer_foreign_key(self):
        numbers = Author.objects.annotate(
            cast_fk=Cast(
                models.Value("0"),
                models.ForeignKey(Author, on_delete=models.SET_NULL),
            )
        )
        self.assertEqual(numbers.get().cast_fk, 0)

    def test_cast_to_duration(self):
        duration = datetime.timedelta(days=1, seconds=2, microseconds=3)
        DTModel.objects.create(duration=duration)
        dtm = DTModel.objects.annotate(
            cast_duration=Cast("duration", models.DurationField()),
            cast_neg_duration=Cast(-duration, models.DurationField()),
        ).get()
        self.assertEqual(dtm.cast_duration, duration)
        self.assertEqual(dtm.cast_neg_duration, -duration)

    def test_cast_from_db_datetime_to_date(self):
        dt_value = datetime.datetime(2018, 9, 28, 12, 42, 10, 234567)
        DTModel.objects.create(start_datetime=dt_value)
        dtm = DTModel.objects.annotate(
            start_datetime_as_date=Cast("start_datetime", models.DateField())
        ).first()
        self.assertEqual(dtm.start_datetime_as_date, datetime.date(2018, 9, 28))

    def test_cast_from_db_datetime_to_time(self):
        dt_value = datetime.datetime(2018, 9, 28, 12, 42, 10, 234567)
        DTModel.objects.create(start_datetime=dt_value)
        dtm = DTModel.objects.annotate(
            start_datetime_as_time=Cast("start_datetime", models.TimeField())
        ).first()
        rounded_ms = int(
            round(0.234567, connection.features.time_cast_precision) * 10**6
        )
        self.assertEqual(
            dtm.start_datetime_as_time, datetime.time(12, 42, 10, rounded_ms)
        )

    def test_cast_from_db_date_to_datetime(self):
        dt_value = datetime.date(2018, 9, 28)
        DTModel.objects.create(start_date=dt_value)
        dtm = DTModel.objects.annotate(
            start_as_datetime=Cast("start_date", models.DateTimeField())
        ).first()
        self.assertEqual(
            dtm.start_as_datetime, datetime.datetime(2018, 9, 28, 0, 0, 0, 0)
        )

    def test_cast_from_db_datetime_to_date_group_by(self):
        author = Author.objects.create(name="John Smith", age=45)
        dt_value = datetime.datetime(2018, 9, 28, 12, 42, 10, 234567)
        Fan.objects.create(name="Margaret", age=50, author=author, fan_since=dt_value)
        fans = (
            Fan.objects.values("author")
            .annotate(
                fan_for_day=Cast("fan_since", models.DateField()),
                fans=models.Count("*"),
            )
            .values()
        )
        self.assertEqual(fans[0]["fan_for_day"], datetime.date(2018, 9, 28))
        self.assertEqual(fans[0]["fans"], 1)

    def test_cast_from_python_to_date(self):
        today = datetime.date.today()
        dates = Author.objects.annotate(cast_date=Cast(today, models.DateField()))
        self.assertEqual(dates.get().cast_date, today)

    def test_cast_from_python_to_datetime(self):
        now = datetime.datetime.now()
        dates = Author.objects.annotate(cast_datetime=Cast(now, models.DateTimeField()))
        time_precision = datetime.timedelta(
            microseconds=10 ** (6 - connection.features.time_cast_precision)
        )
        self.assertAlmostEqual(dates.get().cast_datetime, now, delta=time_precision)

    def test_cast_from_python(self):
        numbers = Author.objects.annotate(
            cast_float=Cast(decimal.Decimal(0.125), models.FloatField())
        )
        cast_float = numbers.get().cast_float
        self.assertIsInstance(cast_float, float)
        self.assertEqual(cast_float, 0.125)

    @unittest.skipUnless(connection.vendor == "postgresql", "PostgreSQL test")
    def test_expression_wrapped_with_parentheses_on_postgresql(self):
        """
        The SQL for the Cast expression is wrapped with parentheses in case
        it's a complex expression.
        """
        with CaptureQueriesContext(connection) as captured_queries:
            list(
                Author.objects.annotate(
                    cast_float=Cast(models.Avg("age"), models.FloatField()),
                )
            )
            self.assertIn(
                '(AVG("db_functions_author"."age"))::double precision',
                captured_queries[0]["sql"],
            )

    def test_cast_to_text_field(self):
        self.assertEqual(
            Author.objects.values_list(
                Cast("age", models.TextField()), flat=True
            ).get(),
            "1",
        )
CastTests
python
pyparsing__pyparsing
tests/test_simple_unit.py
{ "start": 3880, "end": 5120 }
class ____(PyparsingExpressionTestCase):
    tests = [
        PyparsingTest(
            desc="Simple match",
            expr=pp.Literal("xyz"),
            text="xyz",
            expected_list=["xyz"],
        ),
        PyparsingTest(
            desc="Simple match after skipping whitespace",
            expr=pp.Literal("xyz"),
            text="  xyz",
            expected_list=["xyz"],
        ),
        PyparsingTest(
            desc="Simple fail - parse an empty string",
            expr=pp.Literal("xyz"),
            text="",
            expected_fail_locn=0,
        ),
        PyparsingTest(
            desc="Simple fail - parse a mismatching string",
            expr=pp.Literal("xyz"),
            text="xyu",
            expected_fail_locn=0,
        ),
        PyparsingTest(
            desc="Simple fail - parse a partially matching string",
            expr=pp.Literal("xyz"),
            text="xy",
            expected_fail_locn=0,
        ),
        PyparsingTest(
            desc="Fail - parse a partially matching string by matching individual letters",
            expr=pp.Literal("x") + pp.Literal("y") + pp.Literal("z"),
            text="xy",
            expected_fail_locn=2,
        ),
    ]
TestLiteral
python
fastapi__sqlmodel
docs_src/tutorial/update/tutorial003.py
{ "start": 100, "end": 2218 }
class ____(SQLModel, table=True):
    id: Optional[int] = Field(default=None, primary_key=True)
    name: str = Field(index=True)
    secret_name: str
    age: Optional[int] = Field(default=None, index=True)


sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"

engine = create_engine(sqlite_url, echo=True)


def create_db_and_tables():
    SQLModel.metadata.create_all(engine)


def create_heroes():
    hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
    hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
    hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
    hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
    hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
    hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
    hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)

    with Session(engine) as session:
        session.add(hero_1)
        session.add(hero_2)
        session.add(hero_3)
        session.add(hero_4)
        session.add(hero_5)
        session.add(hero_6)
        session.add(hero_7)

        session.commit()


def update_heroes():
    with Session(engine) as session:
        statement = select(Hero).where(Hero.name == "Spider-Boy")
        results = session.exec(statement)
        hero_1 = results.one()
        print("Hero 1:", hero_1)

        statement = select(Hero).where(Hero.name == "Captain North America")
        results = session.exec(statement)
        hero_2 = results.one()
        print("Hero 2:", hero_2)

        hero_1.age = 16
        hero_1.name = "Spider-Youngster"
        session.add(hero_1)

        hero_2.name = "Captain North America Except Canada"
        hero_2.age = 110
        session.add(hero_2)

        session.commit()
        session.refresh(hero_1)
        session.refresh(hero_2)

        print("Updated hero 1:", hero_1)
        print("Updated hero 2:", hero_2)


def main():
    create_db_and_tables()
    create_heroes()
    update_heroes()


if __name__ == "__main__":
    main()
Hero
python
google__flatbuffers
python/flatbuffers/flexbuffers.py
{ "start": 10493, "end": 11417 }
class ____(Sized):
    """Data accessor for the encoded string bytes."""

    __slots__ = ()

    @property
    def Bytes(self):
        return self._buf[0 : len(self)]

    def Mutate(self, value):
        """Mutates underlying string bytes in place.

        Args:
          value: New string to replace the existing one. New string must have less
            or equal UTF-8-encoded bytes than the existing one to successfully
            mutate underlying byte buffer.

        Returns:
          Whether the value was mutated or not.
        """
        encoded = value.encode('utf-8')
        n = len(encoded)
        if n <= len(self):
            self._buf[-self._byte_width : 0] = _Pack(U, n, self._byte_width)
            self._buf[0:n] = encoded
            self._buf[n : len(self)] = bytearray(len(self) - n)
            return True
        return False

    def __str__(self):
        return self.Bytes.decode('utf-8')

    def __repr__(self):
        return 'String(%s, size=%d)' % (self._buf, len(self))
String
python
huggingface__transformers
src/transformers/models/phi4_multimodal/modular_phi4_multimodal.py
{ "start": 2070, "end": 5947 }
class ____(SiglipVisionConfig):
    r"""
    This is the configuration class to store the configuration of a [`Phi4MultimodalVisionModel`]. It is used to
    instantiate a Phi4Multimodal vision encoder according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the vision encoder
    of [microsoft/Phi-4-multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct) architecture.

    Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PreTrainedConfig`] for more information.

    Args:
        hidden_size (`int`, *optional*, defaults to 1152):
            Dimensionality of the encoder layers and the pooler layer.
        intermediate_size (`int`, *optional*, defaults to 4304):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        num_hidden_layers (`int`, *optional*, defaults to 27):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        num_channels (`int`, *optional*, defaults to 3):
            Number of channels in the input images.
        image_size (`int`, *optional*, defaults to 448):
            The size (resolution) of each image.
        patch_size (`int`, *optional*, defaults to 14):
            The size (resolution) of each patch.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
            `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
        layer_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the layer normalization layers.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
        crop_size (`int`, *optional*, defaults to 448):
            Crop size for the input images.
        image_token_id (`int`, *optional*, defaults to 200010):
            The image token id.
        feature_layer (`int`, *optional*, defaults to -2):
            The index of the layer of the encoder from which to extract image features.

    Example:

    ```python
    >>> from transformers import Phi4MultimodalVisionConfig

    >>> # Initializing a Phi4MultimodalVisionConfig with microsoft/Phi-4-multimodal-instruct style configuration
    >>> configuration = Phi4MultimodalVisionConfig()
    ```"""

    model_type = "phi4_multimodal_vision"

    def __init__(
        self,
        hidden_size=1152,
        intermediate_size=4304,
        num_hidden_layers=27,
        num_attention_heads=16,
        num_channels=3,
        image_size=448,
        patch_size=14,
        hidden_act="gelu_pytorch_tanh",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        crop_size: int = 448,
        image_token_id: int = 200010,
        feature_layer: int = -2,
        **kwargs,
    ):
        super().__init__(
            hidden_size=hidden_size,
            intermediate_size=intermediate_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            num_channels=num_channels,
            image_size=image_size,
            patch_size=patch_size,
            hidden_act=hidden_act,
            layer_norm_eps=layer_norm_eps,
            attention_dropout=attention_dropout,
            **kwargs,
        )
        self.crop_size = crop_size
        self.image_token_id = image_token_id
        self.feature_layer = feature_layer
Phi4MultimodalVisionConfig
python
getsentry__sentry
src/sentry/web/forms/__init__.py
{ "start": 119, "end": 828 }
class ____(forms.Form):
    text = forms.CharField(
        widget=forms.Textarea(attrs={"rows": "1", "placeholder": "Type a note and press enter..."})
    )

    def save(self, group, user):
        qs = Activity.objects.filter(
            group=group,
            project_id=group.project_id,
            user_id=user.id,
            type=ActivityType.NOTE.value,
            data=self.cleaned_data,
        )
        # Prevent duplicate comments, this is necessary for outbox based
        # delivery to be idempotent
        if qs.exists():
            return

        return Activity.objects.create_group_activity(
            group, ActivityType.NOTE, user=user, data=self.cleaned_data
        )
NewNoteForm
python
doocs__leetcode
solution/2100-2199/2152.Minimum Number of Lines to Cover Points/Solution.py
{ "start": 0, "end": 948 }
class ____:
    def minimumLines(self, points: List[List[int]]) -> int:
        def check(i, j, k):
            x1, y1 = points[i]
            x2, y2 = points[j]
            x3, y3 = points[k]
            return (x2 - x1) * (y3 - y1) == (x3 - x1) * (y2 - y1)

        @cache
        def dfs(state):
            if state == (1 << n) - 1:
                return 0
            ans = inf
            for i in range(n):
                if not (state >> i & 1):
                    for j in range(i + 1, n):
                        nxt = state | 1 << i | 1 << j
                        for k in range(j + 1, n):
                            if not (nxt >> k & 1) and check(i, j, k):
                                nxt |= 1 << k
                        ans = min(ans, dfs(nxt) + 1)
                    if i == n - 1:
                        ans = min(ans, dfs(state | 1 << i) + 1)
            return ans

        n = len(points)
        return dfs(0)
Solution
python
huggingface__transformers
src/transformers/models/gpt_bigcode/modeling_gpt_bigcode.py
{ "start": 34642, "end": 38643 }
class ____(GPTBigCodePreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.transformer = GPTBigCodeModel(config)
        if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        past_key_values: Optional[Cache] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[tuple, TokenClassifierOutput]:
        r"""
        input_ids (`torch.Tensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
            `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
            sequence tokens in the vocabulary.

            If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
            `input_ids`.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        labels (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1).to(logits.device))

        if not return_dict:
            output = (logits,) + transformer_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


__all__ = [
    "GPTBigCodeForSequenceClassification",
    "GPTBigCodeForTokenClassification",
    "GPTBigCodeForCausalLM",
    "GPTBigCodeModel",
    "GPTBigCodePreTrainedModel",
]
GPTBigCodeForTokenClassification
python
django__django
tests/custom_lookups/models.py
{ "start": 332, "end": 428 }
class ____(models.Model):
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
Article
python
sympy__sympy
sympy/core/relational.py
{ "start": 19231, "end": 25598 }
class ____(Relational):
    """
    An equal relation between two objects.

    Explanation
    ===========

    Represents that two objects are equal. If they can be easily shown
    to be definitively equal (or unequal), this will reduce to True (or
    False). Otherwise, the relation is maintained as an unevaluated
    Equality object. Use the ``simplify`` function on this object for
    more nontrivial evaluation of the equality relation.

    As usual, the keyword argument ``evaluate=False`` can be used to
    prevent any evaluation.

    Examples
    ========

    >>> from sympy import Eq, simplify, exp, cos
    >>> from sympy.abc import x, y
    >>> Eq(y, x + x**2)
    Eq(y, x**2 + x)
    >>> Eq(2, 5)
    False
    >>> Eq(2, 5, evaluate=False)
    Eq(2, 5)
    >>> _.doit()
    False
    >>> Eq(exp(x), exp(x).rewrite(cos))
    Eq(exp(x), sinh(x) + cosh(x))
    >>> simplify(_)
    True

    See Also
    ========

    sympy.logic.boolalg.Equivalent : for representing equality between two
        boolean expressions

    Notes
    =====

    Python treats 1 and True (and 0 and False) as being equal; SymPy
    does not. And integer will always compare as unequal to a Boolean:

    >>> Eq(True, 1), True == 1
    (False, True)

    This class is not the same as the == operator. The == operator tests
    for exact structural equality between two expressions; this class
    compares expressions mathematically.

    If either object defines an ``_eval_Eq`` method, it can be used in place of
    the default algorithm. If ``lhs._eval_Eq(rhs)`` or ``rhs._eval_Eq(lhs)``
    returns anything other than None, that return value will be substituted for
    the Equality. If None is returned by ``_eval_Eq``, an Equality object will
    be created as usual.

    Since this object is already an expression, it does not respond to
    the method ``as_expr`` if one tries to create `x - y` from ``Eq(x, y)``.
    If ``eq = Eq(x, y)`` then write `eq.lhs - eq.rhs` to get ``x - y``.

    .. deprecated:: 1.5

       ``Eq(expr)`` with a single argument is a shorthand for ``Eq(expr, 0)``,
       but this behavior is deprecated and will be removed in a future version
       of SymPy.

    """
    rel_op = '=='

    __slots__ = ()

    is_Equality = True

    def __new__(cls, lhs, rhs, **options) -> Equality | BooleanFalse | BooleanTrue:  # type: ignore
        evaluate = options.pop('evaluate', global_parameters.evaluate)
        lhs = _sympify(lhs)
        rhs = _sympify(rhs)
        if evaluate:
            val = is_eq(lhs, rhs)
            if val is None:
                return cls(lhs, rhs, evaluate=False)
            else:
                return _sympify(val)

        return Relational.__new__(cls, lhs, rhs)

    @classmethod
    def _eval_relation(cls, lhs, rhs):
        return _sympify(lhs == rhs)

    def _eval_rewrite_as_Add(self, L, R, evaluate=True, **kwargs):
        """
        return Eq(L, R) as L - R. To control the evaluation of
        the result set pass `evaluate=True` to give L - R;
        if `evaluate=None` then terms in L and R will not cancel
        but they will be listed in canonical order; otherwise
        non-canonical args will be returned. If one side is 0, the
        non-zero side will be returned.

        .. deprecated:: 1.13

           The method ``Eq.rewrite(Add)`` is deprecated.
           See :ref:`eq-rewrite-Add` for details.

        Examples
        ========

        >>> from sympy import Eq, Add
        >>> from sympy.abc import b, x
        >>> eq = Eq(x + b, x - b)
        >>> eq.rewrite(Add) #doctest: +SKIP
        2*b
        >>> eq.rewrite(Add, evaluate=None).args #doctest: +SKIP
        (b, b, x, -x)
        >>> eq.rewrite(Add, evaluate=False).args #doctest: +SKIP
        (b, x, b, -x)
        """
        sympy_deprecation_warning("""
        Eq.rewrite(Add) is deprecated.

        For ``eq = Eq(a, b)`` use ``eq.lhs - eq.rhs`` to obtain ``a - b``.
        """,
            deprecated_since_version="1.13",
            active_deprecations_target="eq-rewrite-Add",
            stacklevel=5,
        )
        from .add import _unevaluated_Add, Add
        if L == 0:
            return R
        if R == 0:
            return L
        if evaluate:
            # allow cancellation of args
            return L - R
        args = Add.make_args(L) + Add.make_args(-R)
        if evaluate is None:
            # no cancellation, but canonical
            return _unevaluated_Add(*args)
        # no cancellation, not canonical
        return Add._from_args(args)

    @property
    def binary_symbols(self):
        if S.true in self.args or S.false in self.args:
            if self.lhs.is_Symbol:
                return {self.lhs}
            elif self.rhs.is_Symbol:
                return {self.rhs}
        return set()

    def _eval_simplify(self, **kwargs):
        # standard simplify
        e = super()._eval_simplify(**kwargs)
        if not isinstance(e, Equality):
            return e
        from .expr import Expr
        if not isinstance(e.lhs, Expr) or not isinstance(e.rhs, Expr):
            return e
        free = self.free_symbols
        if len(free) == 1:
            try:
                from .add import Add
                from sympy.solvers.solveset import linear_coeffs
                x = free.pop()
                m, b = linear_coeffs(
                    Add(e.lhs, -e.rhs, evaluate=False), x)
                if m.is_zero is False:
                    enew = e.func(x, -b / m)
                else:
                    enew = e.func(m * x, -b)
                measure = kwargs['measure']
                if measure(enew) <= kwargs['ratio'] * measure(e):
                    e = enew
            except ValueError:
                pass
        return e.canonical

    def integrate(self, *args, **kwargs):
        """See the integrate function in sympy.integrals"""
        from sympy.integrals.integrals import integrate
        return integrate(self, *args, **kwargs)

    def as_poly(self, *gens, **kwargs):
        '''Returns lhs-rhs as a Poly

        Examples
        ========

        >>> from sympy import Eq
        >>> from sympy.abc import x
        >>> Eq(x**2, 1).as_poly(x)
        Poly(x**2 - 1, x, domain='ZZ')
        '''
        return (self.lhs - self.rhs).as_poly(*gens, **kwargs)


Eq = Equality
Equality
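A quick usage sketch (not part of this record) exercising the documented Eq behavior; it assumes only the public sympy API described in the docstring above.

# Sketch: trivially (un)equal pairs reduce to a Boolean; otherwise Eq stays symbolic.
from sympy import Eq, symbols

x, y = symbols("x y")
eq = Eq(y, x + x**2)    # stays unevaluated: Eq(y, x**2 + x)
print(Eq(2, 2))         # True
print(Eq(2, 5))         # False
# As the docstring notes, Eq does not respond to as_expr(); build the
# difference of the two sides explicitly instead:
print(eq.lhs - eq.rhs)  # -x**2 - x + y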
python
getsentry__sentry
src/sentry/auth/superuser.py
{ "start": 6030, "end": 19728 }
class ____(ElevatedMode): allowed_ips = frozenset(ipaddress.ip_network(str(v), strict=False) for v in ALLOWED_IPS) org_id = SUPERUSER_ORG_ID def _check_expired_on_org_change(self) -> bool: if self.expires is not None: session_start_time = self.expires - MAX_AGE current_datetime = django_timezone.now() if current_datetime - session_start_time > MAX_AGE_PRIVILEGED_ORG_ACCESS: logger.warning( "superuser.privileged_org_access_expired", extra={"superuser_token": self.token}, ) self.set_logged_out() return False return self._is_active def __init__( self, request: HttpRequest, allowed_ips: Iterable[Any] | _UnsetType = _Unset, org_id: int | None | _UnsetType = _Unset, current_datetime: datetime | None = None, ) -> None: self.uid: str | None = None self.request = request self.expires: datetime | None = None self.token: str | None = None self._is_active: bool = False self._inactive_reason: InactiveReason = InactiveReason.NONE self.is_valid: bool = False if allowed_ips is not _Unset: self.allowed_ips = frozenset( ipaddress.ip_network(str(v), strict=False) for v in allowed_ips or () ) if org_id is not _Unset: self.org_id = org_id self._populate(current_datetime=current_datetime) @staticmethod def _needs_validation() -> bool: self_hosted = is_self_hosted() logger.info( "superuser.needs-validation", extra={ "DISABLE_SU_FORM_U2F_CHECK_FOR_LOCAL": DISABLE_SU_FORM_U2F_CHECK_FOR_LOCAL, "self_hosted": self_hosted, }, ) if self_hosted or DISABLE_SU_FORM_U2F_CHECK_FOR_LOCAL: return False return settings.VALIDATE_SUPERUSER_ACCESS_CATEGORY_AND_REASON @property def is_active(self) -> bool: org = getattr(self.request, "organization", None) if org and org.id != self.org_id: return self._check_expired_on_org_change() # We have a wsgi request with no user or user is None. if not hasattr(self.request, "user") or self.request.user is None: return False # if we've been logged out if not self.request.user.is_authenticated: return False # if superuser status was changed if not self.request.user.is_superuser: return False # if the user has changed if str(self.request.user.id) != self.uid: return False return self._is_active def is_privileged_request(self) -> tuple[bool, InactiveReason]: """ Returns ``(bool is_privileged, RequestStatus reason)`` """ allowed_ips = self.allowed_ips # if we've bound superuser to an organization they must # have completed SSO to gain status if self.org_id and not has_completed_sso(self.request, self.org_id): return False, InactiveReason.INCOMPLETE_SSO # if there's no IPs configured, we allow assume its the same as * if not allowed_ips: return True, InactiveReason.NONE ip = ipaddress.ip_address(str(self.request.META["REMOTE_ADDR"])) if not any(ip in addr for addr in allowed_ips): return False, InactiveReason.INVALID_IP return True, InactiveReason.NONE def get_session_data(self, current_datetime: datetime | None = None) -> dict[str, Any] | None: """ Return the current session data, with native types coerced. 
""" request = self.request try: cookie_token = request.get_signed_cookie( key=COOKIE_NAME, default=None, salt=COOKIE_SALT, max_age=int(MAX_AGE.total_seconds()), ) except BadSignature: logger.exception( "superuser.bad-cookie-signature", extra={"ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id}, ) return None data = request.session.get(SESSION_KEY) if not cookie_token: if data: logger.warning( "superuser.missing-cookie-token", extra={"ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id}, ) return None elif not data: logger.warning( "superuser.missing-session-data", extra={"ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id}, ) return None session_token = data.get("tok") if not session_token: logger.warning( "superuser.missing-session-token", extra={"ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id}, ) return None if not constant_time_compare(cookie_token, session_token): logger.warning( "superuser.invalid-token", extra={"ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id}, ) return None if data["uid"] != str(request.user.id): logger.warning( "superuser.invalid-uid", extra={ "ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id, "expected_user_id": data["uid"], }, ) return None if current_datetime is None: current_datetime = django_timezone.now() try: data["idl"] = datetime.fromtimestamp(float(data["idl"]), timezone.utc) except (TypeError, ValueError): logger.warning( "superuser.invalid-idle-expiration", extra={"ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id}, exc_info=True, ) return None if data["idl"] < current_datetime: logger.info( "superuser.session-expired", extra={"ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id}, ) return None try: data["exp"] = datetime.fromtimestamp(float(data["exp"]), timezone.utc) except (TypeError, ValueError): logger.warning( "superuser.invalid-expiration", extra={"ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id}, exc_info=True, ) return None if data["exp"] < current_datetime: logger.info( "superuser.session-expired", extra={"ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id}, ) return None return data def _populate(self, current_datetime: datetime | None = None) -> None: if current_datetime is None: current_datetime = django_timezone.now() request = self.request user: User | None = getattr(request, "user", None) if not hasattr(request, "session"): data = None elif not (user and user.is_superuser): data = None else: data = self.get_session_data(current_datetime=current_datetime) if not data: self._set_logged_out() else: assert user is not None self._set_logged_in(expires=data["exp"], token=data["tok"], user=user) if not self.is_active: if self._inactive_reason: logger.warning( "superuser.%s", self._inactive_reason, extra={ "ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id, }, ) else: logger.warning( "superuser.inactive-unknown-reason", extra={ "ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id, }, ) def _set_logged_in( self, expires: datetime, token: str, user: User, current_datetime: datetime | None = None, ) -> None: # we bind uid here, as if you change users in the same request # we wouldn't want to still support superuser auth (given # the superuser check happens right here) assert user.is_superuser if current_datetime is None: current_datetime = django_timezone.now() self.token = token self.uid = str(user.id) # the absolute 
maximum age of this session self.expires = expires # do we have a valid superuser session? self.is_valid = True # is the session active? (it could be valid, but inactive) self._is_active, self._inactive_reason = self.is_privileged_request() self.request.session[SESSION_KEY] = { "exp": self.expires.strftime("%s"), "idl": (current_datetime + IDLE_MAX_AGE).strftime("%s"), "tok": self.token, # XXX(dcramer): do we really need the uid safety mechanism "uid": self.uid, } def _set_logged_out(self) -> None: self.uid = None self.expires = None self.token = None self._is_active = False self._inactive_reason = InactiveReason.NONE self.is_valid = False self.request.session.pop(SESSION_KEY, None) def set_logged_in( self, user: User, current_datetime: datetime | None = None, prefilled_su_modal: dict[str, Any] | None = None, ) -> None: """ Mark a session as superuser-enabled. """ request = self.request if current_datetime is None: current_datetime = django_timezone.now() token = get_random_string(12) def enable_and_log_superuser_access() -> None: self._set_logged_in( expires=current_datetime + MAX_AGE, token=token, user=user, current_datetime=current_datetime, ) metrics.incr( "superuser.success", sample_rate=1.0, ) logger.info( "superuser.logged-in", extra={"ip_address": request.META["REMOTE_ADDR"], "user_id": user.id}, ) if not self._needs_validation(): enable_and_log_superuser_access() return if prefilled_su_modal: su_access_json = prefilled_su_modal else: try: # need to use json loads as the data is no longer in request.data su_access_json = orjson.loads(request.body) except orjson.JSONDecodeError: metrics.incr( "superuser.failure", sample_rate=1.0, tags={"reason": SuperuserAccessFormInvalidJson.code}, ) raise SuperuserAccessFormInvalidJson() su_access_info = SuperuserAccessSerializer(data=su_access_json) if not su_access_info.is_valid(): raise serializers.ValidationError(su_access_info.errors) try: logger.info( "superuser.superuser_access", extra={ "superuser_token_id": token, "user_id": request.user.id, "user_email": getattr(request.user, "email", None), "su_access_category": su_access_info.validated_data["superuserAccessCategory"], "reason_for_su": su_access_info.validated_data["superuserReason"], }, ) enable_and_log_superuser_access() except AttributeError: metrics.incr("superuser.failure", sample_rate=1.0, tags={"reason": "missing-user-info"}) logger.exception("superuser.superuser_access.missing_user_info") def set_logged_out(self) -> None: """ Mark a session as superuser-disabled. """ request = self.request self._set_logged_out() logger.info( "superuser.logged-out", extra={"ip_address": request.META["REMOTE_ADDR"], "user_id": request.user.id}, ) def on_response(self, response: HttpResponse) -> None: request = self.request # always re-bind the cookie to update the idle expiration window if self.is_active: response.set_signed_cookie( COOKIE_NAME, self.token or "", salt=COOKIE_SALT, # set max_age to None, as we want this cookie to expire on browser close max_age=None, secure=request.is_secure() if COOKIE_SECURE is None else COOKIE_SECURE, httponly=COOKIE_HTTPONLY, path=COOKIE_PATH, domain=COOKIE_DOMAIN, ) # otherwise, if the session is invalid and there's a cookie set, clear it elif not self.is_valid and request.COOKIES.get(COOKIE_NAME): response.delete_cookie(COOKIE_NAME)
Superuser
python
numpy__numpy
numpy/lib/tests/test_function_base.py
{ "start": 23943, "end": 24855 }
class ____:
    @pytest.mark.parametrize("cumsum", [np.cumsum, np.cumulative_sum])
    def test_basic(self, cumsum):
        ba = [1, 2, 10, 11, 6, 5, 4]
        ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
        for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32,
                      np.uint32, np.float32, np.float64, np.complex64,
                      np.complex128]:
            a = np.array(ba, ctype)
            a2 = np.array(ba2, ctype)

            tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype)
            assert_array_equal(cumsum(a, axis=0), tgt)

            tgt = np.array(
                [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype)
            assert_array_equal(cumsum(a2, axis=0), tgt)

            tgt = np.array(
                [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype)
            assert_array_equal(cumsum(a2, axis=1), tgt)
TestCumsum
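A short sketch of the behavior this test pins down; np.cumulative_sum is assumed available (the array-API-style alias of np.cumsum added in NumPy 2.0).

import numpy as np

a = np.array([1, 2, 10, 11, 6, 5, 4])
print(np.cumsum(a))           # [ 1  3 13 24 30 35 39]
print(np.cumulative_sum(a))   # same result under the array-API name
a2 = np.array([[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]])
print(np.cumsum(a2, axis=1))  # row-wise running sums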
python
huggingface__transformers
src/transformers/models/bark/generation_configuration_bark.py
{ "start": 11209, "end": 14942 }
class ____(GenerationConfig):
    model_type = "bark"

    # TODO (joao): nested from_dict
    def __init__(
        self,
        semantic_config: Optional[dict] = None,
        coarse_acoustics_config: Optional[dict] = None,
        fine_acoustics_config: Optional[dict] = None,
        sample_rate=24_000,
        codebook_size=1024,
        **kwargs,
    ):
        """Class that holds a generation configuration for [`BarkModel`].

        The [`BarkModel`] does not have a `generate` method, but uses this class to generate speeches with a nested
        [`BarkGenerationConfig`] which uses [`BarkSemanticGenerationConfig`], [`BarkCoarseGenerationConfig`],
        [`BarkFineGenerationConfig`].

        This configuration inherit from [`GenerationConfig`] and can be used to control the model generation. Read the
        documentation from [`GenerationConfig`] for more information.

        Args:
            semantic_config (`Dict`, *optional*):
                Semantic generation configuration.
            coarse_acoustics_config (`Dict`, *optional*):
                Coarse generation configuration.
            fine_acoustics_config (`Dict`, *optional*):
                Fine generation configuration.
            sample_rate (`int`, *optional*, defaults to 24_000):
                Sample rate.
            codebook_size (`int`, *optional*, defaults to 1024):
                Vector length for each codebook.
        """
        if semantic_config is None:
            semantic_config = {}
            logger.info("semantic_config is None. initializing the semantic model with default values.")

        if coarse_acoustics_config is None:
            coarse_acoustics_config = {}
            logger.info("coarse_acoustics_config is None. initializing the coarse model with default values.")

        if fine_acoustics_config is None:
            fine_acoustics_config = {}
            logger.info("fine_acoustics_config is None. initializing the fine model with default values.")

        self.semantic_config = BarkSemanticGenerationConfig(**semantic_config)
        self.coarse_acoustics_config = BarkCoarseGenerationConfig(**coarse_acoustics_config)
        self.fine_acoustics_config = BarkFineGenerationConfig(**fine_acoustics_config)

        self.sample_rate = sample_rate
        self.codebook_size = codebook_size

    @classmethod
    def from_sub_model_configs(
        cls,
        semantic_config: BarkSemanticGenerationConfig,
        coarse_acoustics_config: BarkCoarseGenerationConfig,
        fine_acoustics_config: BarkFineGenerationConfig,
        **kwargs,
    ):
        r"""
        Instantiate a [`BarkGenerationConfig`] (or a derived class) from bark sub-models generation configuration.

        Returns:
            [`BarkGenerationConfig`]: An instance of a configuration object
        """
        return cls(
            semantic_config=semantic_config.to_dict(),
            coarse_acoustics_config=coarse_acoustics_config.to_dict(),
            fine_acoustics_config=fine_acoustics_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """
        Serializes this instance to a Python dictionary. Override the default [`~PreTrainedConfig.to_dict`].

        Returns:
            `dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
        """
        output = copy.deepcopy(self.__dict__)

        output["semantic_config"] = self.semantic_config.to_dict()
        output["coarse_acoustics_config"] = self.coarse_acoustics_config.to_dict()
        output["fine_acoustics_config"] = self.fine_acoustics_config.to_dict()

        output["model_type"] = self.__class__.model_type
        return output
BarkGenerationConfig
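A hedged construction sketch for the nested config above; the no-argument sub-config constructors are an assumption (their parameters appear fully defaulted), and the import path follows this record's file path.

from transformers.models.bark.generation_configuration_bark import (
    BarkCoarseGenerationConfig,
    BarkFineGenerationConfig,
    BarkGenerationConfig,
    BarkSemanticGenerationConfig,
)

# Mirrors the from_sub_model_configs classmethod defined in the record.
config = BarkGenerationConfig.from_sub_model_configs(
    semantic_config=BarkSemanticGenerationConfig(),
    coarse_acoustics_config=BarkCoarseGenerationConfig(),
    fine_acoustics_config=BarkFineGenerationConfig(),
    sample_rate=24_000,
)
print(config.to_dict()["model_type"])  # "bark"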
python
doocs__leetcode
solution/2700-2799/2760.Longest Even Odd Subarray With Threshold/Solution2.py
{ "start": 0, "end": 475 }
class ____:
    def longestAlternatingSubarray(self, nums: List[int], threshold: int) -> int:
        ans, l, n = 0, 0, len(nums)
        while l < n:
            if nums[l] % 2 == 0 and nums[l] <= threshold:
                r = l + 1
                while r < n and nums[r] % 2 != nums[r - 1] % 2 and nums[r] <= threshold:
                    r += 1
                ans = max(ans, r - l)
                l = r
            else:
                l += 1
        return ans
Solution
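A usage sketch on LeetCode 2760's first sample; `Solution` is this record's target name, instantiated here purely for illustration.

sol = Solution()
# nums = [3, 2, 5, 4], threshold = 5 -> the longest valid window is [2, 5, 4]
print(sol.longestAlternatingSubarray([3, 2, 5, 4], threshold=5))  # 3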
python
aio-libs__aiohttp
aiohttp/_websocket/models.py
{ "start": 1833, "end": 1978 }
class ____(NamedTuple):
    data: bytes
    size: int
    extra: str | None = None
    type: Literal[WSMsgType.PONG] = WSMsgType.PONG
WSMessagePong
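A construction sketch; the import path mirrors this record's private module path, so treat it as an internal API that may move between aiohttp versions.

from aiohttp import WSMsgType
from aiohttp._websocket.models import WSMessagePong

msg = WSMessagePong(data=b"pong-payload", size=12)
assert msg.type is WSMsgType.PONG and msg.extra is None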
python
wandb__wandb
wandb/vendor/pygments/lexers/templates.py
{ "start": 73077, "end": 73457 }
class ____(DelegatingLexer):
    """
    Subclass of the `Angular2Lexer` that highlights unlexed data with the
    `HtmlLexer`.

    .. versionadded:: 2.0
    """

    name = "HTML + Angular2"
    aliases = ["html+ng2"]
    filenames = ['*.ng2']

    def __init__(self, **options):
        super(Angular2HtmlLexer, self).__init__(HtmlLexer, Angular2Lexer,
                                                **options)
Angular2HtmlLexer
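A sketch of driving such a lexer through pygments' public API; the `html+ng2` alias comes from the class body above, and lexer registration is assumed to work as in upstream pygments (this record is a vendored copy).

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import get_lexer_by_name

lexer = get_lexer_by_name("html+ng2")
print(highlight("<div>{{ user.name }}</div>", lexer, TerminalFormatter()))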
python
falconry__falcon
tests/test_httperror.py
{ "start": 6365, "end": 6686 }
class ____:
    def __init__(self, retry_after=None):
        self.retry_after = retry_after

    def on_get(self, req, resp):
        raise falcon.HTTPTooManyRequests(
            title='Too many requests',
            description='1 per minute',
            retry_after=self.retry_after,
        )
TooManyRequestsResource
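A wiring sketch for the resource; the route path and retry value here are illustrative, not taken from the test file.

import falcon

app = falcon.App()
app.add_route("/limited", TooManyRequestsResource(retry_after=60))
# A GET on /limited now yields 429 Too Many Requests with a Retry-After: 60 header.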
python
PrefectHQ__prefect
tests/server/database/test_alembic_commands.py
{ "start": 377, "end": 3493 }
class ____:
    @mock.patch("alembic.command.upgrade")
    def test_alembic_upgrade_defaults(self, mocked):
        alembic_upgrade()
        args, kwargs = mocked.call_args
        assert mocked.call_count == 1
        assert args[1] == "head"
        # sql == dry_run
        assert kwargs["sql"] is False

    @mock.patch("alembic.command.upgrade")
    def test_alembic_upgrade_passed_params(self, mocked):
        alembic_upgrade("revision123", dry_run=True)
        args, kwargs = mocked.call_args
        assert mocked.call_count == 1
        assert args[1] == "revision123"
        # sql == dry_run
        assert kwargs["sql"] is True

    @mock.patch("alembic.command.downgrade")
    def test_alembic_downgrade_defaults(self, mocked):
        alembic_downgrade()
        args, kwargs = mocked.call_args
        assert mocked.call_count == 1
        assert args[1] == "-1"
        # sql == dry_run
        assert kwargs["sql"] is False

    @mock.patch("alembic.command.downgrade")
    def test_alembic_downgrade_passed_params(self, mocked):
        alembic_downgrade("revision123", dry_run=True)
        args, kwargs = mocked.call_args
        assert mocked.call_count == 1
        assert args[1] == "revision123"
        # sql == dry_run
        assert kwargs["sql"] is True

    @mock.patch("alembic.command.revision")
    def test_alembic_revision_defaults(self, mocked):
        alembic_revision()
        _, kwargs = mocked.call_args
        assert mocked.call_count == 1
        assert kwargs["message"] is None
        assert kwargs["autogenerate"] is False

    @mock.patch("alembic.command.revision")
    def test_alembic_revision_passed_params(self, mocked):
        alembic_revision(message="new_revision", autogenerate=True)
        _, kwargs = mocked.call_args
        assert mocked.call_count == 1
        assert kwargs["message"] == "new_revision"
        assert kwargs["autogenerate"] is True

    @mock.patch("alembic.command.stamp")
    def test_alembic_stamp(self, mocked):
        alembic_stamp(revision="abcdef")
        _, kwargs = mocked.call_args
        assert mocked.call_count == 1
        assert kwargs["revision"] == "abcdef"

    async def test_concurrent_upgrade(self):
        jobs = [run_sync_in_worker_thread(alembic_upgrade) for _ in range(0, 10)]
        await asyncio.gather(*jobs)

    @pytest.mark.skip(
        reason=(
            "This test is occasionally failing on CI because the tables aren't being "
            "restored after the downgrade, which makes the DB cleanup fixture error "
            "for the rest of the test suite"
        )
    )
    async def test_concurrent_downgrade_upgrade(self):
        try:
            jobs = []
            for _ in range(0, 2):
                jobs.append(
                    run_sync_in_worker_thread(alembic_downgrade, revision="base")
                )
                jobs.append(run_sync_in_worker_thread(alembic_upgrade))
            await asyncio.gather(*jobs)
        finally:
            # Ensure we're back at the latest revision
            await run_sync_in_worker_thread(alembic_upgrade)
TestAlembicCommands
python
GoogleCloudPlatform__python-docs-samples
appengine/standard_python3/bundled-services/blobstore/django/main.py
{ "start": 1186, "end": 1292 }
class ____(ndb.Model):
    blob_key = ndb.BlobKeyProperty()


# [START gae_blobstore_handler_django]
UserPhoto
python
keras-team__keras
keras/src/ops/numpy.py
{ "start": 9213, "end": 9867 }
class ____(Operation):
    def __init__(self, axis=None, keepdims=False, *, name=None):
        super().__init__(name=name)
        if isinstance(axis, int):
            self.axis = [axis]
        else:
            self.axis = axis
        self.keepdims = keepdims

    def call(self, x):
        return backend.numpy.any(
            x,
            axis=self.axis,
            keepdims=self.keepdims,
        )

    def compute_output_spec(self, x):
        return KerasTensor(
            reduce_shape(
                x.shape,
                axis=self.axis,
                keepdims=self.keepdims,
            ),
            dtype="bool",
        )
Any
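A sketch of the public entry point this Operation backs (keras.ops.any in Keras 3).

from keras import ops

x = ops.convert_to_tensor([[True, False], [False, False]])
print(ops.any(x, axis=1))                       # -> [True, False] as a backend tensor
print(ops.any(x, axis=1, keepdims=True).shape)  # (2, 1)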
python
Netflix__metaflow
test/core/tests/nested_foreach.py
{ "start": 67, "end": 1293 }
class ____(MetaflowTest):
    PRIORITY = 1
    SKIP_GRAPHS = [
        "simple_switch",
        "nested_switch",
        "branch_in_switch",
        "foreach_in_switch",
        "switch_in_branch",
        "switch_in_foreach",
        "recursive_switch",
        "recursive_switch_inside_foreach",
    ]

    @steps(0, ["foreach-nested-inner"], required=True)
    def inner(self):
        [x, y, z] = self.foreach_stack()

        # assert that lengths are correct
        assert_equals(len(self.x), x[1])
        assert_equals(len(self.y), y[1])
        assert_equals(len(self.z), z[1])

        # assert that variables are correct given their indices
        assert_equals(x[2], self.x[x[0]])
        assert_equals(y[2], self.y[y[0]])
        assert_equals(z[2], self.z[z[0]])

        self.combo = x[2] + y[2] + z[2]

    @steps(1, ["all"])
    def step_all(self):
        pass

    def check_results(self, flow, checker):
        from itertools import product

        artifacts = checker.artifact_dict("foreach_inner", "combo")
        got = sorted(val["combo"] for val in artifacts.values())
        expected = sorted("".join(p) for p in product("abc", "de", "fghijk"))
        assert_equals(expected, got)
NestedForeachTest
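A minimal nested-foreach flow of the shape this harness test exercises; the flow and step names here are illustrative, not taken from the test suite.

from metaflow import FlowSpec, step

class NestedForeachFlow(FlowSpec):
    @step
    def start(self):
        self.x = "abc"
        self.next(self.outer, foreach="x")  # fan out over each character of x

    @step
    def outer(self):
        self.y = "de"
        self.next(self.inner, foreach="y")  # second, nested fan-out

    @step
    def inner(self):
        # self.foreach_stack() reports one (index, length, value) tuple per level here
        self.next(self.join_inner)

    @step
    def join_inner(self, inputs):
        self.next(self.join_outer)

    @step
    def join_outer(self, inputs):
        self.next(self.end)

    @step
    def end(self):
        pass

if __name__ == "__main__":
    NestedForeachFlow()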
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_dataplex.py
{ "start": 16568, "end": 17656 }
class ____:
    @mock.patch(HOOK_STR)
    @mock.patch(DATASCANJOB_STR)
    def test_execute(self, mock_data_scan_job, hook_mock):
        op = DataplexGetDataProfileScanResultOperator(
            task_id="get_data_scan_result",
            project_id=PROJECT_ID,
            region=REGION,
            job_id=JOB_ID,
            data_scan_id=DATA_SCAN_ID,
            api_version=API_VERSION,
            wait_for_results=False,
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        op.execute(context=mock.MagicMock())
        hook_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            api_version=API_VERSION,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        hook_mock.return_value.get_data_scan_job.assert_called_once_with(
            project_id=PROJECT_ID,
            region=REGION,
            job_id=JOB_ID,
            data_scan_id=DATA_SCAN_ID,
            retry=DEFAULT,
            timeout=None,
            metadata=(),
        )
TestDataplexGetDataProfileScanResultOperator
python
microsoft__pyright
packages/pyright-internal/src/tests/samples/classes11.py
{ "start": 220, "end": 380 }
class ____(Mapping[str, int], Collection[int]):
    def __len__(self) -> int: ...

    def __iter__(self) -> Iterator[str]: ...


# This should generate an error.
A
python
sympy__sympy
sympy/utilities/autowrap.py
{ "start": 29225, "end": 42561 }
class ____(CodeWrapper): """Wrapper for Ufuncify""" def __init__(self, *args, **kwargs): ext_keys = ['include_dirs', 'library_dirs', 'libraries', 'extra_compile_args', 'extra_link_args'] msg = ('The compilation option kwarg {} is not supported with the numpy' ' backend.') for k in ext_keys: if k in kwargs.keys(): warn(msg.format(k)) kwargs.pop(k, None) super().__init__(*args, **kwargs) @property def command(self): command = [sys.executable, "setup.py", "build_ext", "--inplace"] return command def wrap_code(self, routines, helpers=None): # This routine overrides CodeWrapper because we can't assume funcname == routines[0].name # Therefore we have to break the CodeWrapper private API. # There isn't an obvious way to extend multi-expr support to # the other autowrap backends, so we limit this change to ufuncify. helpers = helpers if helpers is not None else [] # We just need a consistent name funcname = 'wrapped_' + str(id(routines) + id(helpers)) workdir = self.filepath or tempfile.mkdtemp("_sympy_compile") if not os.access(workdir, os.F_OK): os.mkdir(workdir) oldwork = os.getcwd() os.chdir(workdir) try: sys.path.append(workdir) self._generate_code(routines, helpers) self._prepare_files(routines, funcname) self._process_files(routines) mod = __import__(self.module_name) finally: sys.path.remove(workdir) CodeWrapper._module_counter += 1 os.chdir(oldwork) if not self.filepath: try: shutil.rmtree(workdir) except OSError: # Could be some issues on Windows pass return self._get_wrapped_function(mod, funcname) def _generate_code(self, main_routines, helper_routines): all_routines = main_routines + helper_routines self.generator.write( all_routines, self.filename, True, self.include_header, self.include_empty) def _prepare_files(self, routines, funcname): # C codefilename = self.module_name + '.c' with open(codefilename, 'w') as f: self.dump_c(routines, f, self.filename, funcname=funcname) # setup.py with open('setup.py', 'w') as f: self.dump_setup(f) @classmethod def _get_wrapped_function(cls, mod, name): return getattr(mod, name) def dump_setup(self, f): setup = _ufunc_setup.substitute(module=self.module_name, filename=self.filename) f.write(setup) def dump_c(self, routines, f, prefix, funcname=None): """Write a C file with Python wrappers This file contains all the definitions of the routines in c code. Arguments --------- routines List of Routine instances f File-like object to write the file to prefix The filename prefix, used to name the imported module. funcname Name of the main function to be returned. 
""" if funcname is None: if len(routines) == 1: funcname = routines[0].name else: msg = 'funcname must be specified for multiple output routines' raise ValueError(msg) functions = [] function_creation = [] ufunc_init = [] module = self.module_name include_file = "\"{}.h\"".format(prefix) top = _ufunc_top.substitute(include_file=include_file, module=module) name = funcname # Partition the C function arguments into categories # Here we assume all routines accept the same arguments r_index = 0 py_in, _ = self._partition_args(routines[0].arguments) n_in = len(py_in) n_out = len(routines) # Declare Args form = "char *{0}{1} = args[{2}];" arg_decs = [form.format('in', i, i) for i in range(n_in)] arg_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)]) declare_args = '\n '.join(arg_decs) # Declare Steps form = "npy_intp {0}{1}_step = steps[{2}];" step_decs = [form.format('in', i, i) for i in range(n_in)] step_decs.extend([form.format('out', i, i+n_in) for i in range(n_out)]) declare_steps = '\n '.join(step_decs) # Call Args form = "*(double *)in{0}" call_args = ', '.join([form.format(a) for a in range(n_in)]) # Step Increments form = "{0}{1} += {0}{1}_step;" step_incs = [form.format('in', i) for i in range(n_in)] step_incs.extend([form.format('out', i, i) for i in range(n_out)]) step_increments = '\n '.join(step_incs) # Types n_types = n_in + n_out types = "{" + ', '.join(["NPY_DOUBLE"]*n_types) + "};" # Docstring docstring = '"Created in SymPy with Ufuncify"' # Function Creation function_creation.append("PyObject *ufunc{};".format(r_index)) # Ufunc initialization init_form = _ufunc_init_form.substitute(module=module, funcname=name, docstring=docstring, n_in=n_in, n_out=n_out, ind=r_index) ufunc_init.append(init_form) outcalls = [_ufunc_outcalls.substitute( outnum=i, call_args=call_args, funcname=routines[i].name) for i in range(n_out)] body = _ufunc_body.substitute(module=module, funcname=name, declare_args=declare_args, declare_steps=declare_steps, call_args=call_args, step_increments=step_increments, n_types=n_types, types=types, outcalls='\n '.join(outcalls)) functions.append(body) body = '\n\n'.join(functions) ufunc_init = '\n '.join(ufunc_init) function_creation = '\n '.join(function_creation) bottom = _ufunc_bottom.substitute(module=module, ufunc_init=ufunc_init, function_creation=function_creation) text = [top, body, bottom] f.write('\n\n'.join(text)) def _partition_args(self, args): """Group function arguments into categories.""" py_in = [] py_out = [] for arg in args: if isinstance(arg, OutputArgument): py_out.append(arg) elif isinstance(arg, InOutArgument): raise ValueError("Ufuncify doesn't support InOutArguments") else: py_in.append(arg) return py_in, py_out @cacheit @doctest_depends_on(exe=('f2py', 'gfortran', 'gcc'), modules=('numpy',)) def ufuncify(args, expr, language=None, backend='numpy', tempdir=None, flags=None, verbose=False, helpers=None, **kwargs): """Generates a binary function that supports broadcasting on numpy arrays. Parameters ========== args : iterable Either a Symbol or an iterable of symbols. Specifies the argument sequence for the function. expr A SymPy expression that defines the element wise operation. language : string, optional If supplied, (options: 'C' or 'F95'), specifies the language of the generated code. If ``None`` [default], the language is inferred based upon the specified backend. backend : string, optional Backend used to wrap the generated code. Either 'numpy' [default], 'cython', or 'f2py'. 
tempdir : string, optional Path to directory for temporary files. If this argument is supplied, the generated code and the wrapper input files are left intact in the specified path. flags : iterable, optional Additional option flags that will be passed to the backend. verbose : bool, optional If True, autowrap will not mute the command line backends. This can be helpful for debugging. helpers : 3-tuple or iterable of 3-tuples, optional Used to define auxiliary functions needed for the main expression. Each tuple should be of the form (name, expr, args) where: - name : str, the function name - expr : sympy expression, the function - args : iterable, the function arguments (can be any iterable of symbols) kwargs : dict These kwargs will be passed to autowrap if the `f2py` or `cython` backend is used and ignored if the `numpy` backend is used. Notes ===== The default backend ('numpy') will create actual instances of ``numpy.ufunc``. These support ndimensional broadcasting, and implicit type conversion. Use of the other backends will result in a "ufunc-like" function, which requires equal length 1-dimensional arrays for all arguments, and will not perform any type conversions. References ========== .. [1] https://numpy.org/doc/stable/reference/ufuncs.html Examples ======== Basic usage: >>> from sympy.utilities.autowrap import ufuncify >>> from sympy.abc import x, y >>> import numpy as np >>> f = ufuncify((x, y), y + x**2) >>> type(f) <class 'numpy.ufunc'> >>> f([1, 2, 3], 2) array([ 3., 6., 11.]) >>> f(np.arange(5), 3) array([ 3., 4., 7., 12., 19.]) Using helper functions: >>> from sympy import Function >>> helper_func = Function('helper_func') # Define symbolic function >>> expr = x**2 + y*helper_func(x) # Main expression using helper function >>> # Define helper_func(x) = x**3 >>> f = ufuncify((x, y), expr, helpers=[('helper_func', x**3, [x])]) >>> f([1, 2], [3, 4]) array([ 4., 36.]) Type handling with different backends: For the 'f2py' and 'cython' backends, inputs are required to be equal length 1-dimensional arrays. The 'f2py' backend will perform type conversion, but the Cython backend will error if the inputs are not of the expected type. >>> f_fortran = ufuncify((x, y), y + x**2, backend='f2py') >>> f_fortran(1, 2) array([ 3.]) >>> f_fortran(np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0])) array([ 2., 6., 12.]) >>> f_cython = ufuncify((x, y), y + x**2, backend='Cython') >>> f_cython(1, 2) # doctest: +ELLIPSIS Traceback (most recent call last): ... 
TypeError: Argument '_x' has incorrect type (expected numpy.ndarray, got int) >>> f_cython(np.array([1.0]), np.array([2.0])) array([ 3.]) """ if isinstance(args, Symbol): args = (args,) else: args = tuple(args) if language: _validate_backend_language(backend, language) else: language = _infer_language(backend) helpers = helpers if helpers else () flags = flags if flags else () if backend.upper() == 'NUMPY': # maxargs is set by numpy compile-time constant NPY_MAXARGS # If a future version of numpy modifies or removes this restriction # this variable should be changed or removed maxargs = 32 helps = [] for name, expr, args in helpers: helps.append(make_routine(name, expr, args)) code_wrapper = UfuncifyCodeWrapper(C99CodeGen("ufuncify"), tempdir, flags, verbose) if not isinstance(expr, (list, tuple)): expr = [expr] if len(expr) == 0: raise ValueError('Expression iterable has zero length') if len(expr) + len(args) > maxargs: msg = ('Cannot create ufunc with more than {0} total arguments: ' 'got {1} in, {2} out') raise ValueError(msg.format(maxargs, len(args), len(expr))) routines = [make_routine('autofunc{}'.format(idx), exprx, args) for idx, exprx in enumerate(expr)] return code_wrapper.wrap_code(routines, helpers=helps) else: # Dummies are used for all added expressions to prevent name clashes # within the original expression. y = IndexedBase(Dummy('y')) m = Dummy('m', integer=True) i = Idx(Dummy('i', integer=True), m) f_dummy = Dummy('f') f = implemented_function('%s_%d' % (f_dummy.name, f_dummy.dummy_index), Lambda(args, expr)) # For each of the args create an indexed version. indexed_args = [IndexedBase(Dummy(str(a))) for a in args] # Order the arguments (out, args, dim) args = [y] + indexed_args + [m] args_with_indices = [a[i] for a in indexed_args] return autowrap(Eq(y[i], f(*args_with_indices)), language, backend, tempdir, args, flags, verbose, helpers, **kwargs)
UfuncifyCodeWrapper
python
bokeh__bokeh
src/bokeh/util/compiler.py
{ "start": 4989, "end": 5815 }
class ____(Implementation):
    ''' A custom model implementation read from a separate source file.

    Args:
        path (str) :
            The path to the file containing the extension source code

    '''

    def __init__(self, path: str) -> None:
        with open(path, encoding="utf-8") as f:
            self.code = f.read()
        self.file = path

    @property
    def lang(self) -> str:
        if self.file is not None:
            if self.file.endswith(".ts"):
                return "typescript"
            if self.file.endswith(".js"):
                return "javascript"
            if self.file.endswith((".css", ".less")):
                return "less"
        raise ValueError(f"unknown file type {self.file}")


#: recognized extensions that can be compiled
exts = (".ts", ".js", ".css", ".less")
FromFile
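A sketch of the extension pattern that feeds this class: pointing `__implementation__` at a separate source file ("custom_layout.ts" is a hypothetical path).

from bokeh.models import LayoutDOM

class CustomLayout(LayoutDOM):
    # bokeh reads this file at compile time via the FromFile implementation above
    __implementation__ = "custom_layout.ts"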
python
huggingface__transformers
tests/quantization/autoawq/test_awq.py
{ "start": 3483, "end": 11772 }
class ____(unittest.TestCase): model_name = "TheBloke/Mistral-7B-v0.1-AWQ" dummy_transformers_model_name = "bigscience/bloom-560m" model_with_no_k_proj_quantized = "hf-internal-testing/opt-125m-awq-no-k-proj" input_text = "Hello my name is" EXPECTED_OUTPUT = set() EXPECTED_OUTPUT.add( "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. I am a junior and I am majoring in Journalism and minoring in Spanish" ) EXPECTED_OUTPUT.add( "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. I am a junior and I am majoring in Journalism and minoring in Spanish. I am" ) EXPECTED_OUTPUT.add( "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. I am a junior and I am majoring in Exercise and Sport Science with a" ) EXPECTED_OUTPUT_BF16 = [ "Hello my name is Katie and I am a 20 year old student at the University of North Carolina at Chapel Hill. I am a junior and I am majoring in Journalism and minoring in Spanish" ] EXPECTED_OUTPUT_EXLLAMA = [ "Hello my name is Katie and I am a 20 year old student from the UK. I am currently studying for a degree in English Literature and History at the University of York. I am a very out", "Hello my name is Katie and I am a 20 year old student from the UK. I am currently studying for a degree in English Literature and History at the University of York. I am a very creative", ] device_map = torch_device # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name) cls.quantized_model = AutoModelForCausalLM.from_pretrained(cls.model_name, device_map=cls.device_map) def tearDown(self): gc.collect() backend_empty_cache(torch_device) gc.collect() def test_quantized_model_conversion(self): """ Simple test that checks if the quantized model has been converted properly """ from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV from transformers.integrations.awq import replace_with_awq_linear model_id = "facebook/opt-350m" config = AutoConfig.from_pretrained(model_id, revision="cb32f77e905cccbca1d970436fb0f5e6b58ee3c5") quantization_config = AwqConfig(bits=4) with init_empty_weights(): model = OPTForCausalLM(config) nb_linears = 0 for module in model.modules(): if isinstance(module, torch.nn.Linear): nb_linears += 1 model, _ = replace_with_awq_linear(model, quantization_config=quantization_config) nb_awq_linear = 0 for module in model.modules(): if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): nb_awq_linear += 1 self.assertEqual(nb_linears, nb_awq_linear) # Try with `modules_not_to_convert` with init_empty_weights(): model = OPTForCausalLM(config) model, _ = replace_with_awq_linear( model, quantization_config=quantization_config, modules_to_not_convert=["lm_head"] ) nb_awq_linear = 0 for module in model.modules(): if isinstance(module, (WQLinear_GEMM, WQLinear_GEMV)): nb_awq_linear += 1 self.assertEqual(nb_linears - 1, nb_awq_linear) def test_quantized_model(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = self.quantized_model.generate(**input_ids, max_new_tokens=40) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_raise_if_non_quantized(self): model_id = "facebook/opt-125m" quantization_config = AwqConfig(bits=4) with 
self.assertRaises(ValueError): _ = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config) def test_quantized_model_bf16(self): """ Simple test that checks if the quantized model is working properly with bf16 """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, dtype=torch.bfloat16).to(torch_device) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_BF16) @require_torch_gpu def test_quantized_model_exllama(self): """ Simple test that checks if the quantized model is working properly with exllama backend """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantization_config = AwqConfig(version="exllama") quantized_model = AutoModelForCausalLM.from_pretrained( self.model_name, quantization_config=quantization_config, device_map=torch_device ) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT_EXLLAMA) def test_quantized_model_no_device_map(self): """ Simple test that checks if the quantized model is working properly """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name).to(torch_device) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_save_pretrained(self): """ Simple test that checks if the quantized model is working properly after being saved and loaded """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) model = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map=self.device_map) input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) output = model.generate(**input_ids, max_new_tokens=40) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) @require_torch_multi_accelerator def test_quantized_model_multi_accelerator(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs """ input_ids = self.tokenizer(self.input_text, return_tensors="pt").to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_name, device_map="auto") self.assertTrue(len(set(quantized_model.hf_device_map.values())) >= 2) output = quantized_model.generate(**input_ids, max_new_tokens=40) self.assertIn(self.tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT) def test_quantized_model_no_k_proj_quantized(self): """ Simple test that checks if the quantized model is working properly with multiple GPUs """ dummy_input = torch.LongTensor([[0, 1, 0]]).to(torch_device) quantized_model = AutoModelForCausalLM.from_pretrained(self.model_with_no_k_proj_quantized).to(torch_device) self.assertTrue(isinstance(quantized_model.model.decoder.layers[0].self_attn.k_proj, torch.nn.Linear)) self.assertFalse(isinstance(quantized_model.model.decoder.layers[0].self_attn.v_proj, torch.nn.Linear)) EXPECTED_OUTPUT = torch.LongTensor([[0, 1, 0, 50118, 50118, 133, 248, 12, 134, 16, 10, 372, 2031]]).to( torch_device ) output = quantized_model.generate(dummy_input, max_new_tokens=10) self.assertTrue((EXPECTED_OUTPUT == output).all()) 
@slow @require_torch_accelerator @require_auto_awq @require_accelerate
AwqTest
python
pennersr__django-allauth
allauth/socialaccount/providers/openstreetmap/views.py
{ "start": 388, "end": 1155 }
class ____(OAuthAdapter):
    provider_id = "openstreetmap"
    request_token_url = "https://www.openstreetmap.org/oauth/request_token"  # nosec
    access_token_url = "https://www.openstreetmap.org/oauth/access_token"  # nosec
    authorize_url = "https://www.openstreetmap.org/oauth/authorize"

    def complete_login(self, request, app, token, response):
        client = OpenStreetMapAPI(
            request, app.client_id, app.secret, self.request_token_url
        )
        extra_data = client.get_user_info()
        return self.get_provider().sociallogin_from_response(request, extra_data)


oauth_login = OAuthLoginView.adapter_view(OpenStreetMapOAuthAdapter)
oauth_callback = OAuthCallbackView.adapter_view(OpenStreetMapOAuthAdapter)
OpenStreetMapOAuthAdapter
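The standard Django settings wiring for this provider; which other apps appear in INSTALLED_APPS is project-specific.

# settings.py (excerpt)
INSTALLED_APPS = [
    # ...
    "allauth",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.openstreetmap",
]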
python
huggingface__transformers
src/transformers/models/mistral/modeling_mistral.py
{ "start": 14844, "end": 18092 }
class ____(MistralPreTrainedModel):
    def __init__(self, config: MistralConfig):
        super().__init__(config)
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
        self.layers = nn.ModuleList(
            [MistralDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )
        self.norm = MistralRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
        self.rotary_emb = MistralRotaryEmbedding(config=config)
        self.gradient_checkpointing = False

        # Initialize weights and apply final processing
        self.post_init()

    @check_model_inputs()
    @auto_docstring
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Cache] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPast:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids)

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position = torch.arange(
                past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
            )

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
        causal_mask = mask_function(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)

        for decoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_values=past_key_values,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)
        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values if use_cache else None,
        )


@auto_docstring
MistralModel
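A usage sketch for the bare (headless) model via the standard checkpoint-loading API; the checkpoint id is a real public Mistral one, but any compatible checkpoint works.

from transformers import AutoTokenizer, MistralModel

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
model = MistralModel.from_pretrained("mistralai/Mistral-7B-v0.1")
inputs = tokenizer("Hello", return_tensors="pt")
hidden = model(**inputs).last_hidden_state  # shape: (batch, seq_len, hidden_size)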
python
huggingface__transformers
tests/models/qwen3/test_modeling_qwen3.py
{ "start": 1979, "end": 54532 }
class ____(unittest.TestCase): def setUp(self): cleanup(torch_device, gc_collect=True) def tearDown(self): cleanup(torch_device, gc_collect=True) @slow def test_model_600m_logits(self): input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338] model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-0.6B-Base", device_map="auto") input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) with torch.no_grad(): out = model(input_ids).logits.float().cpu() # Expected mean on dim = -1 EXPECTED_MEAN = torch.tensor([[-1.3789, 1.3029, 3.8262, 3.4637, 2.8796, 1.8357, 2.1290, 2.1814]]) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-4, atol=1e-4) # slicing logits[0, 0, 0:30] EXPECTED_SLICE = torch.tensor([4.6905, 4.9243, 4.7101, 3.2052, 2.2683, 1.6576, 3.6529, 3.9800, 3.2605, 2.6475, 3.0468, 4.2296, 5.7443, 4.8940, 4.4883, 6.0323, 7.4057, 7.3710, 6.8373, 6.6323, 6.7114, 6.3069, 6.1751, 6.0416, 6.0793, 4.6975, 2.3286, 3.6387, 2.0757, 1.9813]) # fmt: skip torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-4, atol=1e-4) @slow def test_model_600m_generation(self): EXPECTED_TEXT_COMPLETION = """My favourite condiment is 100% plain, unflavoured, and unadulterated. It is""" prompt = "My favourite condiment is " tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B-Base", use_fast=False) model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-0.6B-Base", device_map="auto") input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @require_bitsandbytes @slow @require_flash_attn @pytest.mark.flash_attn_test def test_model_600m_long_prompt(self): EXPECTED_OUTPUT_TOKEN_IDS = [198, 198] # An input with 4097 tokens that is above the size of the sliding window input_ids = [1] + [306, 338] * 2048 model = Qwen3ForCausalLM.from_pretrained( "Qwen/Qwen3-0.6B-Base", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True), attn_implementation="flash_attention_2", ) input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) # Assisted generation assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 assistant_model.generation_config.num_assistant_tokens_schedule = "constant" generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) @slow def test_model_600m_long_prompt_sdpa(self): EXPECTED_OUTPUT_TOKEN_IDS = [198, 198] # An input with 4097 tokens that is above the size of the sliding window input_ids = [1] + [306, 338] * 2048 model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-0.6B-Base", device_map="auto", attn_implementation="sdpa") input_ids = torch.tensor([input_ids]).to(model.model.embed_tokens.weight.device) generated_ids = model.generate(input_ids, max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) # Assisted generation assistant_model = model assistant_model.generation_config.num_assistant_tokens = 2 assistant_model.generation_config.num_assistant_tokens_schedule = "constant" generated_ids = assistant_model.generate(input_ids, 
max_new_tokens=4, temperature=0) self.assertEqual(EXPECTED_OUTPUT_TOKEN_IDS, generated_ids[0][-2:].tolist()) del assistant_model cleanup(torch_device, gc_collect=True) EXPECTED_TEXT_COMPLETION = "My favourite condiment is 100% plain, unflavoured, and unadulterated. It is" prompt = "My favourite condiment is " tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B-Base", use_fast=False) input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=20, temperature=0) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @slow def test_speculative_generation(self): EXPECTED_TEXT_COMPLETIONS = Expectations( { ("xpu", 3): "My favourite condiment is 100% beef and comes in a 12 oz. jar. It is sold in", ("cuda", 7): "My favourite condiment is 100% natural. It's a little spicy and a little sweet, but it's the", ("cuda", 8): "My favourite condiment is 100% beef, 100% beef, 100% beef.", } ) # fmt: skip EXPECTED_TEXT_COMPLETION = EXPECTED_TEXT_COMPLETIONS.get_expectation() prompt = "My favourite condiment is " tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-0.6B-Base", use_fast=False) model = Qwen3ForCausalLM.from_pretrained("Qwen/Qwen3-0.6B-Base", device_map="auto", dtype=torch.float16) assistant_model = Qwen3ForCausalLM.from_pretrained( "Qwen/Qwen3-0.6B-Base", device_map="auto", dtype=torch.float16 ) input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.model.embed_tokens.weight.device) # greedy generation outputs set_seed(0) generated_ids = model.generate( input_ids, max_new_tokens=20, do_sample=True, temperature=0.3, assistant_model=assistant_model ) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @pytest.mark.torch_export_test @slow def test_export_static_cache(self): if version.parse(torch.__version__) < version.parse("2.4.0"): self.skipTest(reason="This test requires torch >= 2.4 to run.") from transformers.integrations.executorch import ( TorchExportableModuleWithStaticCache, ) qwen_model = "Qwen/Qwen3-0.6B-Base" tokenizer = AutoTokenizer.from_pretrained(qwen_model, pad_token="</s>", padding_side="right") if version.parse(torch.__version__) == version.parse("2.7.0"): strict = False # Due to https://github.com/pytorch/pytorch/issues/150994 cuda_expectation = ["My favourite condiment is 100% plain, unflavoured, and unadulterated."] else: strict = True cuda_expectation = ["My favourite condiment is 100% plain, unflavoured, and unadulterated. It is"] expected_text_completions = Expectations( { ("xpu", None): ["My favourite condiment is 100% plain, unflavoured, and unadulterated. 
It is"], ("rocm", (9, 5)): ["My favourite condiment is 100% plain, unflavoured, and unadulterated."], ("cuda", None): cuda_expectation, } ) # fmt: skip EXPECTED_TEXT_COMPLETION = expected_text_completions.get_expectation() max_generation_length = tokenizer(EXPECTED_TEXT_COMPLETION, return_tensors="pt", padding=True)[ "input_ids" ].shape[-1] device = "cpu" # TODO (joao / export experts): should be on `torch_device`, but causes GPU OOM dtype = torch.bfloat16 cache_implementation = "static" attn_implementation = "sdpa" batch_size = 1 # Load model model = Qwen3ForCausalLM.from_pretrained( qwen_model, device_map=device, dtype=dtype, attn_implementation=attn_implementation, generation_config=GenerationConfig( use_cache=True, cache_implementation=cache_implementation, max_length=max_generation_length, cache_config={ "batch_size": batch_size, "max_cache_len": max_generation_length, }, ), ) prompt = ["My favourite condiment is "] prompt_tokens = tokenizer(prompt, return_tensors="pt", padding=True).to(model.device) prompt_token_ids = prompt_tokens["input_ids"] max_new_tokens = max_generation_length - prompt_token_ids.shape[-1] # Static Cache + export from transformers.integrations.executorch import TorchExportableModuleForDecoderOnlyLM exportable_module = TorchExportableModuleForDecoderOnlyLM(model) exported_program = exportable_module.export( input_ids=torch.tensor([[1]], dtype=torch.long, device=model.device), cache_position=torch.tensor([0], dtype=torch.long, device=model.device), strict=strict, ) ep_generated_ids = TorchExportableModuleWithStaticCache.generate( exported_program=exported_program, prompt_token_ids=prompt_token_ids, max_new_tokens=max_new_tokens ) ep_generated_text = tokenizer.batch_decode(ep_generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, ep_generated_text) @require_flash_attn @slow @pytest.mark.flash_attn_test def test_600m_generation(self): model_id = "Qwen/Qwen3-0.6B-Base" tokenizer = AutoTokenizer.from_pretrained(model_id) model = Qwen3ForCausalLM.from_pretrained( model_id, use_sliding_window=True, max_window_layers=28, sliding_window=2048, dtype=torch.float16 ).to(torch_device) # we need a long text to test sliding window # fmt: off LONG_TEXT = """The Warring States period in Chinese history (c. 475 – 221 BC) comprises the final centuries of the Zhou dynasty (c. 1046 – 256 BC), which were characterized by warfare, bureaucratic and military reform, and political consolidation. It followed the Spring and Autumn period and concluded with the wars of conquest that saw the state of Qin annex each of the other contender states by 221 BC and found the Qin dynasty, the first imperial dynastic state in East Asian history. While scholars have identified several different dates as marking the beginning of the Warring States period, Sima Qian's choice of 475 BC is the most often cited. The era largely corresponds to the second half of the Eastern Zhou period, where the king of Zhou formally ruled as Chinese sovereign, but had lost political power and functioned in practice as a figurehead. This dynamic served as the backdrop for the machinations of the eponymous Warring States. The label "Warring States period" derives from the Record of the Warring States, a work of history compiled during the early Han dynasty (202 BC – 220 AD). Geography The political geography of the era was dominated by the Seven Warring States, namely: Besides these seven major states other smaller states survived into the period. 
They include: Periodisation The eastward flight of the Zhou court in 771 BC marks the start of the Spring and Autumn period. No one single incident or starting point inaugurated the Warring States era. The political situation of the period represented a culmination of historical trends of conquest and annexation which also characterised the Spring and Autumn period. As a result, there is some controversy as to the beginning of the era. Proposed starting points include: History Background and formation The Eastern Zhou dynasty began its fall around 5th century BC. As their influence waned, they had to rely on armies in allied states rather than their own military force. Hundreds of smaller polities coalesced into seven major states which included: Chu, Han, Qin, Wei, Yan, Qi and Zhao. However, there eventually was a shift in alliances because each state's ruler wanted independence. This caused hundreds of wars between 535 and 286 BC. The victorious state would have overall rule and control in China. The system of feudal states created by the Western Zhou dynasty underwent enormous changes after 771 BC with the flight of the Zhou court to modern-day Luoyang and the diminution of its relevance and power. The Spring and Autumn period led to a few states gaining power at the expense of many others, the latter no longer able to depend on central authority for legitimacy or protection. During the Warring States period, many rulers claimed the Mandate of Heaven to justify their conquest of other states and spread their influence. The struggle for hegemony eventually created a state system dominated by several large states, such as Jin, Chu, Qin, Yan, and Qi, while the smaller states of the Central Plain tended to be their satellites and tributaries. Other major states also existed, such as Wu and Yue in the southeast. The last decades of the Spring and Autumn era were marked by increased stability, as the result of peace negotiations between Jin and Chu which established their respective spheres of influence. This situation ended with the partition of Jin, whereby the state was divided between the houses of Han, Zhao and Wei, leading to the seven major warring states. Partition of Jin (453–403 BC) The rulers of Jin had steadily lost political powers since the middle of the 6th century BC to their nominally subordinate nobles and military commanders, a situation arising from the traditions of the Jin which forbade the enfeoffment of relatives of the ducal house. This allowed other clans to gain fiefs and military authority, and decades of internecine struggle led to the establishment of four major families, the Han, Zhao, Wei and Zhi. The Battle of Jinyang saw the allied Han, Zhao and Wei destroy the Zhi family (453 BC) and their lands were distributed among them. With this, they became the de facto rulers of most of Jin's territory, though this situation would not be officially recognised until half a century later. The Jin division created a political vacuum that enabled during the first 50 years expansion of Chu and Yue northward and Qi southward. Qin increased its control of the local tribes and began its expansion southwest to Sichuan. Early Warring States The three Jins recognized (403–364 BC) In 403 BC, the court of King Weilie of Zhou officially recognized Zhao, Wei and Han as immediate vassals, thereby raising them to the same rank as the other warring states. From before 405 until 383 BC the three Jins were united under the leadership of Wei and expanded in all directions. 
The most important figure was Marquess Wen of Wei (445–396 BC). In 408–406 BC he conquered the State of Zhongshan to the northeast on the other side of Zhao. At the same time he pushed west across the Yellow River to the Luo River taking the area of Xihe (literally 'west of the river'). The growing power of Wei caused Zhao to back away from the alliance. In 383 BC it moved its capital to Handan and attacked the small state of Wey. Wey appealed to Wei which attacked Zhao on the western side. Being in danger, Zhao called in Chu. As usual, Chu used this as a pretext to annex territory to its north, but the diversion allowed Zhao to occupy a part of Wei. This conflict marked the end of the power of the united Jins and the beginning a period of shifting alliances and wars on several fronts. In 376 BC, the states of Han, Wei and Zhao deposed Duke Jing of Jin and divided the last remaining Jin territory between themselves, which marked the final end of the Jin state. In 370 BC, Marquess Wu of Wei died without naming a successor, which led to a war of succession. After three years of civil war, Zhao from the north and Han from the south invaded Wei. On the verge of conquering Wei, the leaders of Zhao and Han fell into disagreement about what to do with Wei, and both armies abruptly retreated. As a result, King Hui of Wei (still a Marquess at the time) was able to ascend the throne of Wei. Zhao extended from the Shanxi plateau across the plain to the borders of Qi. Wei reached east to Qi, Lu, and Song. To the south, the weaker state of Han held the east–west part of the Yellow River valley, surrounded the Zhou royal domain at Luoyang and held an area north of Luoyang called Shangdang. Qi resurgence under Tian (379–340 BC) Duke Kang of Qi died in 379 BC with no heir from the house of Jiang, which had ruled Qi since the state's founding. The throne instead passed to the future King Wei, from the house of Tian. The Tian had been very influential at court towards the end of Jiang rule, and now openly assumed power. The new ruler set about reclaiming territories that had been lost to other states. He launched a successful campaign against Zhao, Wey and Wei, once again extending Qi territory to the Great Wall. Sima Qian writes that the other states were so awestruck that nobody dared attack Qi for more than 20 years. The demonstrated military prowess also had a calming effect on Qi's own population, which experienced great domestic tranquility during Wei's reign. By the end of King Wei's reign, Qi had become the strongest of the states and proclaimed itself "king"; establishing independence from the Zhou dynasty (see below). Wars of Wei King Hui of Wei (370–319 BC) set about restoring the state. In 362–359 BC he exchanged territories with Han and Zhao in order to make the boundaries of the three states more rational. In 364 BC, Wei was defeated by Qin at the Battle of Shimen and was only saved by the intervention of Zhao. Qin won another victory in 362 BC. In 361 BC the Wei capital was moved east to Daliang to be out of the reach of Qin. In 354 BC, King Hui of Wei started a large-scale attack on Zhao. By 353 BC, Zhao was losing badly and its capital, Handan, was under siege. The state of Qi intervened. The famous Qi strategist, Sun Bin the great-great-great-grandson of Sun Tzu, the author of the Art of War, proposed to attack the Wei capital while the Wei army was tied up besieging Zhao. 
The strategy was a success; the Wei army hastily moved south to protect its capital, was caught on the road and decisively defeated at the Battle of Guiling. The battle is remembered in the second of the Thirty-Six Stratagems, "besiege Wei, save Zhao"—meaning to attack a vulnerable spot to relieve pressure at another point. Domestically, King Hui patronized philosophy and the arts, and is perhaps best remembered for hosting the Confucian philosopher Mencius at his court; their conversations form the first two chapters of the book that bears Mencius's name. Dukes become kings Qi and Wei became kingdoms (344 BC) The title of king (wang, 王) was held by the figurehead rulers of the Zhou dynasty, while the rulers of most states held the title of duke (gong, 公) or marquess (hou, 侯). A major exception was Chu, whose rulers had been called kings since King Wu of Chu started using the title c. 703 BC. In 344 BC the rulers of Qi and Wei mutually recognized each other as kings: King Wei of Qi and King Hui of Wei, in effect declaring their independence from the Zhou court. This marked a major turning point: unlike those in the Spring and Autumn period, the new generation of rulers ascending the thrones in the Warring States period would not entertain even the pretence of being vassals of the Zhou dynasty, instead proclaiming themselves fully independent kingdoms. Shang Yang reforms Qin (356–338 BC) During the early Warring States period Qin generally avoided conflicts with the other states. This changed during the reign of Duke Xiao, when prime minister Shang Yang made centralizing and authoritarian reforms in accordance with his Legalist philosophy between the years 356 and 338 BC. Shang introduced land reforms, privatized land, rewarded farmers who exceeded harvest quotas, enslaved farmers who failed to meet quotas, and used enslaved subjects as rewards for those who met government policies. As Qin was short of manpower relative to the other states at the time, Shang enacted policies to increase it. As Qin peasants were recruited into the military, he encouraged active immigration of peasants from other states into Qin as a replacement workforce; this policy simultaneously increased the manpower of Qin and weakened the manpower of Qin's rivals. Shang made laws forcing citizens to marry at a young age and passed tax laws to encourage raising multiple children. He also enacted policies to free convicts who worked in opening wastelands for agriculture. Shang abolished primogeniture and created a double tax on households with more than one son living at home, to break up large clans into nuclear families. Shang also moved the capital to reduce the influence of nobles on the administration. The rise of Qin was recognized by the royal court, and in 343 BC the king conferred the title of Count (伯 Bó) on Duke Xiao. As was customary, a conference was hosted which the feudal lords attended, and during which the Son of Heaven bestowed the title. After the reforms Qin became much more aggressive. In 340 BC Qin took land from Wei after Wei had been defeated by Qi. In 316 Qin conquered Shu and Ba in Sichuan to the southwest. Development of this area took a long time but ultimately added greatly to Qin's wealth and power. Qin defeats Wei (341–340 BC) In 341 BC, Wei attacked Han. Qi allowed Han to be nearly defeated and then intervened. The generals from the Battle of Guiling met again (Sun Bin and Tian Ji versus Pang Juan), using the same tactic of attacking Wei's capital. 
Sun Bin feigned a retreat and then turned on the overconfident Wei troops and decisively defeated them at the Battle of Maling. After the battle all three of the Jin successor states appeared before King Xuan of Qi, pledging their loyalty. In the following year Qin attacked the weakened Wei. Wei was devastatingly defeated and ceded a large part of its territory in return for a truce. With Wei severely weakened, Qi and Qin became the dominant states in China. Wei came to rely on Qi for protection, with King Hui of Wei meeting King Xuan of Qi on two occasions. After Hui's death, his successor King Xiang also established a good relationship with his Qi counterpart, with each promising to recognize the other as "king". Chu conquers Yue (334 BC) Early in the Warring States period, Chu was one of the strongest states in China. The state rose to a new level of power around 389 BC when King Dao of Chu (楚悼王) named the famous reformer Wu Qi as his chancellor. Chu rose to its peak in 334 BC, when it conquered Yue to its east on the Pacific coast. The series of events leading up to this began when Yue prepared to attack Qi to its north. The King of Qi sent an emissary who persuaded the King of Yue to attack Chu instead. Yue initiated a large-scale attack on Chu but was defeated by Chu's counter-attack. Chu then proceeded to conquer Yue. Qin, Han and Yan became kingdoms (325–323 BC) King Xian of Zhou had attempted to use what little royal prerogative he had left by appointing the dukes Xian (384–362 BC), Xiao (361–338 BC) and Hui (338–311 BC) of Qin as hegemons, thereby in theory making Qin the chief ally of the court. However, in 325 the confidence of Duke Hui grew so great that he proclaimed himself "king" of Qin, adopting the same title as the king of Zhou and thereby effectively proclaiming independence from the Zhou dynasty. King Hui of Qin was guided by his prime minister Zhang Yi, a prominent representative of the School of Diplomacy. He was followed in 323 BC by King Xuanhui of Han and King Yi of Yan, as well as King Cuo of the minor state Zhongshan. In 318 BC even the ruler of Song, a relatively minor state, declared himself king. Uniquely, while King Wuling of Zhao had joined the other kings in declaring himself king, he retracted this order in 318 BC, after Zhao suffered a great defeat at the hands of Qin. Partition of Zhou (314 BC) King Kao of Zhou had enfeoffed his younger brother as Duke Huan of Henan. Three generations later, this cadet branch of the royal house began calling themselves "dukes of East Zhou". Upon the accession of King Nan in 314, East Zhou became an independent state. The king came to reside in what became known as West Zhou. Horizontal and vertical alliances (334–249 BC) Towards the end of the Warring States period, the state of Qin became disproportionately powerful compared with the other six states. As a result, the policies of the six states became overwhelmingly oriented towards dealing with the Qin threat, with two opposing schools of thought. One school advocated a 'vertical' or north–south alliance called hezong (合縱), in which the states would ally with each other to repel Qin. The other advocated a 'horizontal' or east–west alliance called lianheng (連橫), in which a state would ally with Qin to participate in its ascendancy. There were some initial successes in hezong, though mutual suspicions between allied states led to the breakdown of such alliances. Qin repeatedly exploited the horizontal alliance strategy to defeat the states one by one. 
During this period, many philosophers and tacticians travelled around the states, recommending that the rulers put their respective ideas into use. These "lobbyists", such as Su Qin, who advocated vertical alliances, and Zhang Yi, who advocated horizontal alliances, were famous for their tact and intellect, and were collectively known as the School of Diplomacy, whose Chinese name (縱橫家 'the school of the vertical and horizontal') was derived from the two opposing ideas. Su Qin and the first vertical alliance (334–300 BC) Beginning in 334 BC the diplomat Su Qin spent years visiting the courts of Yan, Zhao, Han, Wei, Qi and Chu and persuaded them to form a united front against Qin. In 318 BC all states except Qi launched a joint attack on Qin, which was not successful. King Hui of Qin died in 311 BC, followed by prime minister Zhang Yi one year later. The new monarch, King Wu, reigned only four years before dying without legitimate heirs. Some damaging turbulence ensued throughout 307 BC before a son of King Hui by a concubine (i.e. a younger half-brother of King Wu) could be established as King Zhao, who in stark contrast to his predecessor went on to rule for an unprecedented 53 years. After the failure of the first vertical alliance, Su Qin eventually came to live in Qi, where he was favored by King Xuan and drew the envy of the ministers. An assassination attempt in 300 BC left Su mortally wounded but not dead. Sensing death approaching, he advised the newly crowned King Min to have him publicly executed to draw out the assassins. King Min complied with Su's request and killed him, putting an end to the first generation of vertical alliance thinkers. The first horizontal alliance (300–287 BC) King Min of Qi came to be highly influenced by Lord Mengchang, a grandson of the former King Wei of Qi. Lord Mengchang made a westward alliance with the states of Wei and Han. In the far west, Qin, which had been weakened by a succession struggle in 307, yielded to the new coalition and appointed Lord Mengchang its chief minister. The alliance between Qin and Qi was sealed by a Qin princess marrying King Min. This horizontal or east–west alliance might have secured peace except that it excluded the state of Zhao. Around 299 BC, the ruler of Zhao became the last of the rulers of the seven major states to proclaim himself "king". In 298 BC, Zhao offered Qin an alliance and Lord Mengchang was driven out of Qin. The remaining three allies, Qi, Wei and Han, attacked Qin, driving up the Yellow River below Shanxi to the Hangu Pass. After three years of fighting they took the pass and forced Qin to return territory to Han and Wei. They next inflicted major defeats on Yan and Chu. During the five-year administration of Lord Mengchang, Qi was the major power in China. In 294, Lord Mengchang was implicated in a coup d'état and fled to Wei. His alliance system collapsed. Qi and Qin made a truce and pursued their own interests. Qi moved south against the state of Song whilst the Qin general Bai Qi pushed back eastward against a Han/Wei alliance, gaining victory at the Battle of Yique. In 288, King Zhao of Qin and King Min of Qi took the title di (帝, 'emperor') of the west and east respectively. They swore a covenant and started planning an attack on Zhao. Su Dai and the second vertical alliance In 287 BC the strategist Su Dai, younger brother of Su Qin and possibly an agent of Yan, persuaded King Min that the Zhao war would only benefit Qin. King Min agreed and formed a 'vertical' alliance with the other states against Qin. 
Qin backed off, abandoned the presumptuous title of "Di", and restored territory to Wei and Zhao. In 286 Qi annexed the state of Song. The second horizontal alliance and fall of Qi By 285 BC, the success of Qi had frightened the other states. Under the leadership of Lord Mengchang, who was exiled in Wei, Qin, Zhao, Wei and Yan formed an alliance. Yan had normally been a relatively weak ally of Qi, and Qi feared little from this quarter, so Yan's onslaught under general Yue Yi came as a devastating surprise. Simultaneously, the other allies attacked from the west. Chu declared itself an ally of Qi but contented itself with annexing some territory to its north. Qi's armies were destroyed and its territory reduced to the two cities of Ju and Jimo. King Min himself was later captured and executed by his own followers. King Min was succeeded by King Xiang in 283 BC. His general Tian Dan was eventually able to restore much of Qi's territory, but Qi never regained the influence it had enjoyed under King Min. Qin and Zhao expansion In 278 BC, the Qin general Bai Qi attacked from Qin's new territory in Sichuan to the west of Chu. The Chu capital of Ying was captured and Chu's western lands on the Han River were lost. The effect was to shift Chu significantly to the east. After Chu was defeated in 278, the remaining great powers were Qin in the west and Zhao in the north-center. There was little room for diplomatic maneuver and matters were decided by wars. Zhao had been much strengthened by King Wuling of Zhao (325–299). In 307 he enlarged his cavalry by copying the northern nomads. In 306 he took more land in the northern Shanxi plateau. In 305 he defeated the north-eastern border state of Zhongshan. In 304 he pushed far to the north-west and occupied the east–west section of the Yellow River in the north of the Ordos Loop. King Huiwen of Zhao (298–266) chose able servants and expanded against the weakened Qi and Wei. In 296 his general Lian Po defeated two Qin armies. In 269 BC Fan Sui became chief advisor to Qin. He advocated authoritarian reforms, irrevocable expansion and an alliance with distant states to attack nearby states (the twenty-third of the Thirty-Six Stratagems). His maxim "attack not only the territory, but also the people" enunciated a policy of mass slaughter that became increasingly frequent. Qin-Zhao wars (282–257 BC) In 265 King Zhaoxiang of Qin made the first move by attacking the weak state of Han, which held the Yellow River gateway into Qin. He moved north-east across Wei territory to cut off the Han exclave of Shangdang north of Luoyang and south of Zhao. The Han king agreed to surrender Shangdang, but the local governor refused and presented it to King Xiaocheng of Zhao. Zhao sent out Lian Po, who based his armies at Changping, and Qin sent out general Wang He. Lian Po was too wise to risk a decisive battle with the Qin army and remained inside his fortifications. Qin could not break through and the armies were locked in stalemate for three years. The Zhao king decided that Lian Po was not aggressive enough and sent out Zhao Kuo, who promised a decisive battle. At the same time Qin secretly replaced Wang He with the notoriously violent Bai Qi. When Zhao Kuo left his fortifications, Bai Qi used a Cannae maneuver, falling back in the center and surrounding the Zhao army from the sides. After being surrounded for 46 days, the starving Zhao troops surrendered in September 260 BC. It is said that Bai Qi had all the prisoners killed and that Zhao lost 400,000 men. 
Qin was too exhausted to follow up its victory. Some time later it sent an army to besiege the Zhao capital, but the army was destroyed when it was attacked from the rear. Zhao survived, but there was no longer a state that could resist Qin on its own. The other states could have survived if they had remained united against Qin, but they did not. In 257 BC, the Qin army failed to take Handan and was defeated by the allied forces of Zhao, Wei and Chu at the Battle of Handan. End of Zhou dynasty (256–249 BC) The forces of King Zhao of Qin defeated King Nan of Zhou and conquered West Zhou in 256 BC, claiming the Nine Cauldrons and thereby symbolically becoming the Son of Heaven. King Zhao's exceptionally long reign ended in 251 BC. His son King Xiaowen, already an old man, died just three days after his coronation and was succeeded by his son King Zhuangxiang of Qin. The new Qin king proceeded to conquer East Zhou, seven years after the fall of West Zhou. Thus the 800-year Zhou dynasty, nominally China's longest-ruling regime, finally came to an end. Sima Qian contradicts himself regarding the ultimate fate of the East Zhou court. Chapter 4 (The Annals of Zhou) concludes with the sentence "thus the sacrifices of Zhou ended", but in the following chapter 5 (The Annals of Qin) we learn that "Qin did not prohibit their sacrifices; the Lord of Zhou was allotted a patch of land in Yangren where he could continue his ancestral sacrifices". Qin unites China (247–221 BC) King Zhuangxiang of Qin ruled for only three years. He was succeeded by his son Zheng, who, unlike the two elderly kings who preceded him, was only 13 years old at his coronation. As an adult, Zheng became a brilliant commander who, in the span of just nine years, unified China. Conquest of Han In 230 BC, Qin conquered Han. Han, the weakest of the Seven Warring States, was adjacent to the much stronger Qin, and had suffered continuous assaults by Qin in earlier years of the Warring States period. This went on until Emperor Qin Shi Huang sent general Wang Jian to attack Zhao. King An of Han, frightened by the thought that Han would be the next target of the Qin state, immediately sent diplomats to surrender the entire kingdom without a fight, saving the Han populace from the terrible potential consequences of an unsuccessful resistance. Conquest of Wei In 225 BC, Qin conquered Wei. The Qin army launched a direct invasion of Wei, besieging its capital Daliang, but soon realized that the city walls were too strong to breach. They devised a new strategy that utilized a local river linked to the Yellow River: the river was diverted to flood the city's walls, causing massive devastation to the city. Upon realizing the situation, King Jia of Wei hurriedly came out of the capital and surrendered it to the Qin army in order to avoid further bloodshed of his people. Conquest of Chu In 223 BC, Qin conquered Chu. The first invasion was, however, an utter disaster: 200,000 Qin troops, led by the general Li Xin, were defeated by 500,000 Chu troops in the unfamiliar territory of Huaiyang, in modern-day northern Jiangsu and Anhui provinces. Xiang Yan, the Chu commander, had lured Qin by allowing a few initial victories, but then counterattacked and burnt two large Qin camps. In 222 BC, Wang Jian was recalled to lead a second military invasion with 600,000 men against the Chu state. 
High in morale after their victory in the previous year, the Chu forces were content to sit back and defend against what they expected to be a siege of Chu. However, Wang Jian decided to weaken Chu's resolve and tricked the Chu army by appearing to be idle in his fortifications whilst secretly training his troops to fight in Chu territory. After a year, the Chu defenders decided to disband due to the apparent lack of action from Qin. Wang Jian invaded at that point, with full force, and overran Huaiyang and the remaining Chu forces. Chu lost the initiative and could only sustain local guerrilla-style resistance until it too was fully conquered with the destruction of Shouchun and the death of its last leader, Lord Changping, in 223 BC. At their peak, the combined armies of Chu and Qin are estimated to have ranged from hundreds of thousands to a million soldiers, more than those involved in the campaign of Changping between Qin and Zhao 35 years earlier. Conquest of Zhao and Yan In 222 BC, Qin conquered Zhao and Yan. After the conquest of Zhao, the Qin army turned its attention towards Yan. Realizing the danger and gravity of this situation, Crown Prince Dan of Yan had sent Jing Ke to assassinate King Zheng of Qin, but the attempt failed and only fueled the rage and determination of the Qin king, who increased the number of troops sent to conquer the Yan state. Conquest of Qi In 221 BC, Qin conquered Qi, the final unconquered state. Qi had not previously aided the other states while Qin was conquering them. As soon as Qin's intention to invade it became clear, Qi swiftly surrendered all its cities, completing the unification of China and ushering in the Qin dynasty. The last Qi king lived out his days in exile in Gong and was not given a posthumous name; he is therefore known to posterity by his personal name, Jian. Aftermath The Qin king Ying Zheng declared himself Qin Shi Huangdi, "the First Sovereign Emperor of Qin". Under Qin rule, the union was based solely on military power. Feudal holdings were abolished, and noble families were forced to live in the capital, Xianyang, where they could be supervised. A national road system (along with greater use of canals) allowed for faster and easier deployment and supply of the army. Peasants were given a wider range of land rights, though they were subject to taxation, which generated a large amount of revenue for the state. Military theory and practice Increasing scale of warfare The chariot remained a major factor in Chinese warfare long after it went out of fashion in the Middle East. Near the beginning of the Warring States period there was a shift from chariots to massed infantry, possibly associated with the invention of the crossbow. This had two major effects. First, it led the dukes to weaken their chariot-riding nobility so that they could gain direct access to the peasantry, who could be drafted as infantry. This change was associated with the shift from aristocratic to bureaucratic government. Second, it led to a massive increase in the scale of warfare. When the Zhou overthrew the Shang at the Battle of Muye, they used 45,000 troops and 300 chariots. 
For the Warring States period the following figures for the military strengths of various states are reported: For major battles, the following figures are reported: Many scholars think these numbers are exaggerated (records are inadequate, they are much larger than those from similar societies, soldiers were paid by the number of enemies they killed and the Han dynasty had an interest in exaggerating the bloodiness of the age before China was unified). Regardless of exaggeration, it seems clear that warfare had become excessive during this period. The bloodshed and misery of the Warring States period go a long way toward explaining China's traditional and current preference for a united throne. Military developments The Warring States period saw the introduction of many innovations to the art of warfare in China, such as the use of iron and of cavalry. Warfare in the Warring States period evolved considerably from the Spring and Autumn period, as most armies made use of infantry and cavalry in battles, and the use of chariots became less widespread. The use of massed infantry made warfare bloodier and reduced the importance of the aristocracy, which in turn made the kings more despotic. From this period onward, as the various states competed with each other by mobilizing their armies to war, nobles in China belonged to the literate class, rather than to the warrior class as had previously been the case. The various states fielded massive armies of infantry, cavalry, and chariots. Complex logistical systems maintained by efficient government bureaucracies were needed to supply, train, and control such large forces. The size of the armies ranged from tens of thousands to several hundred thousand men. Iron weapons became more widespread and began to replace bronze. Most armour and weapons of this period were made from iron. The first official native Chinese cavalry unit was formed in 307 BC during the military reforms of King Wuling of Zhao, who advocated 'nomadic dress and horse archery'. But the war chariot still retained its prestige and importance, despite the tactical superiority of cavalry. The crossbow was the preferred long-range weapon of this period, for several reasons. It could be mass-produced easily, and crossbowmen could be trained in large numbers, qualities that made it a powerful battlefield weapon. Infantrymen deployed a variety of weapons, but the most popular was the dagger-axe. The dagger-axe came in various lengths, from 9 to 18 feet; the weapon consisted of a thrusting spear with a slashing blade appended to it. Dagger-axes were an extremely popular weapon in various kingdoms, especially for the Qin, who produced 18-foot-long pike-like weapons. The qiang battle spear was known as the 'king' (wang) of all ancient weapons. It had the biggest impact on the battlefield and was quite difficult to master. The second important weapon of the era was the jian, a double-edged battle sword. The fighting methods used with the qiang spear and the jian sword were very different from those seen in movies or re-enactment shows today. Professional warriors of the era applied the military concepts of Master Sun Tzu and created several successful "Ge Dou" martial schools. 
Military thought The Warring States was a great period for military strategy; of the Seven Military Classics of China, four were written during this period: Culture and society The Warring States period was an era of warfare in ancient China, as well as of bureaucratic and military reforms and consolidation; the major states, ruling over large territories, quickly sought to consolidate their powers, leading to the final erosion of the Zhou court's prestige. As a sign of this shift, the rulers of all the major states (except for Chu, which had claimed the kingly title much earlier) abandoned their former feudal titles for the title of 王, or King, claiming equality with the rulers of the Zhou. At the same time, the constant conflict and need for innovative social and political models led to the development of many philosophical doctrines, later known as the Hundred Schools of Thought. The most notable schools of thought include Mohism (expounded by Mozi), Confucianism (represented by Mencius and Xunzi), Legalism (represented by Shang Yang, Shen Buhai, Shen Dao and Han Fei) and Taoism (represented by Zhuangzi and Lao Tzu). The many competing states attempted to display their power not only militarily but also in their courts and in state philosophy. Rulers adopted differing philosophies to their own advantage or that of their kingdom. Mencius attempted to establish Confucianism as a state philosophy, proposing that by governing through moral principles such as benevolence and righteousness, a state would win popular support both at home and from its neighbours, eliminating the need for war altogether. Mencius attempted to convince King Hui of Liang of this, although he was unsuccessful, since the king saw no advantage in it during a period of wars. Mohism, developed by Mozi (468–376 BC), provided a unified moral and political philosophy based on impartiality and benevolence. Mohists believed that people change depending on their environment; the same applied to rulers, which is why one had to be cautious of foreign influences. Mozi was very much against warfare, although he was a great tactician in defense. He defended the small state of Song from repeated attacks by the Chu state. Taoism, advocated by Laozi, held that human nature was good and could achieve perfection by returning to its original state. It held that, like a baby, humans begin simple and innocent, but with the development of civilization they lose their innocence, which is replaced by fraud and greed. Contrary to the other schools, it did not seek influence in state offices; Laozi even refused to serve as a minister of the state of Chu. Legalism, created by Shang Yang in 338 BC, rejected all notions of religion and religious practice and held that a nation should be governed by strict law. Not only were severe punishments applied, but people were grouped into families that were made mutually responsible for criminal acts. It proposed radical reforms and established a society based on rigid ranks. Peasants were encouraged to practice agriculture as their occupation, and military performance was rewarded. Laws were also applied to all ranks with no exception; even the king was not above punishment. The philosophy was adopted by the Qin state and transformed it into an organized, centralized state with a bureaucracy chosen on the basis of merit. This period is most famous for the establishment of complex bureaucracies and centralized governments, as well as a clear legal system. 
The developments in political and military organization were the basis of the power of the Qin state, which conquered the other states and unified them under the Qin dynasty in 221 BC. Nobles, bureaucrats and reformers The phenomenon of intensive warfare, based on mass formations of infantry rather than the traditional chariots, was one major trend which led to the creation of strong central bureaucracies in each of the major states. At the same time, the process of secondary feudalism which permeated the Spring and Autumn period, and led to such events as the partition of Jin and the usurpation of Qi by the Tian clan, was eventually reversed by the same process of bureaucratisation. Under the demands of warfare, the states adopted bureaucratic reforms in the Warring States period. Wei adopted these in 445 BC, Zhao in 403 BC, Chu in 390 BC, Han in 355 BC, Qi in 357 BC and Qin in 350 BC. Power was centralised by curbing the landed aristocracy and sinecures and by creating a new hierarchy, drawn from the lower rungs of society, based on meritorious service to the state. Systematic auditing and reporting systems were created, and officials were given fixed salaries. The reforms of Shang Yang in Qin, and of Wu Qi in Chu, both centred on increased centralisation, the suppression of the nobility, and a vastly increased scope of government based on Legalist ideals, which were necessary to mobilise the large armies of the period. Sophisticated arithmetic A bundle of 21 bamboo slips from the Tsinghua collection, dated to 305 BC, is the world's earliest example of a two-digit decimal multiplication table, indicating that sophisticated commercial arithmetic was already established during this period. Rod numerals were used to represent both negative and positive integers as well as rational numbers in a true positional number system, with a blank for zero, dating back to the Warring States period. The nine linked-rings puzzle, an advanced puzzle device which requires mathematical analysis to solve, was invented during the period. Literature An important literary achievement of the Warring States period is the Zuo Commentary on the Spring and Autumn Annals, which summarizes the preceding Spring and Autumn period. The less famous work Guoyu is thought to be by the same author. Many sayings of Spring and Autumn philosophers, which had previously been circulated orally, were put into writing during the Warring States period. These include the Analects and The Art of War. Economic developments The Warring States period saw the proliferation of ironworking in China, with iron replacing bronze as the dominant metal used in warfare. Areas such as Shu (present-day Sichuan) and Yue (present-day Zhejiang) were also brought into the Chinese cultural sphere during this time. Trade also became important, and some merchants had considerable power in politics, the most prominent of whom was Lü Buwei, who rose to become Chancellor of Qin and was a key supporter of the eventual Qin Shihuang. At the same time, the increased resources of consolidated, bureaucratic states, coupled with the logistical needs of mass levies and large-scale warfare, led to the proliferation of economic projects such as large-scale waterworks. Major examples of such waterworks include the Dujiangyan Irrigation System, which controlled the Min River in Sichuan and turned the former backwater region into a major Qin logistical base, and the Zhengguo Canal, which irrigated large areas of land in the Guanzhong Plain, again increasing Qin's agricultural output. 
The Guanzi is considered one of the most foundational texts of the developing political economy in the Warring States period. It addresses principles of price regulation in the context of effectively dealing with commodities that are "light" (connoting a commodity which is unimportant, non-essential, or inexpensive) or "heavy" (a commodity which is important, essential, or expensive) and how whether a commodity is "light" or "heavy" is understood in relation to other commodities. In summary:""" # fmt: on input_ids = tokenizer(LONG_TEXT, return_tensors="pt").input_ids.to(torch_device) generated_ids = model.generate(input_ids, max_new_tokens=50)[:, input_ids.shape[1] :] torch.testing.assert_close(generated_ids.cpu(), torch.tensor([[ 279, 467, 19859, 4180, 4168, 572, 264, 4168, 315, 2244, 2297, 304, 5616, 11, 504, 279, 8606, 94350, 1849, 311, 279, 79395, 1584, 11, 504, 279, 8606, 6277, 311, 279, 6277, 11, 504, 279, 8606, 8584, 311, 279, 6955, 11, 323, 504, 279, 8606, 7674, 311, 279, 12752, 13, 576]], dtype=torch.long)) # fmt: skip self.assertEqual( tokenizer.decode(generated_ids[0]), """ the Warring States period was a period of great change in China, from the traditional feudal system to the bureaucratic state, from the traditional military to the military, from the traditional economy to the economic, and from the traditional culture to the cultural. The""", ) model.config._attn_implementation = "eager" new_generated_ids = model.generate(input_ids, max_new_tokens=50)[:, input_ids.shape[1] :] with self.subTest("Eager matches sdpa"): torch.testing.assert_close(generated_ids, new_generated_ids, rtol=1e-4, atol=1e-4) # `flex_attention` gives `torch._inductor.exc.InductorError: RuntimeError: No valid triton configs. OutOfMemoryError: out of resource: triton_tem_fused_0 Required: 147456 Hardware limit:101376 Reducing block sizes or `num_stages` may help.` # Impossible to test it with this model (even with < 100 tokens), probably due to the compilation of a large model. # model.config._attn_implementation = "flex_attention" # new_generated_ids = model.generate(input_ids, max_new_tokens=50)[:, input_ids.shape[1] :] # with self.subTest("Eager matches Flex attention"): # torch.testing.assert_close(generated_ids, new_generated_ids, rtol=1e-4, atol=1e-4) model.config._attn_implementation = "flash_attention_2" new_generated_ids = model.generate(input_ids, max_new_tokens=50)[:, input_ids.shape[1] :] with self.subTest("Eager matches flash attention"): torch.testing.assert_close(generated_ids, new_generated_ids, rtol=1e-4, atol=1e-4)
Qwen3IntegrationTest
python
pandas-dev__pandas
asv_bench/benchmarks/categoricals.py
{ "start": 8506, "end": 9348 }
class ____: def setup(self): N = 10**5 self.index = pd.CategoricalIndex(range(N), range(N)) self.series = pd.Series(range(N), index=self.index).sort_index() self.category = self.index[500] def time_get_loc(self): self.index.get_loc(self.category) def time_shallow_copy(self): self.index._view() def time_align(self): pd.DataFrame({"a": self.series, "b": self.series[:500]}) def time_intersection(self): self.index[:750].intersection(self.index[250:]) def time_unique(self): self.index.unique() def time_reindex(self): self.index.reindex(self.index[:500]) def time_reindex_missing(self): self.index.reindex(["a", "b", "c", "d"]) def time_sort_values(self): self.index.sort_values(ascending=False)
Indexing
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-duckdb/destination_duckdb/destination.py
{ "start": 895, "end": 8786 }
class ____(Destination): @staticmethod def _get_destination_path(destination_path: str) -> str: """ Get a normalized version of the destination path. Automatically append /local/ to the start of the path """ if destination_path.startswith("md:") or destination_path.startswith("motherduck:"): return destination_path if not destination_path.startswith("/local"): destination_path = os.path.join("/local", destination_path) destination_path = os.path.normpath(destination_path) if not destination_path.startswith("/local"): raise ValueError( f"destination_path={destination_path} is not a valid path." "A valid path shall start with /local or no / prefix" ) return destination_path def write( self, config: Mapping[str, Any], configured_catalog: ConfiguredAirbyteCatalog, input_messages: Iterable[AirbyteMessage], ) -> Iterable[AirbyteMessage]: """ Reads the input stream of messages, config, and catalog to write data to the destination. This method returns an iterable (typically a generator of AirbyteMessages via yield) containing state messages received in the input message stream. Outputting a state message means that every AirbyteRecordMessage which came before it has been successfully persisted to the destination. This is used to ensure fault tolerance in the case that a sync fails before fully completing, then the source is given the last state message output from this method as the starting point of the next sync. :param config: dict of JSON configuration matching the configuration declared in spec.json :param input_messages: The stream of input messages received from the source :param configured_catalog: The Configured Catalog describing the schema of the data being received and how it should be persisted in the destination :return: Iterable of AirbyteStateMessages wrapped in AirbyteMessage structs """ streams = {s.stream.name for s in configured_catalog.streams} logger.info(f"Starting write to DuckDB with {len(streams)} streams") path = str(config.get("destination_path")) path = self._get_destination_path(path) schema_name = validated_sql_name(config.get("schema", CONFIG_DEFAULT_SCHEMA)) # Get and register auth token if applicable motherduck_api_key = str(config.get(CONFIG_MOTHERDUCK_API_KEY, "")) duckdb_config = {} if motherduck_api_key: duckdb_config["motherduck_token"] = motherduck_api_key duckdb_config["custom_user_agent"] = "airbyte" con = duckdb.connect(database=path, read_only=False, config=duckdb_config) con.execute(f"CREATE SCHEMA IF NOT EXISTS {schema_name}") for configured_stream in configured_catalog.streams: name = configured_stream.stream.name table_name = f"_airbyte_raw_{name}" if configured_stream.destination_sync_mode == DestinationSyncMode.overwrite: # delete the tables logger.info(f"Dropping tables for overwrite: {table_name}") query = f"DROP TABLE IF EXISTS {schema_name}.{table_name}" con.execute(query) # create the table if needed query = f""" CREATE TABLE IF NOT EXISTS {schema_name}.{table_name} ( _airbyte_ab_id TEXT PRIMARY KEY, _airbyte_emitted_at DATETIME, _airbyte_data JSON ) """ con.execute(query) buffer = defaultdict(lambda: defaultdict(list)) for message in input_messages: if message.type == Type.STATE: # flush the buffer for stream_name in buffer.keys(): logger.info(f"flushing buffer for state: {message}") DestinationDuckdb._safe_write(con=con, buffer=buffer, schema_name=schema_name, stream_name=stream_name) buffer = defaultdict(lambda: defaultdict(list)) yield message elif message.type == Type.RECORD: data = message.record.data stream_name = message.record.stream 
if stream_name not in streams: logger.debug(f"Stream {stream_name} was not present in configured streams, skipping") continue # add to buffer buffer[stream_name]["_airbyte_ab_id"].append(str(uuid.uuid4())) buffer[stream_name]["_airbyte_emitted_at"].append(datetime.datetime.now().isoformat()) buffer[stream_name]["_airbyte_data"].append(json.dumps(data)) else: logger.info(f"Message type {message.type} not supported, skipping") # flush any remaining messages for stream_name in buffer.keys(): DestinationDuckdb._safe_write(con=con, buffer=buffer, schema_name=schema_name, stream_name=stream_name) @staticmethod def _safe_write(*, con: duckdb.DuckDBPyConnection, buffer: Dict[str, Dict[str, List[Any]]], schema_name: str, stream_name: str): table_name = f"_airbyte_raw_{stream_name}" try: pa_table = pa.Table.from_pydict(buffer[stream_name]) except: logger.exception( f"Writing with pyarrow view failed, falling back to writing with executemany. Expect some performance degradation." ) query = f""" INSERT INTO {schema_name}.{table_name} (_airbyte_ab_id, _airbyte_emitted_at, _airbyte_data) VALUES (?,?,?) """ entries_to_write = buffer[stream_name] con.executemany( query, zip(entries_to_write["_airbyte_ab_id"], entries_to_write["_airbyte_emitted_at"], entries_to_write["_airbyte_data"]) ) else: # DuckDB will automatically find and SELECT from the `pa_table` # local variable defined above. con.sql(f"INSERT INTO {schema_name}.{table_name} SELECT * FROM pa_table") def check(self, logger: logging.Logger, config: Mapping[str, Any]) -> AirbyteConnectionStatus: """ Tests if the input configuration can be used to successfully connect to the destination with the needed permissions e.g: if a provided API token or password can be used to connect and write to the destination. :param logger: Logging object to display debug/info/error to the logs (logs will not be accessible via airbyte UI if they are not passed to this logger) :param config: Json object containing the configuration of this destination, content of this json is as specified in the properties of the spec.json file :return: AirbyteConnectionStatus indicating a Success or Failure """ try: path = config.get("destination_path") path = self._get_destination_path(path) if path.startswith("/local"): logger.info(f"Using DuckDB file at {path}") os.makedirs(os.path.dirname(path), exist_ok=True) duckdb_config = {} if CONFIG_MOTHERDUCK_API_KEY in config: duckdb_config["motherduck_token"] = str(config[CONFIG_MOTHERDUCK_API_KEY]) duckdb_config["custom_user_agent"] = "airbyte" con = duckdb.connect(database=path, read_only=False, config=duckdb_config) con.execute("SELECT 1;") return AirbyteConnectionStatus(status=Status.SUCCEEDED) except Exception as e: return AirbyteConnectionStatus(status=Status.FAILED, message=f"An exception occurred: {repr(e)}")
DestinationDuckdb
python
getsentry__sentry
src/sentry/dashboards/endpoints/organization_dashboard_details.py
{ "start": 7887, "end": 9050 }
class ____(OrganizationDashboardBase): publish_status = { "POST": ApiPublishStatus.PRIVATE, } def post( self, request: Request, organization: Organization, dashboard: Dashboard | dict[Any, Any] ) -> Response: """ Update last_visited and increment visits counter """ if not features.has(EDIT_FEATURE, organization, actor=request.user): return Response(status=404) if isinstance(dashboard, dict): return Response(status=204) dashboard.visits = F("visits") + 1 dashboard.last_visited = timezone.now() dashboard.save(update_fields=["visits", "last_visited"]) org_member = OrganizationMember.objects.filter( user_id=request.user.pk, organization_id=organization.id ).first() if not org_member: return Response(status=403) DashboardLastVisited.objects.create_or_update( dashboard=dashboard, member=org_member, values={"last_visited": timezone.now()}, ) return Response(status=204) @region_silo_endpoint
OrganizationDashboardVisitEndpoint
python
celery__celery
t/unit/app/test_trace.py
{ "start": 106, "end": 5083 }
class ____: """Unit tests for traceback_clear function.""" def test_uses_exc_argument(self): """Test that traceback_clear(exc) correctly uses the exc argument. This test proves that the reported issue about traceback_clear not using the exc argument is NOT valid. The function does use the exc argument correctly. """ # Create exception with traceback def create_exception_with_traceback(): """Create an exception with a traceback for testing.""" try: # Create a nested call stack to have frames to clear def inner_function(): x = "some_local_variable" * 1000 # Create local variable # noqa: F841 y = list(range(1000)) # Another local variable # noqa: F841 raise ValueError("Test exception with traceback") def outer_function(): z = "outer_local_variable" * 1000 # Local variable in outer frame # noqa: F841 inner_function() outer_function() except Exception as e: return e # Test 1: traceback_clear(exc) with provided exception exc = create_exception_with_traceback() # Verify exception has traceback exc_tb = getattr(exc, '__traceback__', None) assert exc_tb is not None, "Exception should have traceback" # Count initial frames initial_frames = [] tb = exc_tb while tb is not None: initial_frames.append(tb.tb_frame) tb = tb.tb_next assert len(initial_frames) > 0, "Should have traceback frames" # Verify frames have local variables before clearing frame_locals_before = [] for frame in initial_frames: frame_locals_before.append(len(frame.f_locals)) assert any(count > 0 for count in frame_locals_before), "Frames should have local variables" # Call traceback_clear with the exception - this should use exc argument traceback_clear(exc) # Verify frames are cleared exc_tb_after = getattr(exc, '__traceback__', None) assert exc_tb_after is not None, "Traceback should still exist after clearing" tb = exc_tb_after frames_after = [] while tb is not None: frames_after.append(tb.tb_frame) tb = tb.tb_next # Check that frame locals are cleared cleared_count = 0 for frame in frames_after: if len(frame.f_locals) == 0: cleared_count += 1 assert cleared_count == len(frames_after), "All frames should be cleared" # Verify the function actually used the exc argument by checking traceback still exists assert getattr(exc, '__traceback__', None) is not None, ( "Traceback should still exist but frames should be cleared" ) def test_without_exc_argument(self): """Test traceback_clear() without exc argument uses sys.exc_info().""" try: def test_function(): local_var = "test" * 1000 # noqa: F841 raise RuntimeError("Test exception") test_function() except Exception: # Now we're in except block with active traceback _, _, tb_before = sys.exc_info() assert tb_before is not None, "Should have active traceback" # Call traceback_clear without argument - should use sys.exc_info() traceback_clear() # Test passes if no exception is raised def test_with_none(self): """Test traceback_clear(None) uses sys.exc_info() fallback.""" try: def test_function(): local_var = "test" * 1000 # noqa: F841 raise RuntimeError("Test exception") test_function() except Exception: # Call with None - should fall back to sys.exc_info() traceback_clear(None) # Test passes if no exception is raised def test_with_exception_no_traceback(self): """Test traceback_clear with exception that has no __traceback__.""" # Create exception without traceback exc = ValueError("Test exception") # Should not raise exception traceback_clear(exc) def test_handles_runtime_error(self): """Test that traceback_clear handles RuntimeError when frame is executing.""" # This test is mainly for 
coverage - RuntimeError handling is internal # and difficult to trigger in normal circumstances try: def test_function(): local_var = "test" * 1000 # noqa: F841 raise RuntimeError("Test exception") test_function() except Exception as exc: # Should not raise exception even if RuntimeError occurs internally traceback_clear(exc)
test_traceback_clear
python
apache__airflow
providers/google/src/airflow/providers/google/cloud/operators/vertex_ai/endpoint_service.py
{ "start": 8078, "end": 12946 }
class ____(GoogleCloudBaseOperator): """ Deploys a Model into this Endpoint, creating a DeployedModel within it. :param project_id: Required. The ID of the Google Cloud project that the service belongs to. :param region: Required. The ID of the Google Cloud region that the service belongs to. :param endpoint_id: Required. The name of the Endpoint resource into which to deploy a Model. Format: ``projects/{project}/locations/{location}/endpoints/{endpoint}`` :param deployed_model: Required. The DeployedModel to be created within the Endpoint. Note that [Endpoint.traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] must be updated for the DeployedModel to start receiving traffic, either as part of this call, or via [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1.EndpointService.UpdateEndpoint]. :param traffic_split: A map from a DeployedModel's ID to the percentage of this Endpoint's traffic that should be forwarded to that DeployedModel. If this field is non-empty, then the Endpoint's [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] will be overwritten with it. To refer to the ID of the just being deployed Model, a "0" should be used, and the actual ID of the new DeployedModel will be filled in its place by this method. The traffic percentage values must add up to 100. If this field is empty, then the Endpoint's [traffic_split][google.cloud.aiplatform.v1.Endpoint.traffic_split] is not updated. :param retry: Designation of what errors, if any, should be retried. :param timeout: The timeout for this request. :param metadata: Strings which should be sent along with the request as metadata. :param gcp_conn_id: The connection ID to use connecting to Google Cloud. :param impersonation_chain: Optional service account to impersonate using short-term credentials, or chained list of accounts required to get the access_token of the last account in the list, which will be impersonated in the request. If set as a string, the account must grant the originating account the Service Account Token Creator IAM role. If set as a sequence, the identities from the list must grant Service Account Token Creator IAM role to the directly preceding identity, with first account from the list granting this role to the originating account (templated). 
""" template_fields = ("region", "endpoint_id", "project_id", "deployed_model", "impersonation_chain") operator_extra_links = (VertexAIModelLink(),) def __init__( self, *, region: str, project_id: str, endpoint_id: str, deployed_model: DeployedModel | dict, traffic_split: Sequence | dict | None = None, retry: Retry | _MethodDefault = DEFAULT, timeout: float | None = None, metadata: Sequence[tuple[str, str]] = (), gcp_conn_id: str = "google_cloud_default", impersonation_chain: str | Sequence[str] | None = None, **kwargs, ) -> None: super().__init__(**kwargs) self.region = region self.project_id = project_id self.endpoint_id = endpoint_id self.deployed_model = deployed_model self.traffic_split = traffic_split self.retry = retry self.timeout = timeout self.metadata = metadata self.gcp_conn_id = gcp_conn_id self.impersonation_chain = impersonation_chain @property def extra_links_params(self) -> dict[str, Any]: return { "region": self.region, "project_id": self.project_id, } def execute(self, context: Context): hook = EndpointServiceHook( gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain, ) self.log.info("Deploying model") operation = hook.deploy_model( project_id=self.project_id, region=self.region, endpoint=self.endpoint_id, deployed_model=self.deployed_model, traffic_split=self.traffic_split, retry=self.retry, timeout=self.timeout, metadata=self.metadata, ) result = hook.wait_for_operation(timeout=self.timeout, operation=operation) deploy_model = endpoint_service.DeployModelResponse.to_dict(result) deployed_model_id = hook.extract_deployed_model_id(deploy_model) self.log.info("Model was deployed. Deployed Model ID: %s", deployed_model_id) context["ti"].xcom_push(key="deployed_model_id", value=deployed_model_id) VertexAIModelLink.persist(context=context, model_id=deployed_model_id) return deploy_model
DeployModelOperator
python
huggingface__transformers
tests/models/clip/test_processing_clip.py
{ "start": 878, "end": 1026 }
class ____(ProcessorTesterMixin, unittest.TestCase): processor_class = CLIPProcessor model_id = "openai/clip-vit-base-patch32"
CLIPProcessorTest
python
django__django
tests/forms_tests/field_tests/test_multivaluefield.py
{ "start": 541, "end": 807 }
class ____(Form): f = PartiallyRequiredField( fields=(CharField(required=True), CharField(required=False)), required=True, require_all_fields=False, widget=MultiWidget(widgets=[TextInput(), TextInput()]), )
PartiallyRequiredForm
python
numpy__numpy
numpy/matrixlib/tests/test_matrix_linalg.py
{ "start": 1298, "end": 1356 }
class ____(InvCases, MatrixTestCase): pass
TestInvMatrix
python
gevent__gevent
src/gevent/tests/test__core_timer.py
{ "start": 2381, "end": 4330 }
class ____(Test): # On CI, with *all* backends, sometimes we get timer values of # 0.02 or higher. @reraises_flaky_timeout(AssertionError) def test_resolution(self): # pylint:disable=too-many-locals # Make sure that having an active IO watcher # doesn't badly throw off our timer resolution. # (This was a specific problem with libuv) # https://github.com/gevent/gevent/pull/1194 from gevent._compat import perf_counter import socket s = socket.socket() self._close_on_teardown(s) fd = s.fileno() ran_at_least_once = False fired_at = [] def timer_counter(): fired_at.append(perf_counter()) loop = self.loop timer_multiplier = 11 max_time = self.timer_duration * timer_multiplier assert max_time < 0.3 for _ in range(150): # in libuv, our signal timer fires every 300ms; depending on # when this runs, we could artificially get a better # resolution than we expect. Run it multiple times to be more sure. io = loop.io(fd, 1) io.start(lambda events=None: None) now = perf_counter() del fired_at[:] timer = self.timer timer.start(timer_counter) loop.run(once=True) io.stop() io.close() timer.stop() if fired_at: ran_at_least_once = True self.assertEqual(1, len(fired_at)) self.assertTimeWithinRange(fired_at[0] - now, 0, max_time) if not greentest.RUNNING_ON_CI: # Hmm, this always fires locally on mocOS but # not an Travis? self.assertTrue(ran_at_least_once) if __name__ == '__main__': greentest.main()
TestTimerResolution
python
pytorch__pytorch
torchgen/model.py
{ "start": 80203, "end": 80738 }
class ____(Type): elem: Type size: int | None def __str__(self) -> str: size = f"{self.size}" if self.size else "" return f"{self.elem}[{size}]" def is_base_ty_like(self, base_ty: BaseTy) -> bool: return self.elem.is_base_ty_like(base_ty) def is_symint_like(self) -> bool: return self.elem.is_symint_like() def is_nullable(self) -> bool: return self.elem.is_nullable() def is_list_like(self) -> ListType | None: return self @dataclass(frozen=True)
ListType
python
tox-dev__tox
src/tox/tox_env/python/package.py
{ "start": 756, "end": 961 }
class ____(PathPackage): def __init__(self, path: Path, deps: Sequence[Requirement]) -> None: super().__init__(path=path) self.deps: Sequence[Requirement] = deps
PythonPathPackageWithDeps
python
huggingface__transformers
tests/trainer/test_trainer.py
{ "start": 7649, "end": 8403 }
class ____: def __enter__(self): gc.collect() if torch_device in ["cuda", "xpu"]: backend_empty_cache(torch_device) backend_reset_max_memory_allocated(torch_device) # reset the peak gauge to zero self.begin = backend_memory_allocated(torch_device) return self def __exit__(self, *exc): gc.collect() if torch_device in ["cuda", "xpu"]: backend_empty_cache(torch_device) self.end = backend_memory_allocated(torch_device) self.peak = backend_max_memory_allocated(torch_device) self.used = bytes2megabytes(self.end - self.begin) self.peaked = bytes2megabytes(self.peak - self.begin) @dataclasses.dataclass
TorchTracemalloc
python
gevent__gevent
src/gevent/tests/test__greenletset.py
{ "start": 603, "end": 4932 }
class ____(greentest.TestCase): __timeout__ = greentest.LARGE_TIMEOUT def test_basic(self): s = pool.Group() s.spawn(gevent.sleep, timing.LARGE_TICK) self.assertEqual(len(s), 1, s) s.spawn(gevent.sleep, timing.LARGE_TICK * 5) self.assertEqual(len(s), 2, s) gevent.sleep() gevent.sleep(timing.LARGE_TICK * 2 + timing.LARGE_TICK_MIN_ADJ) self.assertEqual(len(s), 1, s) gevent.sleep(timing.LARGE_TICK * 5 + timing.LARGE_TICK_MIN_ADJ) self.assertFalse(s) def test_waitall(self): s = pool.Group() s.spawn(gevent.sleep, DELAY) s.spawn(gevent.sleep, DELAY * 2) assert len(s) == 2, s start = time.time() s.join(raise_error=True) delta = time.time() - start self.assertFalse(s) self.assertEqual(len(s), 0) self.assertTimeWithinRange(delta, DELAY * 1.9, DELAY * 2.5) def test_kill_block(self): s = pool.Group() s.spawn(gevent.sleep, DELAY) s.spawn(gevent.sleep, DELAY * 2) assert len(s) == 2, s start = time.time() s.kill() self.assertFalse(s) self.assertEqual(len(s), 0) delta = time.time() - start assert delta < DELAY * 0.8, delta def test_kill_noblock(self): s = pool.Group() s.spawn(gevent.sleep, DELAY) s.spawn(gevent.sleep, DELAY * 2) assert len(s) == 2, s s.kill(block=False) assert len(s) == 2, s gevent.sleep(0.0001) self.assertFalse(s) self.assertEqual(len(s), 0) def test_kill_fires_once(self): u1 = Undead() u2 = Undead() p1 = gevent.spawn(u1) p2 = gevent.spawn(u2) def check(count1, count2): self.assertTrue(p1) self.assertTrue(p2) self.assertFalse(p1.dead, p1) self.assertFalse(p2.dead, p2) self.assertEqual(u1.shot_count, count1) self.assertEqual(u2.shot_count, count2) gevent.sleep(0.01) s = pool.Group([p1, p2]) self.assertEqual(len(s), 2, s) check(0, 0) s.killone(p1, block=False) check(0, 0) gevent.sleep(0) check(1, 0) s.killone(p1) check(1, 0) s.killone(p1) check(1, 0) s.kill(block=False) s.kill(block=False) s.kill(block=False) check(1, 0) gevent.sleep(DELAY) check(1, 1) X = object() kill_result = gevent.with_timeout(DELAY, s.kill, block=True, timeout_value=X) assert kill_result is X, repr(kill_result) assert len(s) == 2, s check(1, 1) p1.kill(SpecialError) p2.kill(SpecialError) def test_killall_subclass(self): p1 = GreenletSubclass.spawn(lambda: 1 / 0) p2 = GreenletSubclass.spawn(lambda: gevent.sleep(10)) s = pool.Group([p1, p2]) s.kill() def test_killall_iterable_argument_non_block(self): p1 = GreenletSubclass.spawn(lambda: gevent.sleep(0.5)) p2 = GreenletSubclass.spawn(lambda: gevent.sleep(0.5)) s = set() s.add(p1) s.add(p2) gevent.killall(s, block=False) gevent.sleep(0.5) for g in s: assert g.dead def test_killall_iterable_argument_timeout_not_started(self): def f(): try: gevent.sleep(1.5) except: # pylint:disable=bare-except gevent.sleep(1) p1 = GreenletSubclass.spawn(f) p2 = GreenletSubclass.spawn(f) s = set() s.add(p1) s.add(p2) gevent.killall(s, timeout=0.5) for g in s: self.assertTrue(g.dead, g) def test_killall_iterable_argument_timeout_started(self): def f(): try: gevent.sleep(1.5) except: # pylint:disable=bare-except gevent.sleep(1) p1 = GreenletSubclass.spawn(f) p2 = GreenletSubclass.spawn(f) s = set() s.add(p1) s.add(p2) # Get them both running. gevent.sleep(timing.SMALLEST_RELIABLE_DELAY) with self.assertRaises(Timeout): gevent.killall(s, timeout=0.5) for g in s: self.assertFalse(g.dead, g)
Test
python
cython__cython
Cython/Compiler/Nodes.py
{ "start": 14177, "end": 14644 }
class ____: # Mixin class for nodes representing a declaration block. def generate_cached_builtins_decls(self, env, code): entries = env.global_scope().undeclared_cached_builtins for entry in entries: code.globalstate.add_cached_builtin_decl(entry) del entries[:] def generate_lambda_definitions(self, env, code): for node in env.lambda_defs: node.generate_function_definitions(env, code)
BlockNode
python
PyCQA__pydocstyle
src/tests/parser_test.py
{ "start": 149, "end": 28193 }
class ____(io.StringIO): """A code snippet. Automatically wraps snippet as a file-like object and handles line wraps. """ def __init__(self, code_string): """Initialize the object.""" io.StringIO.__init__(self, textwrap.dedent(code_string)) def test_function(): """Test parsing of a simple function.""" parser = Parser() code = CodeSnippet("""\ def do_something(pos_param0, pos_param1, kw_param0="default"): \"""Do something.\""" return None """) module = parser.parse(code, 'file_path') assert module.is_public assert module.dunder_all is None function, = module.children assert function.name == 'do_something' assert function.decorators == [] assert function.children == [] assert function.docstring == '"""Do something."""' assert function.docstring.start == 2 assert function.docstring.end == 2 assert function.kind == 'function' assert function.parent == module assert function.start == 1 assert function.end == 3 assert function.error_lineno == 2 assert function.source == code.getvalue() assert function.is_public assert str(function) == 'in public function `do_something`' def test_simple_fstring(): """Test parsing of a function with a simple fstring as a docstring.""" parser = Parser() code = CodeSnippet("""\ def do_something(pos_param0, pos_param1, kw_param0="default"): f\"""Do something.\""" return None """) module = parser.parse(code, 'file_path') assert module.is_public assert module.dunder_all is None function, = module.children assert function.name == 'do_something' assert function.decorators == [] assert function.children == [] assert function.docstring == 'f"""Do something."""' assert function.docstring.start == 2 assert function.docstring.end == 2 assert function.kind == 'function' assert function.parent == module assert function.start == 1 assert function.end == 3 assert function.error_lineno == 2 assert function.source == code.getvalue() assert function.is_public assert str(function) == 'in public function `do_something`' def test_fstring_with_args(): """Test parsing of a function with an fstring with args as a docstring.""" parser = Parser() code = CodeSnippet("""\ foo = "bar" bar = "baz" def do_something(pos_param0, pos_param1, kw_param0="default"): f\"""Do some {foo} and some {bar}.\""" return None """) module = parser.parse(code, 'file_path') assert module.is_public assert module.dunder_all is None function, = module.children assert function.name == 'do_something' assert function.decorators == [] assert function.children == [] assert function.docstring == 'f"""Do some {foo} and some {bar}."""' assert function.docstring.start == 4 assert function.docstring.end == 4 assert function.kind == 'function' assert function.parent == module assert function.start == 3 assert function.end == 5 assert function.error_lineno == 4 assert function.source == textwrap.dedent("""\ def do_something(pos_param0, pos_param1, kw_param0="default"): f\"""Do some {foo} and some {bar}.\""" return None """) assert function.is_public assert str(function) == 'in public function `do_something`' def test_decorated_function(): """Test parsing of a simple function with a decorator.""" parser = Parser() code = CodeSnippet("""\ @single_decorator def do_something(): \"""Do something.\""" return None """) module = parser.parse(code, 'file_path') function, = module.children assert function.name == 'do_something' assert len(function.decorators) == 1 assert function.decorators[0].name == 'single_decorator' assert function.children == [] assert function.docstring == '"""Do something."""' assert function.kind == 'function' 
assert function.parent == module assert function.start == 2 assert function.end == 4 assert function.source == textwrap.dedent("""\ def do_something(): \"""Do something.\""" return None """) assert function.is_public assert str(function) == 'in public function `do_something`' def test_nested_function(): """Test parsing of a nested function.""" parser = Parser() code = CodeSnippet("""\ def outer_function(): \"""This is the outer function.\""" def inner_function(): '''This is the inner function.''' return None return None """) module = parser.parse(code, 'file_path') outer_function, = module.children assert outer_function.name == 'outer_function' assert outer_function.decorators == [] assert outer_function.docstring == '"""This is the outer function."""' assert outer_function.kind == 'function' assert outer_function.parent == module assert outer_function.start == 1 assert outer_function.end == 6 assert outer_function.error_lineno == 2 assert outer_function.source == code.getvalue() assert outer_function.is_public assert str(outer_function) == 'in public function `outer_function`' inner_function, = outer_function.children assert inner_function.name == 'inner_function' assert inner_function.decorators == [] assert inner_function.docstring == "'''This is the inner function.'''" assert inner_function.kind == 'function' assert inner_function.parent == outer_function assert inner_function.start == 3 assert inner_function.end == 5 assert inner_function.error_lineno == 4 assert textwrap.dedent(inner_function.source) == textwrap.dedent("""\ def inner_function(): '''This is the inner function.''' return None """) assert not inner_function.is_public assert str(inner_function) == 'in private nested function `inner_function`' def test_conditional_nested_function(): """Test parsing of a nested function inside a condition.""" parser = Parser() code = CodeSnippet("""\ def outer_function(): \"""This is the outer function.\""" if True: def inner_function(): '''This is the inner function.''' return None return None """) module = parser.parse(code, 'file_path') outer_function, = module.children assert outer_function.name == 'outer_function' assert outer_function.decorators == [] assert outer_function.docstring == '"""This is the outer function."""' assert outer_function.kind == 'function' assert outer_function.parent == module assert outer_function.start == 1 assert outer_function.end == 7 assert outer_function.source == code.getvalue() assert outer_function.is_public assert str(outer_function) == 'in public function `outer_function`' inner_function, = outer_function.children assert inner_function.name == 'inner_function' assert inner_function.decorators == [] assert inner_function.docstring == "'''This is the inner function.'''" assert inner_function.kind == 'function' assert inner_function.parent == outer_function assert inner_function.start == 4 assert inner_function.end == 6 assert textwrap.dedent(inner_function.source) == textwrap.dedent("""\ def inner_function(): '''This is the inner function.''' return None """) assert not inner_function.is_public assert str(inner_function) == 'in private nested function `inner_function`' def test_doubly_nested_function(): """Test parsing of a nested function inside a nested function.""" parser = Parser() code = CodeSnippet("""\ def outer_function(): \"""This is the outer function.\""" def middle_function(): def inner_function(): '''This is the inner function.''' return None return None """) module = parser.parse(code, 'file_path') outer_function, = module.children 
assert outer_function.name == 'outer_function' assert outer_function.decorators == [] assert outer_function.docstring == '"""This is the outer function."""' assert outer_function.kind == 'function' assert outer_function.parent == module assert outer_function.start == 1 assert outer_function.end == 7 assert outer_function.source == code.getvalue() assert outer_function.is_public assert str(outer_function) == 'in public function `outer_function`' middle_function, = outer_function.children assert middle_function.name == 'middle_function' assert middle_function.decorators == [] assert middle_function.docstring is None assert middle_function.kind == 'function' assert middle_function.parent == outer_function assert middle_function.start == 3 assert middle_function.end == 6 assert textwrap.dedent(middle_function.source) == textwrap.dedent("""\ def middle_function(): def inner_function(): '''This is the inner function.''' return None """) assert not middle_function.is_public assert (str(middle_function) == 'in private nested function `middle_function`') inner_function, = middle_function.children assert inner_function.name == 'inner_function' assert inner_function.decorators == [] assert inner_function.docstring == "'''This is the inner function.'''" assert inner_function.kind == 'function' assert inner_function.parent == middle_function assert inner_function.start == 4 assert inner_function.end == 6 assert textwrap.dedent(inner_function.source) == textwrap.dedent("""\ def inner_function(): '''This is the inner function.''' return None """) assert not inner_function.is_public assert str(inner_function) == 'in private nested function `inner_function`' def test_class(): """Test parsing of a class.""" parser = Parser() code = CodeSnippet("""\ class TestedClass(object): " an ugly docstring " """) module = parser.parse(code, 'file_path') klass, = module.children assert klass.name == 'TestedClass' assert klass.decorators == [] assert klass.children == [] assert klass.docstring == '" an ugly docstring "' assert klass.kind == 'class' assert klass.parent == module assert klass.start == 1 assert klass.end == 3 assert klass.error_lineno == 3 assert klass.source == code.getvalue() assert klass.is_public assert str(klass) == 'in public class `TestedClass`' def test_public_method(): """Test parsing of a public method.""" parser = Parser() code = CodeSnippet("""\ class TestedClass(object): def do_it(param): \"""Do the 'it'\""" # do nothing return None """) module = parser.parse(code, 'file_path') klass, = module.children assert klass.name == 'TestedClass' assert klass.decorators == [] assert klass.docstring is None assert klass.kind == 'class' assert klass.parent == module assert klass.start == 1 assert klass.end == 5 assert klass.error_lineno == 1 assert klass.source == code.getvalue() assert klass.is_public assert str(klass) == 'in public class `TestedClass`' method, = klass.children assert method.name == 'do_it' assert method.decorators == [] assert method.docstring == '''"""Do the 'it'"""''' assert method.kind == 'method' assert method.parent == klass assert method.start == 2 assert method.end == 5 assert method.error_lineno == 3 assert textwrap.dedent(method.source) == textwrap.dedent("""\ def do_it(param): \"""Do the 'it'\""" # do nothing return None """) assert method.is_public assert not method.is_magic assert str(method) == 'in public method `do_it`' def test_private_method(): """Test parsing of a private method.""" parser = Parser() code = CodeSnippet("""\ class TestedClass(object): def _do_it(param): 
\"""Do the 'it'\""" # do nothing return None """) module = parser.parse(code, 'file_path') klass, = module.children assert klass.name == 'TestedClass' assert klass.decorators == [] assert klass.docstring is None assert klass.kind == 'class' assert klass.parent == module assert klass.start == 1 assert klass.end == 5 assert klass.error_lineno == 1 assert klass.source == code.getvalue() assert klass.is_public assert str(klass) == 'in public class `TestedClass`' method, = klass.children assert method.name == '_do_it' assert method.decorators == [] assert method.docstring == '''"""Do the 'it'"""''' assert method.kind == 'method' assert method.parent == klass assert method.start == 2 assert method.end == 5 assert method.error_lineno == 3 assert textwrap.dedent(method.source) == textwrap.dedent("""\ def _do_it(param): \"""Do the 'it'\""" # do nothing return None """) assert not method.is_public assert not method.is_magic assert str(method) == 'in private method `_do_it`' def test_magic_method(): """Test parsing of a magic method.""" parser = Parser() code = CodeSnippet("""\ class TestedClass(object): def __str__(self): return "me" """) module = parser.parse(code, 'file_path') klass, = module.children assert klass.name == 'TestedClass' assert klass.decorators == [] assert klass.docstring is None assert klass.kind == 'class' assert klass.parent == module assert klass.start == 1 assert klass.end == 3 assert klass.error_lineno == 1 assert klass.source == code.getvalue() assert klass.is_public assert str(klass) == 'in public class `TestedClass`' method, = klass.children[0] assert method.name == '__str__' assert method.decorators == [] assert method.docstring is None assert method.kind == 'method' assert method.parent == klass assert method.start == 2 assert method.end == 3 assert method.error_lineno == 2 assert textwrap.dedent(method.source) == textwrap.dedent("""\ def __str__(self): return "me" """) assert method.is_public assert method.is_magic assert str(method) == 'in public method `__str__`' def test_nested_class(): """Test parsing of a class.""" parser = Parser() code = CodeSnippet("""\ class OuterClass(object): ' an outer docstring' class InnerClass(object): "An inner docstring." """) module = parser.parse(code, 'file_path') outer_class, = module.children assert outer_class.name == 'OuterClass' assert outer_class.decorators == [] assert outer_class.docstring == "' an outer docstring'" assert outer_class.kind == 'class' assert outer_class.parent == module assert outer_class.start == 1 assert outer_class.end == 4 assert outer_class.error_lineno == 2 assert outer_class.source == code.getvalue() assert outer_class.is_public assert str(outer_class) == 'in public class `OuterClass`' inner_class, = outer_class.children assert inner_class.name == 'InnerClass' assert inner_class.decorators == [] assert inner_class.children == [] assert inner_class.docstring == '"An inner docstring."' assert inner_class.kind == 'class' assert inner_class.parent == outer_class assert inner_class.start == 3 assert inner_class.end == 4 assert inner_class.error_lineno == 4 assert textwrap.dedent(inner_class.source) == textwrap.dedent("""\ class InnerClass(object): "An inner docstring." 
""") assert inner_class.is_public assert str(inner_class) == 'in public nested class `InnerClass`' def test_raise_from(): """Make sure 'raise x from y' doesn't trip the parser.""" parser = Parser() code = CodeSnippet("raise ValueError() from None") parser.parse(code, 'file_path') def test_simple_matrix_multiplication(): """Make sure 'a @ b' doesn't trip the parser.""" parser = Parser() code = CodeSnippet(""" def foo(): a @ b """) parser.parse(code, 'file_path') @pytest.mark.parametrize("code", ( CodeSnippet(""" def foo(): a @ b (a @b) @a def b(): pass """), CodeSnippet(""" def foo(): a @ b (a @b) a\ @b @a def b(): pass """), CodeSnippet(""" def foo(): a @ b (a # A random comment here @b) a\ @b @a def b(): pass """), )) def test_matrix_multiplication_with_decorators(code): """Make sure 'a @ b' doesn't trip the parser.""" parser = Parser() module = parser.parse(code, 'file_path') outer_function, = module.children assert outer_function.name == 'foo' inner_function, = outer_function.children assert len(inner_function.decorators) == 1 assert inner_function.decorators[0].name == 'a' @pytest.mark.parametrize("public_path", ( Path(""), Path("module.py"), Path("package") / "module.py", Path("package") / "__init__.py", Path("") / "package" / "module.py", Path("") / "__dunder__" / "package" / "module.py" )) def test_module_publicity_with_public_path(public_path): """Test module publicity with public path. Module names such as my_module.py are considered public. Special "dunder" modules, with leading and trailing double-underscores (e.g. __init__.py) are public. The same rules for publicity apply to both packages and modules. """ parser = Parser() code = CodeSnippet("") module = parser.parse(code, str(public_path)) assert module.is_public @pytest.mark.parametrize("private_path", ( # single underscore Path("_private_module.py"), Path("_private_package") / "module.py", Path("_private_package") / "package" / "module.py", Path("") / "_private_package" / "package" / "module.py", # double underscore Path("__private_module.py"), Path("__private_package") / "module.py", Path("__private_package") / "package" / "module.py", Path("") / "__private_package" / "package" / "module.py" )) def test_module_publicity_with_private_paths(private_path): """Test module publicity with private path. Module names starting with single or double-underscore are private. For example, _my_private_module.py and __my_private_module.py. Any module within a private package is considered private. The same rules for publicity apply to both packages and modules. """ parser = Parser() code = CodeSnippet("") module = parser.parse(code, str(private_path)) assert not module.is_public @pytest.mark.parametrize("syspath,is_public", ( ("/", False), ("_foo/", True), )) def test_module_publicity_with_different_sys_path(syspath, is_public, monkeypatch): """Test module publicity for same path and different sys.path.""" parser = Parser() code = CodeSnippet("") monkeypatch.syspath_prepend(syspath) path = Path("_foo") / "bar" / "baz.py" module = parser.parse(code, str(path)) assert module.is_public == is_public def test_complex_module(): """Test that a complex module is parsed correctly.""" parser = Parser() code = CodeSnippet('''\ """Module.""" __all__ = ('a', 'b' 'c',) def function(): "Function." 
def nested_1(): """Nested.""" if True: def nested_2(): pass class class_(object): """Class.""" def method_1(self): """Method.""" def method_2(self): def nested_3(self): """Nested.""" ''') module = parser.parse(code, "filepath") assert list(module)[0] == module assert len(list(module)) == 8 @pytest.mark.parametrize("code", ( CodeSnippet("""\ __all__ = ['foo', 'bar'] """), CodeSnippet("""\ __all__ = ['foo', 'ba' 'r',] """), CodeSnippet("""\ __all__ = ('foo', 'bar' ) """), CodeSnippet("""\ __all__ = ['foo', # Inconvenient comment 'bar' ] """), CodeSnippet("""\ __all__ = 'foo', 'bar' """), CodeSnippet("""\ __all__ = 'foo', 'bar', """), CodeSnippet( """__all__ = 'foo', 'bar'""" ), CodeSnippet("""\ __all__ = 'foo', \ 'bar' """), CodeSnippet("""\ foo = 1 __all__ = 'foo', 'bar' """), CodeSnippet("""\ __all__ = 'foo', 'bar' foo = 1 """), CodeSnippet("""\ __all__ = ['foo', 'bar'] # never freeze """), )) def test_dunder_all(code): """Test that __all__ is parsed correctly.""" parser = Parser() module = parser.parse(code, "filepath") assert module.dunder_all == ('foo', 'bar') def test_single_value_dunder_all(): """Test that single value __all__ is parsed correctly.""" parser = Parser() code = CodeSnippet("""\ __all__ = 'foo', """) module = parser.parse(code, "filepath") assert module.dunder_all == ('foo', ) code = CodeSnippet("""\ __all__ = 'foo' """) module = parser.parse(code, "filepath") assert module.dunder_all is None assert module.dunder_all_error code = CodeSnippet("""\ __all__ = ('foo', ) """) module = parser.parse(code, "filepath") assert module.dunder_all == ('foo', ) indeterminable_dunder_all_test_cases = [ CodeSnippet("""\ __all__ = ['foo'] __all__ += ['bar'] """), CodeSnippet("""\ __all__ = ['foo'] + ['bar'] """), CodeSnippet("""\ __all__ = ['foo'] __all__.insert('bar') """), CodeSnippet("""\ __all__ = foo() """), CodeSnippet("""\ all = ['foo'] __all__ = all """), CodeSnippet("""\ foo = 'foo' __all__ = [foo] """), CodeSnippet("""\ __all__ = (*foo, 'bar') """), ] @pytest.mark.parametrize("code", indeterminable_dunder_all_test_cases) def test_indeterminable_dunder_all(code): """Test that __all__ is ignored if it can't be statically evaluated.""" parser = Parser() module = parser.parse(code, "filepath") assert module.dunder_all is None assert module.dunder_all_error @pytest.mark.parametrize("code", ( CodeSnippet("""\ from __future__ import unicode_literals, nested_scopes """), CodeSnippet("""\ from __future__ import unicode_literals, nested_scopes; """), CodeSnippet("""\ from __future__ import unicode_literals from __future__ import nested_scopes; """), CodeSnippet("""\ from __future__ import unicode_literals from __future__ import nested_scopes as ns """), CodeSnippet("""\ from __future__ import (unicode_literals as nl, nested_scopes) """), CodeSnippet("""\ from __future__ import (unicode_literals as nl,) from __future__ import (nested_scopes) """), CodeSnippet("""\ from __future__ \\ import unicode_literals from __future__ \\ import nested_scopes """), )) def test_future_import(code): """Test that __future__ imports are properly parsed and collected.""" parser = Parser() module = parser.parse(code, "filepath") assert module.future_imports == {'unicode_literals', 'nested_scopes'} def test_noqa_function(): """Test that "# noqa" comments are correctly collected for definitions.""" code = CodeSnippet("""\ def foo(): # noqa: D100,D101 pass """) parser = Parser() module = parser.parse(code, "filepath") function, = module.children assert function.skipped_error_codes == 'D100,D101' 
@pytest.mark.parametrize("code", ( CodeSnippet("""\ while True: try: pass """), CodeSnippet("[\n"), # Should result in `SyntaxError: from __future__ imports must occur # at the beginning of the file` CodeSnippet("""\ from __future__ import unicode_literals; import string; from \ __future__ import nested_scopes """), )) def test_invalid_syntax(code): """Test invalid code input to the parser.""" parser = Parser() with pytest.raises(ParseError): module = parser.parse(code, "filepath") @pytest.mark.parametrize("code", ( CodeSnippet("""\ '''Test this''' @property def test(): pass """), CodeSnippet("""\ '''Test this''' @property def test(): pass """), CodeSnippet("""\ '''Test this''' @property def test(): pass """), CodeSnippet("""\ '''Test this''' @property def test(): pass """), CodeSnippet("""\ '''Test this''' # A random comment in the middle to break things @property def test(): pass """), CodeSnippet("""\ '''Test this''' @property def test(): pass """), CodeSnippet("""\ '''Test this''' @first_decorator @property def test(): pass """), )) def test_parsing_function_decorators(code): """Test to ensure we are correctly parsing function decorators.""" parser = Parser() module = parser.parse(code, "filename") function, = module.children decorator_names = {dec.name for dec in function.decorators} assert "property" in decorator_names @pytest.mark.parametrize("code", ( CodeSnippet("""\ class Test: @property def test(self): pass """), CodeSnippet("""\ class Test: @property def test(self): pass """), CodeSnippet("""\ class Test: # Random comment to trip decorator parsing @property def test(self): pass """), CodeSnippet("""\ class Test: # Random comment to trip decorator parsing A = 1 @property def test(self): pass """), CodeSnippet("""\ class Test: # Random comment to trip decorator parsing A = 1 '''Another random comment''' @property def test(self): pass """), )) def test_parsing_method_decorators(code): """Test to ensure we are correctly parsing method decorators.""" parser = Parser() module = parser.parse(code, "filename") function, = module.children[0].children decorator_names = {dec.name for dec in function.decorators} assert "property" in decorator_names
CodeSnippet
python
python-openxml__python-docx
tests/parts/test_numbering.py
{ "start": 299, "end": 1768 }
class ____: def it_provides_access_to_the_numbering_definitions(self, num_defs_fixture): ( numbering_part, _NumberingDefinitions_, numbering_elm_, numbering_definitions_, ) = num_defs_fixture numbering_definitions = numbering_part.numbering_definitions _NumberingDefinitions_.assert_called_once_with(numbering_elm_) assert numbering_definitions is numbering_definitions_ # fixtures ------------------------------------------------------- @pytest.fixture def num_defs_fixture(self, _NumberingDefinitions_, numbering_elm_, numbering_definitions_): numbering_part = NumberingPart(None, None, numbering_elm_, None) return ( numbering_part, _NumberingDefinitions_, numbering_elm_, numbering_definitions_, ) # fixture components --------------------------------------------- @pytest.fixture def _NumberingDefinitions_(self, request, numbering_definitions_): return class_mock( request, "docx.parts.numbering._NumberingDefinitions", return_value=numbering_definitions_, ) @pytest.fixture def numbering_definitions_(self, request): return instance_mock(request, _NumberingDefinitions) @pytest.fixture def numbering_elm_(self, request): return instance_mock(request, CT_Numbering)
DescribeNumberingPart
python
sympy__sympy
sympy/physics/quantum/cartesian.py
{ "start": 4647, "end": 4949 }
class ____(Bra): """1D cartesian position eigenbra.""" @classmethod def default_args(self): return ("x",) @classmethod def dual_class(self): return XKet @property def position(self): """The position of the state.""" return self.label[0]
XBra
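A brief illustration of the bra/ket duality defined above, assuming the masked class is XBra (per the target) and sympy is importable.

from sympy.physics.quantum.cartesian import XBra, XKet

b = XBra("x")
print(b.position)      # the position label, Symbol('x')
print(b.dual_class())  # the dual ket class, XKet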
python
apache__airflow
providers/amazon/src/airflow/providers/amazon/aws/operators/emr.py
{ "start": 8631, "end": 14532 }
class ____(AwsBaseOperator[EmrHook]): """ An operator that starts an EMR notebook execution. .. seealso:: For more information on how to use this operator, take a look at the guide: :ref:`howto/operator:EmrStartNotebookExecutionOperator` :param editor_id: The unique identifier of the EMR notebook to use for notebook execution. :param relative_path: The path and file name of the notebook file for this execution, relative to the path specified for the EMR notebook. :param cluster_id: The unique identifier of the EMR cluster the notebook is attached to. :param service_role: The name or ARN of the IAM role that is used as the service role for Amazon EMR (the EMR role) for the notebook execution. :param notebook_execution_name: Optional name for the notebook execution. :param notebook_params: Input parameters in JSON format passed to the EMR notebook at runtime for execution. :param notebook_instance_security_group_id: The unique identifier of the Amazon EC2 security group to associate with the EMR notebook for this notebook execution. :param master_instance_security_group_id: Optional unique ID of an EC2 security group to associate with the master instance of the EMR cluster for this notebook execution. :param tags: Optional list of key value pair to associate with the notebook execution. :param waiter_max_attempts: Maximum number of tries before failing. :param waiter_delay: Number of seconds between polling the state of the notebook. :param aws_conn_id: The Airflow connection used for AWS credentials. If this is ``None`` or empty then the default boto3 behaviour is used. If running Airflow in a distributed manner and aws_conn_id is None or empty, then default boto3 configuration would be used (and must be maintained on each worker node). :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used. :param verify: Whether or not to verify SSL certificates. 
See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html """ aws_hook_class = EmrHook template_fields: Sequence[str] = aws_template_fields( "editor_id", "cluster_id", "relative_path", "service_role", "notebook_execution_name", "notebook_params", "notebook_instance_security_group_id", "master_instance_security_group_id", "tags", "waiter_delay", "waiter_max_attempts", ) def __init__( self, editor_id: str, relative_path: str, cluster_id: str, service_role: str, notebook_execution_name: str | None = None, notebook_params: str | None = None, notebook_instance_security_group_id: str | None = None, master_instance_security_group_id: str | None = None, tags: list | None = None, wait_for_completion: bool = False, waiter_max_attempts: int | None = None, waiter_delay: int | None = None, **kwargs: Any, ): super().__init__(**kwargs) self.editor_id = editor_id self.relative_path = relative_path self.service_role = service_role self.notebook_execution_name = notebook_execution_name or f"emr_notebook_{uuid4()}" self.notebook_params = notebook_params or "" self.notebook_instance_security_group_id = notebook_instance_security_group_id or "" self.tags = tags or [] self.wait_for_completion = wait_for_completion self.cluster_id = cluster_id self.waiter_max_attempts = waiter_max_attempts or 25 self.waiter_delay = waiter_delay or 60 self.master_instance_security_group_id = master_instance_security_group_id def execute(self, context: Context): execution_engine = { "Id": self.cluster_id, "Type": "EMR", "MasterInstanceSecurityGroupId": self.master_instance_security_group_id or "", } response = self.hook.conn.start_notebook_execution( EditorId=self.editor_id, RelativePath=self.relative_path, NotebookExecutionName=self.notebook_execution_name, NotebookParams=self.notebook_params, ExecutionEngine=execution_engine, ServiceRole=self.service_role, NotebookInstanceSecurityGroupId=self.notebook_instance_security_group_id, Tags=self.tags, ) if response["ResponseMetadata"]["HTTPStatusCode"] != 200: raise AirflowException(f"Starting notebook execution failed: {response}") self.log.info("Notebook execution started: %s", response["NotebookExecutionId"]) notebook_execution_id = response["NotebookExecutionId"] if self.wait_for_completion: self.hook.get_waiter("notebook_running").wait( NotebookExecutionId=notebook_execution_id, WaiterConfig=prune_dict( { "Delay": self.waiter_delay, "MaxAttempts": self.waiter_max_attempts, } ), ) # The old Waiter method raised an exception if the notebook # failed, adding that here. This could maybe be deprecated # later to bring it in line with how other waiters behave. failure_states = {"FAILED"} final_status = self.hook.conn.describe_notebook_execution( NotebookExecutionId=notebook_execution_id )["NotebookExecution"]["Status"] if final_status in failure_states: raise AirflowException(f"Notebook Execution reached failure state {final_status}.") return notebook_execution_id
EmrStartNotebookExecutionOperator
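A hedged sketch of wiring the operator above into a DAG; every ID below is a placeholder, and only parameters visible in the __init__ above (plus BaseOperator's task_id) are used.

# Hypothetical task definition; editor/cluster IDs and the role name
# are placeholders, not real AWS resources.
start_execution = EmrStartNotebookExecutionOperator(
    task_id="start_notebook_execution",
    editor_id="e-EDITORID",
    relative_path="notebook.ipynb",
    cluster_id="j-CLUSTERID",
    service_role="EMR_Notebooks_DefaultRole",
    wait_for_completion=True,
)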
python
joke2k__faker
faker/providers/profile/en_US/__init__.py
{ "start": 65, "end": 127 }
class ____(ProfileProvider): # pragma: no cover pass
Provider
python
Textualize__rich
examples/attrs.py
{ "start": 348, "end": 1034 }
class ____: name: str triangles: List[Triangle] = attr.Factory(list) if __name__ == "__main__": model = Model( name="Alien#1", triangles=[ Triangle( Point3D(x=20, y=50), Point3D(x=50, y=15, z=-45.34), Point3D(3.1426, 83.2323, -16), ) ], ) from rich.console import Console from rich.pretty import Pretty from rich.table import Column, Table from rich.text import Text console = Console() table = Table("attrs *with* Rich", Column(Text.from_markup("attrs *without* Rich"))) table.add_row(Pretty(model), repr(model)) console.print(table)
Model
python
airbytehq__airbyte
airbyte-integrations/connectors/destination-vectara/integration_tests/integration_test.py
{ "start": 499, "end": 4740 }
class ____(unittest.TestCase):
    def _get_configured_catalog(self, destination_mode: DestinationSyncMode) -> ConfiguredAirbyteCatalog:
        stream_schema = {"type": "object", "properties": {"str_col": {"type": "str"}, "int_col": {"type": "integer"}}}

        overwrite_stream = ConfiguredAirbyteStream(
            stream=AirbyteStream(
                name="mystream", json_schema=stream_schema, supported_sync_modes=[SyncMode.incremental, SyncMode.full_refresh]
            ),
            primary_key=[["int_col"]],
            sync_mode=SyncMode.incremental,
            destination_sync_mode=destination_mode,
        )

        return ConfiguredAirbyteCatalog(streams=[overwrite_stream])

    def _state(self, data: Dict[str, Any]) -> AirbyteMessage:
        return AirbyteMessage(type=Type.STATE, state=AirbyteStateMessage(data=data))

    def _record(self, stream: str, str_value: str, int_value: int) -> AirbyteMessage:
        return AirbyteMessage(
            type=Type.RECORD, record=AirbyteRecordMessage(stream=stream, data={"str_col": str_value, "int_col": int_value}, emitted_at=0)
        )

    def _clean(self):
        self._client.delete_doc_by_metadata(metadata_field_name="_ab_stream", metadata_field_values=["None_mystream"])

    def setUp(self):
        with open("secrets/config.json", "r") as f:
            self.config = json.loads(f.read())
        self._client = VectaraClient(self.config)
        self._clean()

    def tearDown(self):
        self._clean()

    def test_check_valid_config(self):
        outcome = DestinationVectara().check(logging.getLogger("airbyte"), self.config)
        assert outcome.status == Status.SUCCEEDED

    def test_check_invalid_config(self):
        outcome = DestinationVectara().check(
            logging.getLogger("airbyte"),
            {
                "oauth2": {"client_id": "myclientid", "client_secret": "myclientsecret"},
                "corpus_name": "teststore",
                "customer_id": "123456",
                "text_fields": [],
                "metadata_fields": [],
                "title_field": "",
            },
        )
        assert outcome.status == Status.FAILED

    def _query_index(self, query="Everything", num_results=100):
        return self._client._request(
            "query",
            data={
                "query": [
                    {
                        "query": query,
                        "numResults": num_results,
                        "corpusKey": [
                            {
                                "customerId": self._client.customer_id,
                                "corpusId": self._client.corpus_id,
                            }
                        ],
                    }
                ]
            },
        )["responseSet"][0]

    def test_write(self):
        # validate corpus starts empty
        initial_result = self._query_index()["document"]
        assert len(initial_result) == 0

        catalog = self._get_configured_catalog(DestinationSyncMode.overwrite)
        first_state_message = self._state({"state": "1"})
        first_record_chunk = [self._record("mystream", f"Dogs are number {i}", i) for i in range(5)]

        # initial sync
        destination = DestinationVectara()
        list(destination.write(self.config, catalog, [*first_record_chunk, first_state_message]))
        assert len(self._query_index()["document"]) == 5

        # incrementally update a doc
        incremental_catalog = self._get_configured_catalog(DestinationSyncMode.append_dedup)
        list(destination.write(self.config, incremental_catalog, [self._record("mystream", "Cats are nice", 2), first_state_message]))
        assert len(self._query_index()["document"]) == 5

        # use semantic search
        result = self._query_index("Feline animals", 1)
        assert result["document"] == [
            {
                "id": "Stream_None_mystream_Key_None_mystream_2",
                "metadata": [
                    {"name": "int_col", "value": "2"},
                    {"name": "_ab_stream", "value": "None_mystream"},
                    {"name": "title", "value": "Cats are nice"},
                ],
            }
        ]
VectaraIntegrationTest
python
django__django
tests/middleware/tests.py
{ "start": 43236, "end": 45188 }
class ____(SimpleTestCase): """ ETags are handled properly by GZipMiddleware. """ rf = RequestFactory() compressible_string = b"a" * 500 def test_strong_etag_modified(self): """ GZipMiddleware makes a strong ETag weak. """ def get_response(req): response = HttpResponse(self.compressible_string) response.headers["ETag"] = '"eggs"' return response request = self.rf.get("/", headers={"accept-encoding": "gzip, deflate"}) gzip_response = GZipMiddleware(get_response)(request) self.assertEqual(gzip_response.headers["ETag"], 'W/"eggs"') def test_weak_etag_not_modified(self): """ GZipMiddleware doesn't modify a weak ETag. """ def get_response(req): response = HttpResponse(self.compressible_string) response.headers["ETag"] = 'W/"eggs"' return response request = self.rf.get("/", headers={"accept-encoding": "gzip, deflate"}) gzip_response = GZipMiddleware(get_response)(request) self.assertEqual(gzip_response.headers["ETag"], 'W/"eggs"') def test_etag_match(self): """ GZipMiddleware allows 304 Not Modified responses. """ def get_response(req): return HttpResponse(self.compressible_string) def get_cond_response(req): return ConditionalGetMiddleware(get_response)(req) request = self.rf.get("/", headers={"accept-encoding": "gzip, deflate"}) response = GZipMiddleware(get_cond_response)(request) gzip_etag = response.headers["ETag"] next_request = self.rf.get( "/", headers={"accept-encoding": "gzip, deflate", "if-none-match": gzip_etag}, ) next_response = ConditionalGetMiddleware(get_response)(next_request) self.assertEqual(next_response.status_code, 304)
ETagGZipMiddlewareTest
python
run-llama__llama_index
llama-index-core/llama_index/core/langchain_helpers/agents/toolkits.py
{ "start": 290, "end": 794 }
class ____(BaseToolkit): """Toolkit for interacting with Llama indices.""" model_config = ConfigDict(arbitrary_types_allowed=True) index_configs: List[IndexToolConfig] = Field(default_factory=list) def get_tools(self) -> List[BaseTool]: """Get the tools in the toolkit.""" index_tools: List[BaseTool] = [ LlamaIndexTool.from_tool_config(tool_config=tool_config) for tool_config in self.index_configs ] return index_tools
LlamaToolkit
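A small usage sketch under the assumption that index_configs defaults to an empty list, as the Field declaration above states.

# With no index configs registered, get_tools() returns an empty list.
toolkit = LlamaToolkit()
assert toolkit.get_tools() == []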
python
tensorflow__tensorflow
tensorflow/python/kernel_tests/math_ops/cwise_ops_test.py
{ "start": 24310, "end": 28829 }
class ____(test.TestCase): """Test broadcasting of Select when 'c' is a vec and 't' &'e' are rank2+.""" def _compare(self, c, x, y, use_gpu): np_ans = np.dstack( [x_i if c_i else y_i for c_i, x_i, y_i in zip(c, x, y)]).transpose( [2, 0, 1]) with test_util.device(use_gpu=use_gpu): out = array_ops.where(c, x, y) tf_ans = self.evaluate(out) self.assertAllEqual(np_ans, tf_ans) self.assertShapeEqual(np_ans, out) def _compareGradientX(self, c, x, y, numeric_gradient_type=None): with self.cached_session(): inx = ops.convert_to_tensor(x) iny = ops.convert_to_tensor(y) out = array_ops.where(c, inx, iny) s = list(np.shape(x)) jacob_t, jacob_n = gradient_checker.compute_gradient( inx, s, out, s, x_init_value=x) if numeric_gradient_type is not None: xf = x.astype(numeric_gradient_type) yf = y.astype(numeric_gradient_type) inxf = ops.convert_to_tensor(xf) inyf = ops.convert_to_tensor(yf) outf = array_ops.where(c, inxf, inyf) _, jacob_n = gradient_checker.compute_gradient( inxf, s, outf, s, x_init_value=xf) jacob_n = jacob_n.astype(x.dtype) if x.dtype == np.float16: self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3) elif x.dtype == np.float32: self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3) elif x.dtype == np.float64: self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5) def _compareGradientY(self, c, x, y, numeric_gradient_type=None): with self.cached_session(): inx = ops.convert_to_tensor(x) iny = ops.convert_to_tensor(y) out = array_ops.where(c, inx, iny) s = list(np.shape(x)) jacob_t, jacob_n = gradient_checker.compute_gradient( iny, s, out, s, x_init_value=y) if numeric_gradient_type is not None: xf = x.astype(numeric_gradient_type) yf = y.astype(numeric_gradient_type) inxf = ops.convert_to_tensor(xf) inyf = ops.convert_to_tensor(yf) outf = array_ops.where(c, inxf, inyf) _, jacob_n = gradient_checker.compute_gradient( inyf, s, outf, s, x_init_value=yf) jacob_n = jacob_n.astype(x.dtype) if x.dtype == np.float16: self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3) elif x.dtype == np.float32: self.assertAllClose(jacob_t, jacob_n, rtol=1e-3, atol=1e-3) elif x.dtype == np.float64: self.assertAllClose(jacob_t, jacob_n, rtol=1e-5, atol=1e-5) def testBasic(self): c = np.random.randint(0, 2, 16).astype(np.bool_) x = np.random.rand(16, 2, 8) * 100 y = np.random.rand(16, 2, 8) * 100 for t in [ np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64, np.complex128 ]: with self.subTest(t=t): xt = x.astype(t) yt = y.astype(t) self._compare(c, xt, yt, use_gpu=False) if t in [np.float16, np.float32, np.float64]: self._compare(c, xt, yt, use_gpu=True) @test_util.run_deprecated_v1 def testGradients(self): c = np.random.randint(0, 2, 16).astype(np.bool_) x = np.random.rand(16, 2, 8) * 100 y = np.random.rand(16, 2, 8) * 100 for t in [np.float16, np.float32, np.float64]: with self.subTest(t=t): xt = x.astype(t) yt = y.astype(t) if t == np.float16: # Compare fp16 theoretical gradients to fp32 numerical gradients, # since fp16 numerical gradients are too imprecise unless great # care is taken with choosing the inputs and the delta. This is # a weaker check (in particular, it does not test the op itself, # only its gradient), but it's much better than nothing. 
self._compareGradientX(c, xt, yt, np.float64) self._compareGradientY(c, xt, yt, np.float64) else: self._compareGradientX(c, xt, yt) self._compareGradientY(c, xt, yt) @test_util.run_deprecated_v1 def testShapeMismatch(self): c = np.random.randint(0, 2, 8).astype(np.bool_) x = np.random.rand(16, 3, 2) * 100 y = np.random.rand(16, 3, 2) * 100 for t in [ np.float16, np.float32, np.float64, np.int32, np.int64, np.complex64, np.complex128 ]: with self.subTest(t=t): xt = x.astype(t) yt = y.astype(t) with self.assertRaises(ValueError): array_ops.where(c, xt, yt) @test_util.with_eager_op_as_function
BatchSelectOpTest
python
django__django
django/db/models/fields/json.py
{ "start": 24346, "end": 24432 }
class ____(KeyTransformNumericLookupMixin, lookups.GreaterThan): pass
KeyTransformGt
python
pennersr__django-allauth
examples/regular-django/example/users/allauth.py
{ "start": 151, "end": 1554 }
class ____(DefaultAccountAdapter): def set_phone(self, user, phone: str, verified: bool): user.phone = phone user.phone_verified = verified user.save(update_fields=["phone", "phone_verified"]) def get_phone(self, user) -> typing.Optional[typing.Tuple[str, bool]]: if user.phone: return user.phone, user.phone_verified return None def set_phone_verified(self, user, phone): self.set_phone(user, phone, True) def send_verification_code_sms(self, user, phone: str, code: str, **kwargs): messages.add_message( self.request, messages.WARNING, f"⚠️ SMS demo stub: assume code {code} was sent to {phone}.", ) def send_unknown_account_sms(self, phone: str, **kwargs): messages.add_message( self.request, messages.WARNING, f"⚠️ SMS demo stub: Enumeration prevention: texted {phone} informing no account exists.", ) def send_account_already_exists_sms(self, phone: str, **kwargs): messages.add_message( self.request, messages.WARNING, f"⚠️ SMS demo stub: Enumeration prevention: texted {phone} informing account already exists.", ) def get_user_by_phone(self, phone): return User.objects.filter(phone=phone).order_by("-phone_verified").first()
AccountAdapter
python
matplotlib__matplotlib
galleries/examples/units/basic_units.py
{ "start": 684, "end": 913 }
class ____: def __init__(self, fn_name, proxy_type): self.proxy_type = proxy_type self.fn_name = fn_name def __get__(self, obj, objtype=None): return self.proxy_type(self.fn_name, obj)
ProxyDelegate
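A toy sketch of the descriptor above in action; EchoProxy and Widget are illustrative names, not from the source, showing only that attribute access constructs proxy_type(fn_name, obj).

# Illustrative proxy with the (fn_name, obj) signature __get__ expects.
class EchoProxy:
    def __init__(self, fn_name, obj):
        self.fn_name = fn_name
        self.obj = obj

    def __call__(self):
        return f"{self.fn_name} called on {self.obj!r}"

class Widget:
    render = ProxyDelegate("render", EchoProxy)

print(Widget().render())  # e.g. "render called on <...Widget object at 0x...>"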
python
scipy__scipy
scipy/cluster/tests/test_hierarchy.py
{ "start": 2448, "end": 3052 }
class ____: # Bypass xpx.testing.lazy_xp_function when calling # these functions from this namespace is_valid_im = is_valid_im is_valid_linkage = is_valid_linkage # Matplotlib is not a scipy dependency but is optionally used in dendrogram, so # check if it's available try: import matplotlib # and set the backend to be Agg (no gui) matplotlib.use('Agg') # before importing pyplot import matplotlib.pyplot as plt have_matplotlib = True except Exception: have_matplotlib = False skip_xp_backends = pytest.mark.skip_xp_backends @make_xp_test_case(linkage)
eager
python
joke2k__faker
faker/providers/internet/ru_RU/__init__.py
{ "start": 46, "end": 2168 }
class ____(InternetProvider): user_name_formats = ( "{{last_name_female}}.{{first_name_female}}", "{{last_name_male}}.{{first_name_male}}", "{{last_name_male}}.{{first_name_male}}", "{{first_name_male}}.{{last_name_male}}", "{{first_name}}##", "{{first_name}}_##", "?{{last_name}}", "{{first_name}}{{year}}", "{{first_name}}_{{year}}", ) email_formats = ( "{{user_name}}@{{free_email_domain}}", "{{user_name}}@{{domain_name}}", ) free_email_domains = ( "gmail.com", "yahoo.com", "hotmail.com", "mail.ru", "yandex.ru", "rambler.ru", ) tlds = ("ru", "com", "biz", "info", "net", "org", "edu") replacements = ( ("А", "a"), ("Б", "b"), ("В", "v"), ("Г", "g"), ("Д", "d"), ("Е", "e"), ("Ё", "e"), ("Ж", "zh"), ("З", "z"), ("И", "i"), ("Й", ""), ("К", "k"), ("Л", "l"), ("М", "m"), ("Н", "n"), ("О", "o"), ("П", "p"), ("Р", "r"), ("С", "s"), ("Т", "t"), ("У", "u"), ("Ф", "f"), ("Х", "h"), ("Ц", "ts"), ("Ч", "ch"), ("Ш", "sh"), ("Щ", "shch"), ("Ъ", ""), ("Ы", "i"), ("Ь", ""), ("Э", "e"), ("Ю", "yu"), ("Я", "ya"), ("а", "a"), ("б", "b"), ("в", "v"), ("г", "g"), ("д", "d"), ("е", "e"), ("ё", "e"), ("ж", "zh"), ("з", "z"), ("и", "i"), ("й", ""), ("к", "k"), ("л", "l"), ("м", "m"), ("н", "n"), ("о", "o"), ("п", "p"), ("р", "r"), ("с", "s"), ("т", "t"), ("у", "u"), ("ф", "f"), ("х", "h"), ("ц", "ts"), ("ч", "ch"), ("ш", "sh"), ("щ", "shch"), ("ъ", ""), ("ы", "i"), ("ь", ""), ("э", "e"), ("ю", "ju"), ("я", "ja"), )
Provider
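A hedged sketch of applying a transliteration table like replacements; romanize is a hypothetical helper (Faker's own internals may apply the table differently), and Provider here is the masked class name per the target.

# Hypothetical helper: apply each (cyrillic, latin) pair in order.
def romanize(text: str, table=Provider.replacements) -> str:
    for src, dst in table:
        text = text.replace(src, dst)
    return text

print(romanize("Щербаков"))  # -> "shcherbakov"; uppercase letters also map to lowercase Latin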
python
coleifer__peewee
tests/regressions.py
{ "start": 22563, "end": 22723 }
class ____(TestModel): name = TextField() project = ForeignKeyField(Project, backref='tasks') alt = ForeignKeyField(Project, backref='alt_tasks')
Task
python
getsentry__sentry
tests/sentry/snuba/test_entity_subscriptions.py
{ "start": 1342, "end": 21377 }
class ____(TestCase):
    def setUp(self) -> None:
        super().setUp()
        for tag in [
            SessionMRI.RAW_SESSION.value,
            SessionMRI.RAW_USER.value,
            "session.status",
            "init",
            "crashed",
        ]:
            indexer.record(use_case_id=UseCaseID.SESSIONS, org_id=self.organization.id, string=tag)

    def test_get_entity_subscriptions_for_sessions_dataset_non_supported_aggregate(self) -> None:
        aggregate = "count(sessions)"
        with pytest.raises(UnsupportedQuerySubscription):
            get_entity_subscription(
                query_type=SnubaQuery.Type.CRASH_RATE,
                dataset=Dataset.Metrics,
                aggregate=aggregate,
                time_window=3600,
                extra_fields={"org_id": self.organization.id},
            )

    def test_get_entity_subscriptions_for_sessions_dataset_missing_organization(self) -> None:
        aggregate = "percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate"
        with pytest.raises(InvalidQuerySubscription):
            get_entity_subscription(
                query_type=SnubaQuery.Type.CRASH_RATE,
                dataset=Dataset.Metrics,
                aggregate=aggregate,
                time_window=3600,
            )

    def test_build_query_builder_invalid_fields_raise_error(self) -> None:
        entities = [
            get_entity_subscription(
                query_type=SnubaQuery.Type.CRASH_RATE,
                dataset=Dataset.Metrics,
                aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate",
                time_window=3600,
                extra_fields={"org_id": self.organization.id},
            ),
            get_entity_subscription(
                query_type=SnubaQuery.Type.ERROR,
                dataset=Dataset.Events,
                aggregate="count_unique(user)",
                time_window=3600,
            ),
        ]
        for entity in entities:
            with pytest.raises(Exception):
                entity.build_query_builder("timestamp:-24h", [self.project.id], None)

    def test_get_entity_subscription_for_metrics_dataset_non_supported_aggregate(self) -> None:
        aggregate = "count(sessions)"
        with pytest.raises(UnsupportedQuerySubscription):
            get_entity_subscription(
                query_type=SnubaQuery.Type.CRASH_RATE,
                dataset=Dataset.Metrics,
                aggregate=aggregate,
                time_window=3600,
                extra_fields={"org_id": self.organization.id},
            )

    def test_get_entity_subscription_for_metrics_dataset_missing_organization(self) -> None:
        aggregate = "percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate"
        with pytest.raises(InvalidQuerySubscription):
            get_entity_subscription(
                query_type=SnubaQuery.Type.CRASH_RATE,
                dataset=Dataset.Metrics,
                aggregate=aggregate,
                time_window=3600,
            )

    def test_get_entity_subscription_for_metrics_dataset_for_users(self) -> None:
        org_id = self.organization.id
        use_case_id = UseCaseID.SESSIONS
        aggregate = "percentage(users_crashed, users) AS _crash_rate_alert_aggregate"
        entity_subscription = get_entity_subscription(
            query_type=SnubaQuery.Type.CRASH_RATE,
            dataset=Dataset.Metrics,
            aggregate=aggregate,
            time_window=3600,
            extra_fields={"org_id": self.organization.id},
        )
        assert isinstance(entity_subscription, MetricsSetsEntitySubscription)
        assert entity_subscription.aggregate == aggregate
        assert entity_subscription.get_entity_extra_params() == {
            "organization": self.organization.id,
            "granularity": 10,
        }
        assert entity_subscription.dataset == Dataset.Metrics
        session_status = resolve_tag_key(use_case_id, org_id, "session.status")
        session_status_crashed = resolve_tag_value(use_case_id, org_id, "crashed")
        metric_id = resolve(use_case_id, org_id, entity_subscription.metric_key.value)
        snql_query = entity_subscription.build_query_builder(
            "", [self.project.id], None, {"organization_id": self.organization.id}
        ).get_snql_query()
        key = lambda func: func.alias
        assert sorted(snql_query.query.select, key=key) == sorted(
            [
                Function(
                    "uniqIf",
                    parameters=[
                        Column("value"),
                        Function("equals", [Column("metric_id"), metric_id]),
                    ],
                    alias="count",
                ),
                Function(
                    "uniqIf",
                    parameters=[
                        Column("value"),
                        Function(
                            "and",
                            parameters=[
Function("equals", [Column("metric_id"), metric_id]), Function( "equals", [Column(session_status), session_status_crashed] ), ], ), ], alias="crashed", ), ], key=key, ) assert snql_query.query.where == [ Condition(Column("org_id"), Op.EQ, self.organization.id), Condition(Column("project_id"), Op.IN, [self.project.id]), Condition(Column("metric_id"), Op.IN, [metric_id]), ] # This test has been kept in order to validate whether the old queries through metrics are supported, in the future # this should be removed. def test_get_entity_subscription_for_metrics_dataset_for_sessions(self) -> None: org_id = self.organization.id use_case_id = UseCaseID.SESSIONS aggregate = "percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate" entity_subscription = get_entity_subscription( query_type=SnubaQuery.Type.CRASH_RATE, dataset=Dataset.Metrics, aggregate=aggregate, time_window=3600, extra_fields={"org_id": self.organization.id}, ) assert isinstance(entity_subscription, MetricsCountersEntitySubscription) assert entity_subscription.aggregate == aggregate assert entity_subscription.get_entity_extra_params() == { "organization": self.organization.id, "granularity": 10, } assert entity_subscription.dataset == Dataset.Metrics session_status = resolve_tag_key(use_case_id, org_id, "session.status") session_status_crashed = resolve_tag_value(use_case_id, org_id, "crashed") session_status_init = resolve_tag_value(use_case_id, org_id, "init") snql_query = entity_subscription.build_query_builder( "", [self.project.id], None, {"organization_id": self.organization.id} ).get_snql_query() key = lambda func: func.alias assert sorted(snql_query.query.select, key=key) == sorted( [ Function( function="sumIf", parameters=[ Column("value"), Function( "equals", parameters=[Column(session_status), session_status_init] ), ], alias="count", ), Function( "sumIf", parameters=[ Column(name="value"), Function( "equals", parameters=[Column(session_status), session_status_crashed] ), ], alias="crashed", ), ], key=key, ) assert snql_query.query.where == [ Condition(Column("project_id"), Op.IN, [self.project.id]), Condition(Column("org_id"), Op.EQ, self.organization.id), Condition( Column("metric_id"), Op.EQ, resolve(use_case_id, self.organization.id, entity_subscription.metric_key.value), ), Condition( Column(session_status), Op.IN, [session_status_crashed, session_status_init], ), ] def test_get_entity_subscription_for_performance_transactions_dataset(self) -> None: aggregate = "percentile(transaction.duration,.95)" entity_subscription = get_entity_subscription( query_type=SnubaQuery.Type.PERFORMANCE, dataset=Dataset.Transactions, aggregate=aggregate, time_window=3600, ) assert isinstance(entity_subscription, PerformanceTransactionsEntitySubscription) assert entity_subscription.aggregate == aggregate assert entity_subscription.get_entity_extra_params() == {} assert entity_subscription.dataset == Dataset.Transactions snql_query = entity_subscription.build_query_builder( "", [self.project.id], None ).get_snql_query() assert snql_query.query.select == [ Function( "quantile(0.95)", parameters=[Column(name="duration")], alias="percentile_transaction_duration__95", ) ] assert snql_query.query.where == [Condition(Column("project_id"), Op.IN, [self.project.id])] # This test has been kept in order to validate whether the old queries through metrics are supported, in the future # this should be removed. 
def test_get_entity_subscription_for_performance_metrics_dataset(self) -> None: aggregate = "percentile(transaction.duration,.95)" entity_subscription = get_entity_subscription( query_type=SnubaQuery.Type.PERFORMANCE, dataset=Dataset.PerformanceMetrics, aggregate=aggregate, time_window=3600, extra_fields={"org_id": self.organization.id}, ) assert isinstance(entity_subscription, PerformanceMetricsEntitySubscription) assert entity_subscription.aggregate == aggregate assert entity_subscription.get_entity_extra_params() == { "organization": self.organization.id, "granularity": 60, } assert entity_subscription.dataset == Dataset.PerformanceMetrics snql_query = entity_subscription.build_query_builder( "", [self.project.id], None, { "organization_id": self.organization.id, }, ).get_snql_query() metric_id = resolve( UseCaseID.TRANSACTIONS, self.organization.id, METRICS_MAP["transaction.duration"] ) assert snql_query.query.select == [ Function( function="arrayElement", parameters=[ Function( function="quantilesIf(0.95)", parameters=[ Column(name="value"), Function( function="equals", parameters=[Column(name="metric_id"), metric_id], ), ], ), 1, ], alias="percentile_transaction_duration__95", ) ] assert snql_query.query.where == [ Condition(Column("project_id"), Op.IN, [self.project.id]), Condition(Column("org_id"), Op.EQ, self.organization.id), Condition(Column("metric_id"), Op.IN, [metric_id]), ] def test_get_entity_subscription_for_events_dataset(self) -> None: aggregate = "count_unique(user)" entity_subscription = get_entity_subscription( query_type=SnubaQuery.Type.ERROR, dataset=Dataset.Events, aggregate=aggregate, time_window=3600, ) assert isinstance(entity_subscription, EventsEntitySubscription) assert entity_subscription.aggregate == aggregate assert entity_subscription.get_entity_extra_params() == {} assert entity_subscription.dataset == Dataset.Events entity = Entity(Dataset.Events.value, alias=Dataset.Events.value) snql_query = entity_subscription.build_query_builder( "release:latest", [self.project.id], None ).get_snql_query() assert snql_query.query.select == [ Function( function="uniq", parameters=[Column(name="tags[sentry:user]", entity=entity)], alias="count_unique_user", ) ] assert snql_query.query.where == [ And( [ Condition(Column("type", entity=entity), Op.EQ, "error"), Condition( Function( function="ifNull", parameters=[Column(name="tags[sentry:release]", entity=entity), ""], ), Op.IN, [""], ), ] ), Condition(Column("project_id", entity=entity), Op.IN, [self.project.id]), ] def test_get_entity_subscription_for_events_dataset_with_join(self) -> None: aggregate = "count_unique(user)" entity_subscription = get_entity_subscription( query_type=SnubaQuery.Type.ERROR, dataset=Dataset.Events, aggregate=aggregate, time_window=3600, ) assert isinstance(entity_subscription, EventsEntitySubscription) assert entity_subscription.aggregate == aggregate assert entity_subscription.get_entity_extra_params() == {} assert entity_subscription.dataset == Dataset.Events e_entity = Entity(Dataset.Events.value, alias=Dataset.Events.value) g_entity = Entity("group_attributes", alias="ga") snql_query = entity_subscription.build_query_builder( "status:unresolved", [self.project.id], None ).get_snql_query() assert snql_query.query.match == Join([Relationship(e_entity, "attributes", g_entity)]) assert snql_query.query.select == [ Function( function="uniq", parameters=[Column(name="tags[sentry:user]", entity=e_entity)], alias="count_unique_user", ) ] assert snql_query.query.where == [ And( [ 
Condition(Column("type", entity=e_entity), Op.EQ, "error"), Condition( Column("group_status", entity=g_entity), Op.IN, [GroupStatus.UNRESOLVED] ), ] ), Condition(Column("project_id", entity=e_entity), Op.IN, [self.project.id]), Condition(Column("project_id", entity=g_entity), Op.IN, [self.project.id]), ] def test_events_subscription_count_upsampling_toggle(self) -> None: project = self.create_project(organization=self.organization) sub = get_entity_subscription( query_type=SnubaQuery.Type.ERROR, dataset=Dataset.Events, aggregate="count()", time_window=60, ) # Not allowlisted → expect plain count and no sample_weight with self.options({"issues.client_error_sampling.project_allowlist": []}): qb = sub.build_query_builder(query="", project_ids=[project.id], environment=None) req = qb.get_snql_query() assert len(req.query.select) == 1 func = req.query.select[0] assert getattr(func, "alias", None) == "count" # Allowlisted → expect full upsampled function structure with self.options({"issues.client_error_sampling.project_allowlist": [project.id]}): qb = sub.build_query_builder(query="", project_ids=[project.id], environment=None) req = qb.get_snql_query() assert len(req.query.select) == 1 func = req.query.select[0] assert getattr(func, "alias", None) == "upsampled_count" # Expect: toInt64(sum(ifNull(sample_weight, 1))) structure assert isinstance(func, Function) assert func.function == "toInt64" assert len(func.parameters) == 1 outer_sum = func.parameters[0] assert isinstance(outer_sum, Function) assert outer_sum.function == "sum" assert len(outer_sum.parameters) == 1 inner_ifnull = outer_sum.parameters[0] assert isinstance(inner_ifnull, Function) assert inner_ifnull.function == "ifNull" assert len(inner_ifnull.parameters) == 2 weight_col = inner_ifnull.parameters[0] assert isinstance(weight_col, Column) assert weight_col.name == "sample_weight" def test_get_entity_subscription_for_eap_rpc_query(self) -> None: aggregate = "count(span.duration)" query = "span.op:http.client" entity_subscription = get_entity_subscription( query_type=SnubaQuery.Type.PERFORMANCE, dataset=Dataset.EventsAnalyticsPlatform, aggregate=aggregate, time_window=3600, extra_fields={"org_id": self.organization.id}, ) assert isinstance(entity_subscription, PerformanceSpansEAPRpcEntitySubscription) assert entity_subscription.aggregate == aggregate assert entity_subscription.get_entity_extra_params() == {} assert entity_subscription.dataset == Dataset.EventsAnalyticsPlatform rpc_timeseries_request = entity_subscription.build_rpc_request( query, [self.project.id], None ) assert rpc_timeseries_request.granularity_secs == 3600 assert rpc_timeseries_request.filter.comparison_filter.value.val_str == "http.client" assert rpc_timeseries_request.expressions[0].aggregation.label == "count(span.duration)" assert ( rpc_timeseries_request.expressions[0].aggregation.extrapolation_mode == ProtoExtrapolationMode.EXTRAPOLATION_MODE_SAMPLE_WEIGHTED ) def test_get_entity_subscription_for_eap_with_extrapolation_mode(self) -> None: aggregate = "count(span.duration)" query = "span.op:http.client" # Test with SERVER_WEIGHTED extrapolation mode entity_subscription = get_entity_subscription( query_type=SnubaQuery.Type.PERFORMANCE, dataset=Dataset.EventsAnalyticsPlatform, aggregate=aggregate, time_window=3600, extra_fields={ "org_id": self.organization.id, "extrapolation_mode": ExtrapolationMode.SERVER_WEIGHTED, }, ) assert isinstance(entity_subscription, PerformanceSpansEAPRpcEntitySubscription) assert entity_subscription.extrapolation_mode == 
ExtrapolationMode.SERVER_WEIGHTED rpc_timeseries_request = entity_subscription.build_rpc_request( query, [self.project.id], None ) # Verify the extrapolation mode is passed to the RPC request assert ( rpc_timeseries_request.expressions[0].aggregation.extrapolation_mode == ProtoExtrapolationMode.EXTRAPOLATION_MODE_SERVER_ONLY )
EntitySubscriptionTestCase
python
streamlit__streamlit
lib/tests/streamlit/elements/audio_input_test.py
{ "start": 1204, "end": 7627 }
class ____(DeltaGeneratorTestCase): def test_just_label(self): """Test that it can be called with no other values.""" st.audio_input("the label") c = self.get_delta_from_queue().new_element.audio_input assert c.label == "the label" assert ( c.label_visibility.value == LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE ) # Default sample_rate should be 16000 assert c.sample_rate == 16000 @parameterized.expand( [ ("visible", LabelVisibilityMessage.LabelVisibilityOptions.VISIBLE), ("hidden", LabelVisibilityMessage.LabelVisibilityOptions.HIDDEN), ("collapsed", LabelVisibilityMessage.LabelVisibilityOptions.COLLAPSED), ] ) def test_label_visibility(self, label_visibility_value, proto_value): """Test that it can be called with label_visibility parameter.""" st.audio_input("the label", label_visibility=label_visibility_value) c = self.get_delta_from_queue().new_element.audio_input assert c.label_visibility.value == proto_value def test_label_visibility_wrong_value(self): with pytest.raises(StreamlitAPIException) as e: st.audio_input("the label", label_visibility="wrong_value") assert ( str(e.value) == "Unsupported label_visibility option 'wrong_value'. Valid values are 'visible', 'hidden' or 'collapsed'." ) def test_width_config_stretch(self): """Test width config with 'stretch' value.""" st.audio_input("the label", width="stretch") c = self.get_delta_from_queue().new_element assert ( c.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert c.width_config.use_stretch def test_width_config_pixel(self): """Test width config with pixel value.""" st.audio_input("the label", width=100) c = self.get_delta_from_queue().new_element assert ( c.width_config.WhichOneof("width_spec") == WidthConfigFields.PIXEL_WIDTH.value ) assert c.width_config.pixel_width == 100 def test_width_config_default(self): """Test width config with default value.""" st.audio_input("the label") c = self.get_delta_from_queue().new_element assert ( c.width_config.WhichOneof("width_spec") == WidthConfigFields.USE_STRETCH.value ) assert c.width_config.use_stretch @parameterized.expand( [ ("invalid_string", "invalid"), ("negative", -1), ("zero", 0), ("float", 100.5), ] ) def test_width_config_invalid(self, name, invalid_width): """Test width config with various invalid values.""" with pytest.raises(StreamlitInvalidWidthError): st.audio_input("the label", width=invalid_width) @parameterized.expand( [ (8000,), (11025,), (16000,), (22050,), (24000,), (32000,), (44100,), (48000,), ] ) def test_valid_sample_rates(self, sample_rate): """Test that valid sample rates are accepted and properly set in the protobuf.""" st.audio_input("the label", sample_rate=sample_rate) c = self.get_delta_from_queue().new_element.audio_input assert c.sample_rate == sample_rate def test_sample_rate_none(self): """Test that None sample_rate means browser default.""" st.audio_input("the label", sample_rate=None) c = self.get_delta_from_queue().new_element.audio_input # When sample_rate is None, the field should not be set in protobuf assert not c.HasField("sample_rate") def test_stable_id_with_key(self): """Widget ID is stable when key is provided and non-whitelisted args change.""" with patch( "streamlit.elements.lib.utils._register_element_id", return_value=True, ): st.audio_input( label="Label 1", key="audio_input_key", help="Help 1", disabled=False, width="stretch", label_visibility="visible", sample_rate=8000, on_change=lambda: None, args=("arg1", "arg2"), kwargs={"kwarg1": "kwarg1"}, ) c1 = 
self.get_delta_from_queue().new_element.audio_input id1 = c1.id # Change non-whitelisted params st.audio_input( label="Label 2", key="audio_input_key", help="Help 2", disabled=True, width=200, label_visibility="hidden", sample_rate=16000, on_change=lambda: None, args=("arg_1", "arg_2"), kwargs={"kwarg_1": "kwarg_1"}, ) c2 = self.get_delta_from_queue().new_element.audio_input id2 = c2.id assert id1 == id2 @parameterized.expand( [ (12345,), (9000,), (50000,), (100000,), ] ) def test_invalid_sample_rates(self, sample_rate): """Test that invalid sample rates raise an exception.""" with pytest.raises(StreamlitAPIException) as e: st.audio_input("the label", sample_rate=sample_rate) assert "Invalid sample_rate" in str(e.value) assert "Must be one of" in str(e.value) @patch("streamlit.elements.widgets.audio_input._get_upload_files") def test_not_allowed_file_extension_raise_an_exception_for_camera_input( self, get_upload_files_patch ): rec1 = UploadedFileRec("file1", "file1.mp3", "type", b"123") uploaded_files = [ UploadedFile( rec1, FileURLsProto(file_id="file1", delete_url="d1", upload_url="u1") ), ] get_upload_files_patch.return_value = uploaded_files with pytest.raises(StreamlitAPIException) as e: return_val = st.audio_input("label") st.write(return_val) assert str(e.value) == "Invalid file extension: `.mp3`. Allowed: ['.wav']"
AudioInputTest
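The tests above exercise `st.audio_input`; a minimal app-side sketch of the widget they cover (the playback line is an assumption on my part — it relies on the returned recording being an UploadedFile-like object that `st.audio` accepts, which is what the tests' mocks suggest):

import streamlit as st

# Record from the user's microphone; the widget returns None until a clip exists.
audio = st.audio_input("Record a short clip", sample_rate=16000)
if audio is not None:
    st.audio(audio)  # play the recorded WAV back to the user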
python
pydantic__pydantic
tests/test_forward_ref.py
{ "start": 10210, "end": 10254 }
class ____(BaseModel): account: Account
Owner
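This one-line model comes from pydantic's forward-reference tests; a self-contained sketch of how such a reference resolves (the `Account` fields below are invented for illustration, not taken from the test file):

from pydantic import BaseModel

class Owner(BaseModel):
    account: "Account"  # forward reference: Account is defined afterwards

class Account(BaseModel):
    balance: float = 0.0

Owner.model_rebuild()  # resolve the "Account" string against the module namespace
print(Owner(account={"balance": 12.5}))  # account=Account(balance=12.5)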
python
fsspec__filesystem_spec
fsspec/implementations/tests/test_asyn_wrapper.py
{ "start": 299, "end": 6809 }
class ____(AsyncFileSystem): """ A mock file system that simulates a synchronous locking file systems with delays. """ def __init__( self, asynchronous: bool = False, delays=None, ) -> None: self.lock = asyncio.Lock() self.delays = cycle((0.03, 0.01) if delays is None else delays) super().__init__(asynchronous=asynchronous) async def _cat_file(self, path, start=None, end=None) -> bytes: await self._simulate_io_operation(path) return path.encode() async def _await_io(self) -> None: await asyncio.sleep(next(self.delays)) async def _simulate_io_operation(self, path) -> None: await self._check_active() async with self.lock: await self._await_io() async def _check_active(self) -> None: if self.lock.locked(): raise RuntimeError("Concurrent requests!") @pytest.mark.asyncio async def test_is_async_default(): fs = fsspec.filesystem("file") async_fs = AsyncFileSystemWrapper(fs) assert async_fs.async_impl assert async_fs.asynchronous async_fs = AsyncFileSystemWrapper(fs, asynchronous=False) assert not async_fs.asynchronous def test_class_wrapper(): fs_cls = LocalFileSystem async_fs_cls = AsyncFileSystemWrapper.wrap_class(fs_cls) assert async_fs_cls.__name__ == "AsyncLocalFileSystemWrapper" async_fs = async_fs_cls() assert async_fs.async_impl @pytest.mark.asyncio async def test_cats(): with filetexts(csv_files, mode="b"): fs = fsspec.filesystem("file") async_fs = AsyncFileSystemWrapper(fs) result = await async_fs._cat(".test.fakedata.1.csv") assert result == b"a,b\n1,2\n" out = set( ( await async_fs._cat([".test.fakedata.1.csv", ".test.fakedata.2.csv"]) ).values() ) assert out == {b"a,b\n1,2\n", b"a,b\n3,4\n"} result = await async_fs._cat(".test.fakedata.1.csv", None, None) assert result == b"a,b\n1,2\n" result = await async_fs._cat(".test.fakedata.1.csv", start=1, end=6) assert result == b"a,b\n1,2\n"[1:6] result = await async_fs._cat(".test.fakedata.1.csv", start=-1) assert result == b"a,b\n1,2\n"[-1:] result = await async_fs._cat(".test.fakedata.1.csv", start=1, end=-2) assert result == b"a,b\n1,2\n"[1:-2] # test synchronous API is available as expected async_fs = AsyncFileSystemWrapper(fs, asynchronous=False) result = async_fs.cat(".test.fakedata.1.csv", start=1, end=-2) assert result == b"a,b\n1,2\n"[1:-2] out = set( ( await async_fs._cat( [".test.fakedata.1.csv", ".test.fakedata.2.csv"], start=1, end=-1 ) ).values() ) assert out == {b"a,b\n1,2\n"[1:-1], b"a,b\n3,4\n"[1:-1]} @pytest.mark.asyncio async def test_basic_crud_operations(): with filetexts(csv_files, mode="b"): fs = fsspec.filesystem("file") async_fs = AsyncFileSystemWrapper(fs) await async_fs._touch(".test.fakedata.3.csv") assert await async_fs._exists(".test.fakedata.3.csv") data = await async_fs._cat(".test.fakedata.1.csv") assert data == b"a,b\n1,2\n" await async_fs._pipe(".test.fakedata.1.csv", b"a,b\n5,6\n") data = await async_fs._cat(".test.fakedata.1.csv") assert data == b"a,b\n5,6\n" await async_fs._rm(".test.fakedata.1.csv") assert not await async_fs._exists(".test.fakedata.1.csv") @pytest.mark.asyncio async def test_error_handling(): fs = fsspec.filesystem("file") async_fs = AsyncFileSystemWrapper(fs) with pytest.raises(FileNotFoundError): await async_fs._cat(".test.non_existent.csv") with pytest.raises(FileNotFoundError): await async_fs._rm(".test.non_existent.csv") @pytest.mark.asyncio async def test_concurrent_operations(): with filetexts(csv_files, mode="b"): fs = fsspec.filesystem("file") async_fs = AsyncFileSystemWrapper(fs) async def read_file(file_path): return await async_fs._cat(file_path) results = await 
asyncio.gather( read_file(".test.fakedata.1.csv"), read_file(".test.fakedata.2.csv"), read_file(".test.fakedata.1.csv"), ) assert results == [b"a,b\n1,2\n", b"a,b\n3,4\n", b"a,b\n1,2\n"] @pytest.mark.asyncio async def test_directory_operations(): with filetexts(csv_files, mode="b"): fs = fsspec.filesystem("file") async_fs = AsyncFileSystemWrapper(fs) await async_fs._makedirs("new_directory") assert await async_fs._isdir("new_directory") files = await async_fs._ls(".") filenames = [os.path.basename(file) for file in files] assert ".test.fakedata.1.csv" in filenames assert ".test.fakedata.2.csv" in filenames assert "new_directory" in filenames @pytest.mark.asyncio async def test_batch_operations(): with filetexts(csv_files, mode="b"): fs = fsspec.filesystem("file") async_fs = AsyncFileSystemWrapper(fs) await async_fs._rm([".test.fakedata.1.csv", ".test.fakedata.2.csv"]) assert not await async_fs._exists(".test.fakedata.1.csv") assert not await async_fs._exists(".test.fakedata.2.csv") def test_open(tmpdir): fn = f"{tmpdir}/afile" with open(fn, "wb") as f: f.write(b"hello") of = fsspec.open( "dir://afile::async_wrapper::file", mode="rb", async_wrapper={"asynchronous": False}, dir={"path": str(tmpdir)}, ) with of as f: assert f.read() == b"hello" @pytest.mark.asyncio async def test_semaphore_synchronous(): fs = AsyncFileSystemWrapper( LockedFileSystem(), asynchronous=False, semaphore=asyncio.Semaphore(1) ) paths = [f"path_{i}" for i in range(1, 3)] results = await asyncio.gather(*(fs._cat_file(path) for path in paths)) assert set(results) == {path.encode() for path in paths} @pytest.mark.asyncio async def test_deadlock_when_asynchronous(): fs = AsyncFileSystemWrapper( LockedFileSystem(), asynchronous=False, semaphore=asyncio.Semaphore(3) ) paths = [f"path_{i}" for i in range(1, 3)] with pytest.raises(RuntimeError, match="Concurrent requests!"): await asyncio.gather(*(fs._cat_file(path) for path in paths))
LockedFileSystem
python
Lightning-AI__lightning
tests/tests_pytorch/trainer/test_trainer.py
{ "start": 66008, "end": 78583 }
class ____(Callback): exceptions = 0 def on_exception(self, *_): self.exceptions += 1 @pytest.mark.parametrize("strategy", ["auto", pytest.param("ddp_spawn", marks=RunIf(skip_windows=True, mps=False))]) def test_error_handling_all_stages(tmp_path, strategy): model = TrainerStagesErrorsModel() counter = ExceptionCounter() trainer = Trainer( default_root_dir=tmp_path, strategy=strategy, devices=1, callbacks=counter, fast_dev_run=True, ) with pytest.raises(Exception, match=r"Error during train"): trainer.fit(model) assert counter.exceptions == 1 with pytest.raises(Exception, match=r"Error during validation"): trainer.validate(model) assert counter.exceptions == 2 with pytest.raises(Exception, match=r"Error during test"): trainer.test(model) assert counter.exceptions == 3 with pytest.raises(Exception, match=r"Error during predict"): trainer.predict(model, model.val_dataloader(), return_predictions=False) assert counter.exceptions == 4 def test_trainer_metrics_reset_before_each_task(tmp_path): """Test that callback, logged and progress bar metrics are reset before each task starts.""" class TestMetricRestartCallback(Callback): def _make_assertions(self, trainer): assert trainer.callback_metrics == {} assert trainer.progress_bar_metrics == {} assert trainer.logged_metrics == {} def on_train_start(self, trainer, *args, **kwargs): self._make_assertions(trainer) def on_validation_start(self, trainer, *args, **kwargs): if trainer.state.fn == TrainerFn.VALIDATING: self._make_assertions(trainer) def on_test_start(self, trainer, *args, **kwargs): self._make_assertions(trainer) def on_predict_start(self, trainer, *args, **kwargs): self._make_assertions(trainer) class CustomBoringModel(BoringModel): def __init__(self): super().__init__() def training_step(self, *args, **kwargs): self.log("train/metric", 7.0) return super().training_step(*args, **kwargs) def validation_step(self, *args, **kwargs): self.log("val/metric", 14.0) return super().validation_step(*args, **kwargs) def test_step(self, *args, **kwargs): self.log("test/metric", 21.0) return super().test_step(*args, **kwargs) model = CustomBoringModel() trainer = Trainer(default_root_dir=tmp_path, fast_dev_run=4, callbacks=[TestMetricRestartCallback()]) trainer.fit(model) trainer.validate(model) trainer.test(model) trainer.predict(model) def test_detect_anomaly_nan(tmp_path): class NanModel(BoringModel): def training_step(self, batch, batch_idx): output = super().training_step(batch, batch_idx) output["loss"] = output["loss"] * torch.tensor(float("nan")) return output model = NanModel() trainer = Trainer(default_root_dir=tmp_path, detect_anomaly=True) with ( pytest.raises(RuntimeError, match=r"returned nan values in its 0th output."), pytest.warns(UserWarning, match=r".*Error detected in.* Traceback of forward call that caused the error.*"), ): trainer.fit(model) @pytest.mark.parametrize( ("trainer_kwargs", "strategy_cls", "accelerator_cls", "devices"), [ pytest.param({"strategy": "auto"}, SingleDeviceStrategy, CPUAccelerator, 1, marks=RunIf(mps=False)), pytest.param({"strategy": "ddp"}, DDPStrategy, CPUAccelerator, 1, marks=RunIf(mps=False)), pytest.param({"strategy": "ddp", "num_nodes": 2}, DDPStrategy, CPUAccelerator, 1, marks=RunIf(mps=False)), ( {"strategy": "auto", "accelerator": "cuda", "devices": 1}, SingleDeviceStrategy, CUDAAccelerator, 1, ), ({"strategy": "ddp", "accelerator": "cuda", "devices": 1}, DDPStrategy, CUDAAccelerator, 1), ( {"strategy": "ddp_spawn", "accelerator": "cuda", "devices": 1}, DDPStrategy, CUDAAccelerator, 1, ), 
({"strategy": "auto", "accelerator": "cuda", "devices": 2}, DDPStrategy, CUDAAccelerator, 2), ({"strategy": "ddp", "accelerator": "cuda", "devices": 2}, DDPStrategy, CUDAAccelerator, 2), ({"strategy": "ddp", "accelerator": "cpu", "devices": 2}, DDPStrategy, CPUAccelerator, 2), ( {"strategy": "ddp_spawn", "accelerator": "cpu", "devices": 2}, DDPStrategy, CPUAccelerator, 2, ), ( {"strategy": "ddp_spawn", "accelerator": "cpu", "devices": 1}, DDPStrategy, CPUAccelerator, 1, ), ( {"strategy": DDPStrategy(), "accelerator": "cpu", "devices": 2}, DDPStrategy, CPUAccelerator, 2, ), ( {"strategy": DDPStrategy(), "accelerator": "cuda", "devices": 2}, DDPStrategy, CUDAAccelerator, 2, ), pytest.param({"strategy": DDPStrategy()}, DDPStrategy, CPUAccelerator, 1, marks=RunIf(mps=False)), ( {"strategy": "ddp_spawn", "accelerator": "cuda", "devices": 2, "num_nodes": 2}, DDPStrategy, CUDAAccelerator, 2, ), ], ) def test_trainer_config_strategy(monkeypatch, trainer_kwargs, strategy_cls, accelerator_cls, devices): if trainer_kwargs.get("accelerator") == "cuda": mock_cuda_count(monkeypatch, trainer_kwargs["devices"]) if trainer_kwargs.get("accelerator") == "auto": # current parametrizations assume non-CUDA env mock_cuda_count(monkeypatch, 0) trainer = Trainer(**trainer_kwargs) assert isinstance(trainer.strategy, strategy_cls) assert isinstance(trainer.accelerator, accelerator_cls) assert trainer.num_devices == devices assert trainer.num_nodes == trainer_kwargs.get("num_nodes", 1) trainer_kwargs.pop("accelerator", None) trainer_kwargs.pop("devices", None) assert isinstance(trainer.strategy, strategy_cls) assert isinstance(trainer.accelerator, accelerator_cls) assert trainer.num_devices == devices assert trainer.num_nodes == trainer_kwargs.get("num_nodes", 1) @pytest.mark.parametrize( "running_stage", [RunningStage.TRAINING, RunningStage.VALIDATING, RunningStage.TESTING, RunningStage.PREDICTING] ) def test_dataloaders_are_not_loaded_if_disabled_through_limit_batches(running_stage): dl_prefix = running_stage.dataloader_prefix argument = f"limit_{dl_prefix}_batches" trainer_kwargs = {argument: 0} trainer = Trainer(**trainer_kwargs) model = BoringModel() trainer.strategy.connect(model) trainer._data_connector.attach_data(model) trainer.state.stage = running_stage if running_stage == "train": fn = trainer.fit_loop.setup_data elif running_stage == "validate": fn = trainer.validate_loop.setup_data elif running_stage == "test": fn = trainer.test_loop.setup_data else: fn = trainer.predict_loop.setup_data # with no limit, the attribute is None fn() dataloader_attribute = f"{dl_prefix}_dataloader{'' if running_stage == 'train' else 's'}" assert getattr(trainer, dataloader_attribute) is None # validate it would've worked if a limit was set setattr(trainer, argument, 1) fn() assert isinstance(getattr(trainer, dataloader_attribute), DataLoader) @pytest.mark.parametrize( ("trainer_kwargs", "expected_device_ids"), [ ({}, [0]), ({"devices": 1}, [0]), ({"devices": "1"}, [0]), pytest.param({"devices": 2}, [0, 1], marks=RunIf(mps=False)), ({"accelerator": "gpu", "devices": 1}, [0]), ({"accelerator": "cuda", "devices": 1}, [0]), ({"accelerator": "cuda", "devices": 2}, [0, 1]), ({"accelerator": "cuda", "devices": "2"}, [0, 1]), ({"accelerator": "cuda", "devices": [2]}, [2]), ({"accelerator": "cuda", "devices": "2,"}, [2]), ({"accelerator": "cuda", "devices": [0, 2]}, [0, 2]), ({"accelerator": "cuda", "devices": "0, 2"}, [0, 2]), ({"accelerator": "mps", "devices": 1}, [0]), ], ) def test_trainer_config_device_ids(monkeypatch, 
trainer_kwargs, expected_device_ids): if trainer_kwargs.get("accelerator") in ("cuda", "gpu"): mock_cuda_count(monkeypatch, 4) elif trainer_kwargs.get("accelerator") in ("mps", "gpu"): mock_mps_count(monkeypatch, 1) trainer = Trainer(**trainer_kwargs) assert trainer.device_ids == expected_device_ids assert trainer.num_devices == len(expected_device_ids) def test_trainer_save_checkpoint_no_model_attached(): trainer = Trainer() assert trainer.model is None with pytest.raises(AttributeError, match="Saving a checkpoint is only possible if a model is attached"): trainer.save_checkpoint("checkpoint.ckpt") def test_trainer_calls_logger_finalize_on_exception(tmp_path): class CustomModel(BoringModel): def on_fit_start(self): super().on_fit_start() raise Exception("logger-finalize") model = CustomModel() logger = TensorBoardLogger(save_dir=tmp_path) logger.finalize = Mock() trainer = Trainer(logger=logger) with pytest.raises(Exception, match="logger-finalize"): trainer.fit(model) logger.finalize.assert_called_once_with("failed") @pytest.mark.parametrize("exception_type", [KeyboardInterrupt, RuntimeError]) def test_trainer_calls_strategy_on_exception(exception_type, tmp_path): """Test that when an exception occurs, the Trainer lets the strategy process it.""" exception = exception_type("Test exception") class ExceptionModel(BoringModel): def on_fit_start(self): raise exception trainer = Trainer(default_root_dir=tmp_path) with ( mock.patch("lightning.pytorch.strategies.strategy.Strategy.on_exception") as on_exception_mock, suppress(Exception, SystemExit), ): trainer.fit(ExceptionModel()) on_exception_mock.assert_called_once_with(exception) @pytest.mark.parametrize("exception_type", [KeyboardInterrupt, RuntimeError]) def test_trainer_calls_datamodule_on_exception(exception_type, tmp_path): """Test that when an exception occurs, the Trainer lets the data module process it.""" exception = exception_type("Test exception") class ExceptionModel(BoringModel): def on_fit_start(self): raise exception datamodule = BoringDataModule() datamodule.on_exception = Mock() trainer = Trainer(default_root_dir=tmp_path) with suppress(Exception, SystemExit): trainer.fit(ExceptionModel(), datamodule=datamodule) datamodule.on_exception.assert_called_once_with(exception) def test_init_module_context(monkeypatch): """Test that the strategy returns the context manager for initializing the module.""" trainer = Trainer(accelerator="cpu", devices=1) strategy = SingleDeviceStrategy(device=torch.device("cuda")) strategy.tensor_init_context = Mock(wraps=strategy.tensor_init_context) trainer._accelerator_connector.strategy = strategy with trainer.init_module(): pass strategy.tensor_init_context.assert_called_once_with(empty_init=None) strategy.tensor_init_context.reset_mock() @pytest.mark.parametrize( ("target_device", "accelerator", "devices"), [ ("cpu", "cpu", "auto"), pytest.param("cuda:0", "gpu", [0], marks=RunIf(min_cuda_gpus=1)), pytest.param("cuda:1", "gpu", [1], marks=RunIf(min_cuda_gpus=2)), ], ) def test_init_module_device_type(target_device, accelerator, devices): """Test that the strategy returns the context manager for initializing the module.""" trainer = Trainer(accelerator=accelerator, devices=devices) with trainer.init_module(): model = BoringModel() assert model.device == torch.device(target_device) def test_expand_home_trainer(): """Test that the dirpath gets expanded if it contains `~`.""" home_root = Path.home() trainer = Trainer(default_root_dir="~/trainer") assert trainer.default_root_dir == str(home_root / 
"trainer") trainer = Trainer(default_root_dir=Path("~/trainer")) assert trainer.default_root_dir == str(home_root / "trainer")
ExceptionCounter
python
scipy__scipy
scipy/optimize/tests/test__basinhopping.py
{ "start": 1693, "end": 2300 }
class ____: """pass a custom accept test This does nothing but make sure it's being used and ensure all the possible return values are accepted """ def __init__(self): self.been_called = False self.ncalls = 0 self.testres = [False, 'force accept', True, np.bool_(True), np.bool_(False), [], {}, 0, 1] def __call__(self, **kwargs): self.been_called = True self.ncalls += 1 if self.ncalls - 1 < len(self.testres): return self.testres[self.ncalls - 1] else: return True
MyAcceptTest
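`MyAcceptTest` stands in for the `accept_test` hook of `scipy.optimize.basinhopping`; a hedged usage sketch (the objective function below is illustrative, not the one defined elsewhere in the test module):

import numpy as np
from scipy.optimize import basinhopping

def objective(x):
    return np.cos(14.5 * x[0] - 0.3) + (x[0] + 0.2) * x[0]

accept_test = MyAcceptTest()
# basinhopping invokes accept_test(f_new=..., x_new=..., f_old=..., x_old=...)
# once per hop, so the canned testres values are consumed in order.
basinhopping(objective, x0=[1.0], niter=10, accept_test=accept_test)
assert accept_test.been_called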
python
anthropics__anthropic-sdk-python
src/anthropic/resources/beta/messages/batches.py
{ "start": 34271, "end": 34936 }
class ____: def __init__(self, batches: AsyncBatches) -> None: self._batches = batches self.create = _legacy_response.async_to_raw_response_wrapper( batches.create, ) self.retrieve = _legacy_response.async_to_raw_response_wrapper( batches.retrieve, ) self.list = _legacy_response.async_to_raw_response_wrapper( batches.list, ) self.delete = _legacy_response.async_to_raw_response_wrapper( batches.delete, ) self.cancel = _legacy_response.async_to_raw_response_wrapper( batches.cancel, )
AsyncBatchesWithRawResponse
python
apache__airflow
providers/google/tests/unit/google/cloud/operators/test_alloy_db.py
{ "start": 45403, "end": 51474 }
class ____: def setup_method(self): self.operator = AlloyDBUpdateInstanceOperator( task_id=TEST_TASK_ID, instance_id=TEST_INSTANCE_ID, cluster_id=TEST_CLUSTER_ID, instance_configuration=TEST_INSTANCE, update_mask=TEST_UPDATE_MASK, allow_missing=TEST_ALLOW_MISSING, project_id=TEST_GCP_PROJECT, location=TEST_GCP_REGION, gcp_conn_id=TEST_GCP_CONN_ID, request_id=TEST_REQUEST_ID, validate_request=TEST_VALIDATE_ONLY, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, impersonation_chain=TEST_IMPERSONATION_CHAIN, ) def test_init(self): assert self.operator.instance_id == TEST_INSTANCE_ID assert self.operator.cluster_id == TEST_CLUSTER_ID assert self.operator.instance_configuration == TEST_INSTANCE assert self.operator.update_mask == TEST_UPDATE_MASK assert self.operator.allow_missing == TEST_ALLOW_MISSING def test_template_fields(self): expected_template_fields = { "cluster_id", "instance_id", "instance_configuration", "update_mask", "allow_missing", } | set(AlloyDBWriteBaseOperator.template_fields) assert set(AlloyDBUpdateInstanceOperator.template_fields) == expected_template_fields @mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.Instance.to_dict")) @mock.patch(UPDATE_INSTANCE_OPERATOR_PATH.format("get_operation_result")) @mock.patch(UPDATE_INSTANCE_OPERATOR_PATH.format("log")) @mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock) def test_execute(self, mock_hook, mock_log, mock_get_operation_result, mock_to_dict): mock_update_instance = mock_hook.return_value.update_instance mock_operation = mock_update_instance.return_value mock_operation_result = mock_get_operation_result.return_value expected_result = mock_to_dict.return_value mock_context = mock.MagicMock() result = self.operator.execute(context=mock_context) mock_update_instance.assert_called_once_with( cluster_id=TEST_CLUSTER_ID, instance_id=TEST_INSTANCE_ID, project_id=TEST_GCP_PROJECT, location=TEST_GCP_REGION, instance=TEST_INSTANCE, update_mask=TEST_UPDATE_MASK, allow_missing=TEST_ALLOW_MISSING, request_id=TEST_REQUEST_ID, validate_only=TEST_VALIDATE_ONLY, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_operation_result.assert_called_once_with(mock_operation) mock_to_dict.assert_called_once_with(mock_operation_result) assert result == expected_result mock_log.info.assert_has_calls( [ call("Updating an AlloyDB instance."), call("AlloyDB instance %s was successfully updated.", TEST_CLUSTER_ID), ] ) @mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.Cluster.to_dict")) @mock.patch(UPDATE_INSTANCE_OPERATOR_PATH.format("get_operation_result")) @mock.patch(UPDATE_INSTANCE_OPERATOR_PATH.format("log")) @mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock) def test_execute_validate_request( self, mock_hook, mock_log, mock_get_operation_result, mock_to_dict, ): mock_update_instance = mock_hook.return_value.update_instance mock_operation = mock_update_instance.return_value mock_get_operation_result.return_value = None expected_message = "Validating an Update AlloyDB instance request." 
mock_context = mock.MagicMock() self.operator.validate_request = True result = self.operator.execute(context=mock_context) mock_log.info.assert_called_once_with(expected_message) mock_update_instance.assert_called_once_with( cluster_id=TEST_CLUSTER_ID, instance_id=TEST_INSTANCE_ID, project_id=TEST_GCP_PROJECT, location=TEST_GCP_REGION, instance=TEST_INSTANCE, update_mask=TEST_UPDATE_MASK, allow_missing=TEST_ALLOW_MISSING, request_id=TEST_REQUEST_ID, validate_only=True, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) mock_get_operation_result.assert_called_once_with(mock_operation) assert not mock_to_dict.called assert result is None @mock.patch(OPERATOR_MODULE_PATH.format("alloydb_v1.Cluster.to_dict")) @mock.patch(UPDATE_INSTANCE_OPERATOR_PATH.format("get_operation_result")) @mock.patch(UPDATE_INSTANCE_OPERATOR_PATH.format("log")) @mock.patch(ALLOY_DB_HOOK_PATH, new_callable=mock.PropertyMock) def test_execute_exception(self, mock_hook, mock_log, mock_get_operation_result, mock_to_dict): mock_update_instance = mock_hook.return_value.update_instance mock_update_instance.side_effect = Exception mock_context = mock.MagicMock() with pytest.raises(AirflowException): self.operator.execute(context=mock_context) mock_update_instance.assert_called_once_with( cluster_id=TEST_CLUSTER_ID, instance_id=TEST_INSTANCE_ID, project_id=TEST_GCP_PROJECT, location=TEST_GCP_REGION, instance=TEST_INSTANCE, update_mask=TEST_UPDATE_MASK, allow_missing=TEST_ALLOW_MISSING, request_id=TEST_REQUEST_ID, validate_only=TEST_VALIDATE_ONLY, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA, ) assert not mock_get_operation_result.called assert not mock_to_dict.called mock_log.info.assert_called_once_with("Updating an AlloyDB instance.")
TestAlloyDBUpdateInstanceOperator
python
keras-team__keras
keras/src/callbacks/progbar_logger.py
{ "start": 226, "end": 3104 }
class ____(Callback): """Callback that prints metrics to stdout. Args: count_mode: One of `"steps"` or `"samples"`. Whether the progress bar should count samples seen or steps (batches) seen. Raises: ValueError: In case of invalid `count_mode`. """ def __init__(self): super().__init__() self.seen = 0 self.progbar = None self.target = None self.verbose = 1 self.epochs = 1 self._called_in_fit = False def set_params(self, params): verbose = params["verbose"] if verbose == "auto": verbose = 1 self.verbose = verbose self.epochs = params["epochs"] self.target = params["steps"] def on_train_begin(self, logs=None): # When this logger is called inside `fit`, validation is silent. self._called_in_fit = True def on_test_begin(self, logs=None): if not self._called_in_fit: self._reset_progbar() self._maybe_init_progbar() def on_predict_begin(self, logs=None): self._reset_progbar() self._maybe_init_progbar() def on_epoch_begin(self, epoch, logs=None): self._reset_progbar() self._maybe_init_progbar() if self.verbose and self.epochs > 1: io_utils.print_msg(f"Epoch {epoch + 1}/{self.epochs}") def on_train_batch_end(self, batch, logs=None): self._update_progbar(batch, logs) def on_test_batch_end(self, batch, logs=None): if not self._called_in_fit: self._update_progbar(batch, logs) def on_predict_batch_end(self, batch, logs=None): # Don't pass prediction results. self._update_progbar(batch, None) def on_epoch_end(self, epoch, logs=None): self._finalize_progbar(logs) def on_test_end(self, logs=None): if not self._called_in_fit: self._finalize_progbar(logs) def on_predict_end(self, logs=None): self._finalize_progbar(logs) def _reset_progbar(self): self.seen = 0 self.progbar = None def _maybe_init_progbar(self): if self.progbar is None: self.progbar = Progbar( target=self.target, verbose=self.verbose, unit_name="step" ) def _update_progbar(self, batch, logs=None): """Updates the progbar.""" logs = logs or {} self._maybe_init_progbar() self.seen = batch + 1 # One-indexed. if self.verbose == 1: self.progbar.update(self.seen, list(logs.items()), finalize=False) def _finalize_progbar(self, logs): logs = logs or {} if self.target is None: self.target = self.seen self.progbar.target = self.target self.progbar.update(self.target, list(logs.items()), finalize=True)
ProgbarLogger
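`ProgbarLogger` delegates all rendering to the `Progbar` helper it instantiates; a standalone sketch of that underlying API, with made-up step timing and metric values (the constructor arguments mirror the ones used in `_maybe_init_progbar` above):

import time
from keras.utils import Progbar

bar = Progbar(target=5, unit_name="step")
for step in range(5):
    time.sleep(0.05)  # stand-in for a training step
    bar.update(step + 1, values=[("loss", 1.0 / (step + 1))])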
python
huggingface__transformers
src/transformers/modeling_outputs.py
{ "start": 5306, "end": 6526 }
class ____(ModelOutput): """ Base class for model's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state after a pooling operation on the spatial dimensions. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, num_channels, height, width)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. """ last_hidden_state: Optional[torch.FloatTensor] = None pooler_output: Optional[torch.FloatTensor] = None hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None @dataclass
BaseModelOutputWithPoolingAndNoAttention
python
astropy__astropy
astropy/modeling/projections.py
{ "start": 27467, "end": 28373 }
class ____(Pix2SkyProjection, Conic): r""" Colles' conic perspective projection - pixel to sky. Corresponds to the ``COP`` projection in FITS WCS. See `Conic` for a description of the entire equation. The projection formulae are: .. math:: C &= \sin \theta_a \\ R_\theta &= \frac{180^\circ}{\pi} \cos \eta [ \cot \theta_a - \tan(\theta - \theta_a)] \\ Y_0 &= \frac{180^\circ}{\pi} \cos \eta \cot \theta_a Parameters ---------- sigma : float :math:`(\theta_1 + \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 90. delta : float :math:`(\theta_1 - \theta_2) / 2`, where :math:`\theta_1` and :math:`\theta_2` are the latitudes of the standard parallels, in degrees. Default is 0. """
Pix2Sky_ConicPerspective
python
airbytehq__airbyte
airbyte-integrations/connectors/source-bing-ads/unit_tests/integrations/base_test.py
{ "start": 1706, "end": 4884 }
class ____(TestCase): def setUp(self) -> None: self._http_mocker = HttpMocker() self._http_mocker.__enter__() self._auth_client(self._http_mocker) def tearDown(self) -> None: self._http_mocker.__exit__(None, None, None) @property def _config(self) -> dict[str, Any]: return ConfigBuilder().build() def _state(self, file: str, stream_name: str) -> list[AirbyteStateMessage]: state_file = Path(__file__).parent.parent / f"resource/state/{file}.json" with open(state_file, "r") as f: state = json.loads(f.read()) return StateBuilder().with_stream_state(stream_name, state).build() def _auth_client(self, http_mocker: HttpMocker) -> None: http_mocker.post(request=build_request(self._config), responses=response_with_status("oauth", 200)) http_mocker.post(request=build_request_2(self._config), responses=response_with_status("oauth", 200)) def mock_user_query_api(self, response_template: str) -> None: http_mocker = self.http_mocker http_mocker.post( RequestBuilder(resource="User/Query", api="client_center").with_body('{"UserId": null}').build(), HttpResponse(json.dumps(find_template(response_template, __file__)), 200), ) def mock_accounts_search_api(self, body: bytes, response_template: str) -> None: http_mocker = self.http_mocker http_mocker.post( RequestBuilder(resource="Accounts/Search").with_body(body).build(), HttpResponse(json.dumps(find_template(resource=response_template, execution_folder=__file__)), 200), ) def mock_generate_report_api(self, endpoint: str, body: bytes, response_template: str) -> None: http_mocker = self.http_mocker http_mocker.post( RequestBuilder(resource=f"GenerateReport/{endpoint}", api="reporting").with_body(body).build(), HttpResponse(json.dumps(find_template(resource=response_template, execution_folder=__file__)), 200), ) def mock_get_report_request_api(self, file_name) -> None: zipped_data = create_zip_from_csv(file_name) http_mocker = self.http_mocker http_mocker.get( RequestBuilder(resource="").build_report_url(), HttpResponse(zipped_data), ) def read_stream( self, stream_name: str, sync_mode: SyncMode, config: Dict[str, Any], stream_data_file: str = None, state: Optional[Dict[str, Any]] = None, expecting_exception: bool = False, ) -> EntrypointOutput: self.mock_get_report_request_api(stream_data_file) catalog = CatalogBuilder().with_stream(stream_name, sync_mode).build() return read_helper(config, catalog, state, expecting_exception) @property def http_mocker(self) -> HttpMocker: return self._http_mocker @staticmethod def create_log_message(log_message: str): return AirbyteMessage( type=Type.LOG, log=AirbyteLogMessage( level=Level.INFO, message=log_message, ), )
BaseTest
python
pypa__setuptools
setuptools/_distutils/compilers/C/tests/test_msvc.py
{ "start": 2928, "end": 4129 }
class ____: def test_concurrent_safe(self): """ Concurrent calls to spawn should have consistent results. """ compiler = msvc.Compiler() compiler._paths = "expected" inner_cmd = 'import os; assert os.environ["PATH"] == "expected"' command = [sys.executable, '-c', inner_cmd] threads = [ CheckThread(target=compiler.spawn, args=[command]) for n in range(100) ] for thread in threads: thread.start() for thread in threads: thread.join() assert all(threads) def test_concurrent_safe_fallback(self): """ If CCompiler.spawn has been monkey-patched without support for an env, it should still execute. """ from distutils import ccompiler compiler = msvc.Compiler() compiler._paths = "expected" def CCompiler_spawn(self, cmd): "A spawn without an env argument." assert os.environ["PATH"] == "expected" with mock.patch.object(ccompiler.CCompiler, 'spawn', CCompiler_spawn): compiler.spawn(["n/a"]) assert os.environ.get("PATH") != "expected"
TestSpawn
python
python__mypy
mypyc/irbuild/classdef.py
{ "start": 12005, "end": 15481 }
class ____(ExtClassBuilder): # controls whether an __annotations__ attribute should be added to the class # __dict__. This is not desirable for attrs classes where auto_attribs is # disabled, as attrs will reject it. add_annotations_to_dict = True def __init__(self, builder: IRBuilder, cdef: ClassDef) -> None: super().__init__(builder, cdef) self.non_ext = self.create_non_ext_info() def create_non_ext_info(self) -> NonExtClassInfo: """Set up a NonExtClassInfo to track dataclass attributes. In addition to setting up a normal extension class for dataclasses, we also collect its class attributes like a non-extension class so that we can hand them to the dataclass decorator. """ return NonExtClassInfo( self.builder.call_c(dict_new_op, [], self.cdef.line), self.builder.add(TupleSet([], self.cdef.line)), self.builder.call_c(dict_new_op, [], self.cdef.line), self.builder.add(LoadAddress(type_object_op.type, type_object_op.src, self.cdef.line)), ) def skip_attr_default(self, name: str, stmt: AssignmentStmt) -> bool: return stmt.type is not None def get_type_annotation(self, stmt: AssignmentStmt) -> TypeInfo | None: # We populate __annotations__ because dataclasses uses it to determine # which attributes to compute on. ann_type = get_proper_type(stmt.type) if isinstance(ann_type, Instance): return ann_type.type return None def add_attr(self, lvalue: NameExpr, stmt: AssignmentStmt) -> None: add_non_ext_class_attr_ann( self.builder, self.non_ext, lvalue, stmt, self.get_type_annotation ) add_non_ext_class_attr( self.builder, self.non_ext, lvalue, stmt, self.cdef, self.attrs_to_cache ) super().add_attr(lvalue, stmt) def finalize(self, ir: ClassIR) -> None: """Generate code to finish instantiating a dataclass. This works by replacing all of the attributes on the class (which will be descriptors) with whatever they would be in a non-extension class, calling dataclass, then switching them back. The resulting class is an extension class and instances of it do not have a __dict__ (unless something else requires it). All methods written explicitly in the source are compiled and may be called through the vtable while the methods generated by dataclasses are interpreted and may not be. (If we just called dataclass without doing this, it would think that all of the descriptors for our attributes are default values and generate an incorrect constructor. We need to do the switch so that dataclass gets the appropriate defaults.) """ super().finalize(ir) assert self.type_obj add_dunders_to_non_ext_dict( self.builder, self.non_ext, self.cdef.line, self.add_annotations_to_dict ) dec = self.builder.accept( next(d for d in self.cdef.decorators if is_dataclass_decorator(d)) ) dataclass_type_val = self.builder.load_str(dataclass_type(self.cdef) or "unknown") self.builder.call_c( dataclass_sleight_of_hand, [dec, self.type_obj, self.non_ext.dict, self.non_ext.anns, dataclass_type_val], self.cdef.line, )
DataClassBuilder
python
apache__airflow
helm-tests/tests/helm_tests/security/test_fernetkey_secret.py
{ "start": 954, "end": 2647 }
class ____: """Tests fernet key secret.""" def test_should_add_annotations_to_fernetkey_secret(self): # Create a Fernet key fernet_key_provided = Fernet.generate_key().decode() docs = render_chart( values={ "fernetKey": fernet_key_provided, "fernetKeySecretAnnotations": {"test_annotation": "test_annotation_value"}, }, show_only=["templates/secrets/fernetkey-secret.yaml"], )[0] assert "annotations" in jmespath.search("metadata", docs) assert jmespath.search("metadata.annotations", docs)["test_annotation"] == "test_annotation_value" # Extract the base64 encoded fernet key from the secret fernet_key_b64 = jmespath.search('data."fernet-key"', docs).strip('"') fernet_key = base64.b64decode(fernet_key_b64).decode() # Verify the key is valid by creating a Fernet instance Fernet(fernet_key.encode()) # Raise: Fernet key must be 32 url-safe base64-encoded bytes. def test_should_generate_valid_fernetkey_secret(self): """Test that a valid Fernet key is generated.""" docs = render_chart( values={}, # No fernetKey provided show_only=["templates/secrets/fernetkey-secret.yaml"], )[0] # Extract the base64 encoded fernet key from the secret fernet_key_b64 = jmespath.search('data."fernet-key"', docs).strip('"') fernet_key = base64.b64decode(fernet_key_b64).decode() # Verify the key is valid by creating a Fernet instance Fernet(fernet_key.encode()) # Raise: Fernet key must be 32 url-safe base64-encoded bytes.
TestFernetKeySecret
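Both tests validate a generated key simply by constructing `Fernet` with it, which raises if the key is not 32 url-safe base64-encoded bytes; a standalone round-trip sketch of that API:

from cryptography.fernet import Fernet

key = Fernet.generate_key()  # 32 url-safe base64-encoded bytes
f = Fernet(key)              # raises ValueError for a malformed key
token = f.encrypt(b"payload")
assert f.decrypt(token) == b"payload"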
python
doocs__leetcode
solution/2300-2399/2376.Count Special Integers/Solution.py
{ "start": 0, "end": 662 }
class ____: def countSpecialNumbers(self, n: int) -> int: @cache def dfs(i: int, mask: int, lead: bool, limit: bool) -> int: if i >= len(s): return int(lead ^ 1) up = int(s[i]) if limit else 9 ans = 0 for j in range(up + 1): if mask >> j & 1: continue if lead and j == 0: ans += dfs(i + 1, mask, True, limit and j == up) else: ans += dfs(i + 1, mask | 1 << j, False, limit and j == up) return ans s = str(n) return dfs(0, 0, True, True)
Solution
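A quick sanity check for the digit-DP solution above, using LeetCode 2376's published samples (this assumes `functools.cache` is imported alongside the class, as it is above the excerpted span):

s = Solution()
assert s.countSpecialNumbers(20) == 19    # only 11 in 1..20 repeats a digit
assert s.countSpecialNumbers(5) == 5      # every one-digit number is special
assert s.countSpecialNumbers(135) == 110  # LeetCode's third published sample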
python
tensorflow__tensorflow
tensorflow/python/framework/type_spec_test.py
{ "start": 5510, "end": 6917 }
class ____(type_spec.TypeSpec): """A TypeSpec for the NestOfTensors value type.""" def __init__(self, spec): self.spec = spec value_type = property(lambda self: NestOfTensors) _component_specs = property(lambda self: self.spec) def _to_components(self, value): return nest.flatten(value) def _from_components(self, components): return nest.pack_sequence_as(self.spec, components) def _serialize(self): return self.spec def __repr__(self): if hasattr(self.spec, "_fields") and isinstance( self.spec._fields, collections_abc.Sequence) and all( isinstance(f, str) for f in self.spec._fields): return "%s(%r)" % (type(self).__name__, self._serialize()) return super().__repr__() @classmethod def from_value(cls, value): return cls(nest.map_structure(type_spec.type_spec_from_value, value.nest)) @classmethod def _deserialize(cls, spec): return cls(spec) type_spec.register_type_spec_from_value_converter( NestOfTensors, NestOfTensorsSpec.from_value) _TestNamedTuple = collections.namedtuple("NamedTuple", ["a", "b"]) _TestNamedTuple2 = collections.namedtuple("NamedTuple", ["a", "b"]) _TestNamedTupleSingleField = collections.namedtuple("SingleField", ["a"]) _TestNamedTupleDifferentField = collections.namedtuple("DifferentField", ["a", "c"])
NestOfTensorsSpec
python
apache__airflow
providers/elasticsearch/tests/unit/elasticsearch/log/test_es_response.py
{ "start": 1116, "end": 1494 }
class ____: def test_wrap_with_dict(self): test_dict = {"key1": "value1"} result = _wrap(test_dict) assert isinstance(result, AttributeDict) assert result.key1 == "value1" def test_wrap_with_non_dict(self): test_values = [1, [2, 3], "string", 4.5] for value in test_values: assert _wrap(value) == value
TestWrap
python
plotly__plotly.py
plotly/graph_objs/box/legendgrouptitle/_font.py
{ "start": 233, "end": 9906 }
class ____(_BaseTraceHierarchyType): _parent_path_str = "box.legendgrouptitle" _path_str = "box.legendgrouptitle.font" _valid_props = { "color", "family", "lineposition", "shadow", "size", "style", "textcase", "variant", "weight", } @property def color(self): """ The 'color' property is a color and may be specified as: - A hex string (e.g. '#ff0000') - An rgb/rgba string (e.g. 'rgb(255,0,0)') - An hsl/hsla string (e.g. 'hsl(0,100%,50%)') - An hsv/hsva string (e.g. 'hsv(0,100%,100%)') - A named CSS color: see https://plotly.com/python/css-colors/ for a list Returns ------- str """ return self["color"] @color.setter def color(self, val): self["color"] = val @property def family(self): """ HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. The 'family' property is a string and must be specified as: - A non-empty string Returns ------- str """ return self["family"] @family.setter def family(self, val): self["family"] = val @property def lineposition(self): """ Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. The 'lineposition' property is a flaglist and may be specified as a string containing: - Any combination of ['under', 'over', 'through'] joined with '+' characters (e.g. 'under+over') OR exactly one of ['none'] (e.g. 'none') Returns ------- Any """ return self["lineposition"] @lineposition.setter def lineposition(self, val): self["lineposition"] = val @property def shadow(self): """ Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow for additional options. The 'shadow' property is a string and must be specified as: - A string - A number that will be converted to a string Returns ------- str """ return self["shadow"] @shadow.setter def shadow(self, val): self["shadow"] = val @property def size(self): """ The 'size' property is a number and may be specified as: - An int or float in the interval [1, inf] Returns ------- int|float """ return self["size"] @size.setter def size(self, val): self["size"] = val @property def style(self): """ Sets whether a font should be styled with a normal or italic face from its family. The 'style' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'italic'] Returns ------- Any """ return self["style"] @style.setter def style(self, val): self["style"] = val @property def textcase(self): """ Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. The 'textcase' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'word caps', 'upper', 'lower'] Returns ------- Any """ return self["textcase"] @textcase.setter def textcase(self, val): self["textcase"] = val @property def variant(self): """ Sets the variant of the font. 
The 'variant' property is an enumeration that may be specified as: - One of the following enumeration values: ['normal', 'small-caps', 'all-small-caps', 'all-petite-caps', 'petite-caps', 'unicase'] Returns ------- Any """ return self["variant"] @variant.setter def variant(self, val): self["variant"] = val @property def weight(self): """ Sets the weight (or boldness) of the font. The 'weight' property is a integer and may be specified as: - An int (or float that will be cast to an int) in the interval [1, 1000] OR exactly one of ['normal', 'bold'] (e.g. 'bold') Returns ------- int """ return self["weight"] @weight.setter def weight(self, val): self["weight"] = val @property def _prop_descriptions(self): return """\ color family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. size style Sets whether a font should be styled with a normal or italic face from its family. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. variant Sets the variant of the font. weight Sets the weight (or boldness) of the font. """ def __init__( self, arg=None, color=None, family=None, lineposition=None, shadow=None, size=None, style=None, textcase=None, variant=None, weight=None, **kwargs, ): """ Construct a new Font object Sets this legend group's title font. Parameters ---------- arg dict of properties compatible with this constructor or an instance of :class:`plotly.graph_objs.box.legendgrouptitle.Font` color family HTML font family - the typeface that will be applied by the web browser. The web browser can only apply a font if it is available on the system where it runs. Provide multiple font families, separated by commas, to indicate the order in which to apply fonts if they aren't available. lineposition Sets the kind of decoration line(s) with text, such as an "under", "over" or "through" as well as combinations e.g. "under+over", etc. shadow Sets the shape and color of the shadow behind text. "auto" places minimal shadow and applies contrast text font color. See https://developer.mozilla.org/en- US/docs/Web/CSS/text-shadow for additional options. size style Sets whether a font should be styled with a normal or italic face from its family. textcase Sets capitalization of text. It can be used to make text appear in all-uppercase or all-lowercase, or with each word capitalized. variant Sets the variant of the font. weight Sets the weight (or boldness) of the font. 
Returns ------- Font """ super().__init__("font") if "_parent" in kwargs: self._parent = kwargs["_parent"] return if arg is None: arg = {} elif isinstance(arg, self.__class__): arg = arg.to_plotly_json() elif isinstance(arg, dict): arg = _copy.copy(arg) else: raise ValueError("""\ The first argument to the plotly.graph_objs.box.legendgrouptitle.Font constructor must be a dict or an instance of :class:`plotly.graph_objs.box.legendgrouptitle.Font`""") self._skip_invalid = kwargs.pop("skip_invalid", False) self._validate = kwargs.pop("_validate", True) self._set_property("color", arg, color) self._set_property("family", arg, family) self._set_property("lineposition", arg, lineposition) self._set_property("shadow", arg, shadow) self._set_property("size", arg, size) self._set_property("style", arg, style) self._set_property("textcase", arg, textcase) self._set_property("variant", arg, variant) self._set_property("weight", arg, weight) self._process_kwargs(**dict(arg, **kwargs)) self._skip_invalid = False
Font
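The generated class above backs `box.legendgrouptitle.font`; a small figure-level sketch showing where those properties plug in (the trace data and styling values are invented):

import plotly.graph_objects as go

fig = go.Figure(
    go.Box(
        y=[1, 2, 2, 3],
        legendgroup="groupA",
        legendgrouptitle=dict(
            text="Group A",
            font=dict(family="Courier New", size=14, color="royalblue"),
        ),
        showlegend=True,
    )
)
fig.show()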
python
rapidsai__cudf
python/cudf/cudf/pandas/fast_slow_proxy.py
{ "start": 16041, "end": 20181 }
class ____: """ Base class for all fast=slow proxy types. A fast-slow proxy is proxy for a pair of types that provide "fast" and "slow" implementations of the same API. At any time, a fast-slow proxy wraps an object of either "fast" type, or "slow" type. Operations invoked on the fast-slow proxy are first delegated to the "fast" type, and if that fails, to the "slow" type. """ _fsproxy_wrapped: Any # Instance-level transfer blocking flag _fsproxy_transfer_block: _BlockState | None = None def _fsproxy_fast_to_slow(self) -> Any: """ If the wrapped object is of "fast" type, returns the corresponding "slow" object. Otherwise, returns the wrapped object as-is. """ raise NotImplementedError("Abstract base class") def _fsproxy_slow_to_fast(self) -> Any: """ If the wrapped object is of "slow" type, returns the corresponding "fast" object. Otherwise, returns the wrapped object as-is. """ raise NotImplementedError("Abstract base class") @property def _fsproxy_fast(self) -> Any: """ Returns the wrapped object. If the wrapped object is of "slow" type, replaces it with the corresponding "fast" object before returning it. """ # Check for transfer blocking before attempting conversion if ( self._fsproxy_transfer_block is _BlockState.TO_FAST and self._fsproxy_state is _State.SLOW # type: ignore[attr-defined] ): raise RuntimeError("Fast-to-slow transfer is blocked") self._fsproxy_wrapped = self._fsproxy_slow_to_fast() return self._fsproxy_wrapped @property def _fsproxy_slow(self) -> Any: """ Returns the wrapped object. If the wrapped object is of "fast" type, replaces it with the corresponding "slow" object before returning it. """ self._fsproxy_wrapped = self._fsproxy_fast_to_slow() return self._fsproxy_wrapped def set_transfer_block_state( self, transfer_block: _BlockState | None = None ): """Set transfer blocking flag for this specific instance. Parameters ---------- transfer_block : _BlockState | None None: No blocking (default) _BlockState.TO_FAST: Block slow-to-fast transfers. _BlockState.TO_SLOW: Block fast-to-slow transfers. """ self._fsproxy_transfer_block = transfer_block def get_transfer_blocking(self): """Get current instance-level transfer blocking flag.""" return getattr(self, "_fsproxy_transfer_block", None) def force_state(self, state: _State): """Force the proxy to a specific state (FAST or SLOW) and block opposite transfers.""" if state == _State.FAST: # Force to fast state and block fast-to-slow transfers self._fsproxy_wrapped = self._fsproxy_slow_to_fast() self.set_transfer_block_state(_BlockState.TO_SLOW) elif state == _State.SLOW: # Force to slow state and block slow-to-fast transfers self._fsproxy_wrapped = self._fsproxy_fast_to_slow() self.set_transfer_block_state(_BlockState.TO_FAST) else: raise ValueError( f"Invalid state: {state}. Must be _State.FAST or _State.SLOW" ) def unblock_transfers(self): """Remove all transfer blocking for this instance.""" self.set_transfer_block_state(None) def __dir__(self): # Try to return the cached dir of the slow object, but if it # doesn't exist, fall back to the default implementation. try: return self._fsproxy_slow_dir except AttributeError: return object.__dir__(self) def __setattr__(self, name, value): if name.startswith("_"): object.__setattr__(self, name, value) return return _FastSlowAttribute("__setattr__").__get__(self, type(self))( name, value )
_FastSlowProxy
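The abstract base above only defines the contract; a deliberately toy, self-contained illustration of the fast-then-slow delegation idea it describes (this is not cuDF's actual wrapping machinery, just the pattern):

import cmath

class ToyProxy:
    """Toy illustration: try the fast callable first, fall back to the slow one."""

    def __init__(self, fast, slow):
        self._fast, self._slow = fast, slow

    def __call__(self, *args, **kwargs):
        try:
            return self._fast(*args, **kwargs)  # fast path
        except Exception:
            return self._slow(*args, **kwargs)  # slow fallback

def fast_sqrt(x):
    if x < 0:
        raise ValueError("fast path only handles non-negative input")
    return x ** 0.5

proxy = ToyProxy(fast_sqrt, cmath.sqrt)
print(proxy(4))   # 2.0 via the fast path
print(proxy(-4))  # 2j via the slow fallback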
python
bokeh__bokeh
src/bokeh/client/states.py
{ "start": 2261, "end": 2518 }
class ____(State): ''' The ``ClientConnection`` connected to a Bokeh server, but has not yet received an ACK from it. ''' async def run(self, connection: ClientConnection) -> None: await connection._wait_for_ack()
CONNECTED_BEFORE_ACK
python
realpython__materials
wordcount/tests/task_06.py
{ "start": 372, "end": 773 }
class ____: def test_reports_zeros_on_a_directory(self, wc, fake_dir): expected = f"0 0 0 {fake_dir}/ (is a directory)\n".encode() assert expected == wc(fake_dir) def test_reports_zeros_on_a_missing_file(self, wc, random_name): expected = ( f"0 0 0 {random_name} (no such file or directory)\n".encode() ) assert expected == wc(random_name)
Test
python
pyca__cryptography
src/cryptography/hazmat/_oid.py
{ "start": 4079, "end": 7938 }
class ____: RSA_WITH_MD5 = ObjectIdentifier("1.2.840.113549.1.1.4") RSA_WITH_SHA1 = ObjectIdentifier("1.2.840.113549.1.1.5") # This is an alternate OID for RSA with SHA1 that is occasionally seen _RSA_WITH_SHA1 = ObjectIdentifier("1.3.14.3.2.29") RSA_WITH_SHA224 = ObjectIdentifier("1.2.840.113549.1.1.14") RSA_WITH_SHA256 = ObjectIdentifier("1.2.840.113549.1.1.11") RSA_WITH_SHA384 = ObjectIdentifier("1.2.840.113549.1.1.12") RSA_WITH_SHA512 = ObjectIdentifier("1.2.840.113549.1.1.13") RSA_WITH_SHA3_224 = ObjectIdentifier("2.16.840.1.101.3.4.3.13") RSA_WITH_SHA3_256 = ObjectIdentifier("2.16.840.1.101.3.4.3.14") RSA_WITH_SHA3_384 = ObjectIdentifier("2.16.840.1.101.3.4.3.15") RSA_WITH_SHA3_512 = ObjectIdentifier("2.16.840.1.101.3.4.3.16") RSASSA_PSS = ObjectIdentifier("1.2.840.113549.1.1.10") ECDSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10045.4.1") ECDSA_WITH_SHA224 = ObjectIdentifier("1.2.840.10045.4.3.1") ECDSA_WITH_SHA256 = ObjectIdentifier("1.2.840.10045.4.3.2") ECDSA_WITH_SHA384 = ObjectIdentifier("1.2.840.10045.4.3.3") ECDSA_WITH_SHA512 = ObjectIdentifier("1.2.840.10045.4.3.4") ECDSA_WITH_SHA3_224 = ObjectIdentifier("2.16.840.1.101.3.4.3.9") ECDSA_WITH_SHA3_256 = ObjectIdentifier("2.16.840.1.101.3.4.3.10") ECDSA_WITH_SHA3_384 = ObjectIdentifier("2.16.840.1.101.3.4.3.11") ECDSA_WITH_SHA3_512 = ObjectIdentifier("2.16.840.1.101.3.4.3.12") DSA_WITH_SHA1 = ObjectIdentifier("1.2.840.10040.4.3") DSA_WITH_SHA224 = ObjectIdentifier("2.16.840.1.101.3.4.3.1") DSA_WITH_SHA256 = ObjectIdentifier("2.16.840.1.101.3.4.3.2") DSA_WITH_SHA384 = ObjectIdentifier("2.16.840.1.101.3.4.3.3") DSA_WITH_SHA512 = ObjectIdentifier("2.16.840.1.101.3.4.3.4") ED25519 = ObjectIdentifier("1.3.101.112") ED448 = ObjectIdentifier("1.3.101.113") GOSTR3411_94_WITH_3410_2001 = ObjectIdentifier("1.2.643.2.2.3") GOSTR3410_2012_WITH_3411_2012_256 = ObjectIdentifier("1.2.643.7.1.1.3.2") GOSTR3410_2012_WITH_3411_2012_512 = ObjectIdentifier("1.2.643.7.1.1.3.3") _SIG_OIDS_TO_HASH: dict[ObjectIdentifier, hashes.HashAlgorithm | None] = { SignatureAlgorithmOID.RSA_WITH_MD5: hashes.MD5(), SignatureAlgorithmOID.RSA_WITH_SHA1: hashes.SHA1(), SignatureAlgorithmOID._RSA_WITH_SHA1: hashes.SHA1(), SignatureAlgorithmOID.RSA_WITH_SHA224: hashes.SHA224(), SignatureAlgorithmOID.RSA_WITH_SHA256: hashes.SHA256(), SignatureAlgorithmOID.RSA_WITH_SHA384: hashes.SHA384(), SignatureAlgorithmOID.RSA_WITH_SHA512: hashes.SHA512(), SignatureAlgorithmOID.RSA_WITH_SHA3_224: hashes.SHA3_224(), SignatureAlgorithmOID.RSA_WITH_SHA3_256: hashes.SHA3_256(), SignatureAlgorithmOID.RSA_WITH_SHA3_384: hashes.SHA3_384(), SignatureAlgorithmOID.RSA_WITH_SHA3_512: hashes.SHA3_512(), SignatureAlgorithmOID.ECDSA_WITH_SHA1: hashes.SHA1(), SignatureAlgorithmOID.ECDSA_WITH_SHA224: hashes.SHA224(), SignatureAlgorithmOID.ECDSA_WITH_SHA256: hashes.SHA256(), SignatureAlgorithmOID.ECDSA_WITH_SHA384: hashes.SHA384(), SignatureAlgorithmOID.ECDSA_WITH_SHA512: hashes.SHA512(), SignatureAlgorithmOID.ECDSA_WITH_SHA3_224: hashes.SHA3_224(), SignatureAlgorithmOID.ECDSA_WITH_SHA3_256: hashes.SHA3_256(), SignatureAlgorithmOID.ECDSA_WITH_SHA3_384: hashes.SHA3_384(), SignatureAlgorithmOID.ECDSA_WITH_SHA3_512: hashes.SHA3_512(), SignatureAlgorithmOID.DSA_WITH_SHA1: hashes.SHA1(), SignatureAlgorithmOID.DSA_WITH_SHA224: hashes.SHA224(), SignatureAlgorithmOID.DSA_WITH_SHA256: hashes.SHA256(), SignatureAlgorithmOID.ED25519: None, SignatureAlgorithmOID.ED448: None, SignatureAlgorithmOID.GOSTR3411_94_WITH_3410_2001: None, SignatureAlgorithmOID.GOSTR3410_2012_WITH_3411_2012_256: None, 
SignatureAlgorithmOID.GOSTR3410_2012_WITH_3411_2012_512: None, }
SignatureAlgorithmOID
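A hedged usage sketch for the record above: pyca/cryptography surfaces the `_SIG_OIDS_TO_HASH` mapping through `Certificate.signature_hash_algorithm`, which returns the hash instance associated with the certificate's signature OID, or None for the EdDSA and GOST entries. The certificate path below is a placeholder, not part of the record.

from cryptography import x509
from cryptography.hazmat.primitives import hashes
from cryptography.x509.oid import SignatureAlgorithmOID

# "cert.pem" is a placeholder path.
with open("cert.pem", "rb") as f:
    cert = x509.load_pem_x509_certificate(f.read())

oid = cert.signature_algorithm_oid
if oid == SignatureAlgorithmOID.RSA_WITH_SHA256:
    # RSA_WITH_SHA256 maps to hashes.SHA256() in _SIG_OIDS_TO_HASH
    assert isinstance(cert.signature_hash_algorithm, hashes.SHA256)
elif oid in (SignatureAlgorithmOID.ED25519, SignatureAlgorithmOID.ED448):
    # EdDSA OIDs map to None: the digest is fixed by the algorithm itself
    assert cert.signature_hash_algorithm is None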
python
ZoranPandovski__al-go-rithms
data_structures/b_tree/Python/binaryTree.py
{ "start": 226, "end": 1660 }
class ____(object): def insert(self,root,data): if root == None: root = Node(data) return root # Insert node by using breadth first search queue = deque() queue.append(root) while len(queue): popped = queue.popleft() # The node to be inserted is already present in the tree if popped.val == data: return root if popped.left: queue.append(popped.left) else: node = Node(data) popped.left = node return root if popped.right: queue.append(popped.right) else: node = Node(data) popped.right = node return root ## yet to code delete method def delete(self,root,data): pass def display(self,root): #preorder Traversal if root == None: return print(root.val) self.display(root.left) self.display(root.right) def size(self,root): if root == None: return 0 return (1 + self.size(root.left) + self.size(root.right)) def search(self,root,element): if root == None: return False if root.val == element: return True return self.search(root.left,element) or self.search(root.right,element)
BinaryTree
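A small usage sketch for the BinaryTree record above. The `Node` class and the `deque` import live before the extracted span in binaryTree.py, so the stand-ins here are assumptions about their shape.

from collections import deque  # assumed to match the file's own import

class Node:  # assumed stand-in for the Node class defined above the span
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

tree = BinaryTree()
root = None
for value in [1, 2, 3, 4, 5]:
    root = tree.insert(root, value)  # fills each level left to right

tree.display(root)            # preorder: 1 2 4 5 3
print(tree.size(root))        # 5
print(tree.search(root, 4))   # True
print(tree.search(root, 9))   # False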
python
apache__airflow
providers/apache/beam/src/airflow/providers/apache/beam/hooks/beam.py
{ "start": 7226, "end": 17046 }
class ____(BaseHook): """ Hook for Apache Beam. All the methods in the hook where project_id is used must be called with keyword arguments rather than positional. :param runner: Runner type """ def __init__( self, runner: str, ) -> None: self.runner = runner super().__init__() def _start_pipeline( self, variables: dict, command_prefix: list[str], process_line_callback: Callable[[str], None] | None = None, working_directory: str | None = None, is_dataflow_job_id_exist_callback: Callable[[], bool] | None = None, ) -> None: cmd = [*command_prefix, f"--runner={self.runner}"] if variables: cmd.extend(beam_options_to_args(variables)) run_beam_command( cmd=cmd, process_line_callback=process_line_callback, working_directory=working_directory, log=self.log, is_dataflow_job_id_exist_callback=is_dataflow_job_id_exist_callback, ) def start_python_pipeline( self, variables: dict, py_file: str, py_options: list[str], py_interpreter: str = "python3", py_requirements: list[str] | None = None, py_system_site_packages: bool = False, process_line_callback: Callable[[str], None] | None = None, is_dataflow_job_id_exist_callback: Callable[[], bool] | None = None, ): """ Start Apache Beam python pipeline. :param variables: Variables passed to the pipeline. :param py_file: Path to the python file to execute. :param py_options: Additional options. :param py_interpreter: Python version of the Apache Beam pipeline. If None, this defaults to the python3. To track python versions supported by beam and related issues check: https://issues.apache.org/jira/browse/BEAM-1251 :param py_requirements: Additional python package(s) to install. If a value is passed to this parameter, a new virtual environment has been created with additional packages installed. You could also install the apache-beam package if it is not installed on your system, or you want to use a different version. :param py_system_site_packages: Whether to include system_site_packages in your virtualenv. See virtualenv documentation for more information. This option is only relevant if the ``py_requirements`` parameter is not None. :param process_line_callback: (optional) Callback that can be used to process each line of the stdout and stderr file descriptors. """ if "labels" in variables: variables["labels"] = [f"{key}={value}" for key, value in variables["labels"].items()] with contextlib.ExitStack() as exit_stack: if py_requirements is not None: if not py_requirements and not py_system_site_packages: warning_invalid_environment = textwrap.dedent( """\ Invalid method invocation. You have disabled inclusion of system packages and empty list required for installation, so it is not possible to create a valid virtual environment. In the virtual environment, apache-beam package must be installed for your job to be executed. To fix this problem: * install apache-beam on the system, then set parameter py_system_site_packages to True, * add apache-beam to the list of required packages in parameter py_requirements. 
""" ) raise AirflowException(warning_invalid_environment) tmp_dir = exit_stack.enter_context(tempfile.TemporaryDirectory(prefix="apache-beam-venv")) py_interpreter = prepare_virtualenv( venv_directory=tmp_dir, python_bin=py_interpreter, system_site_packages=py_system_site_packages, requirements=py_requirements, ) command_prefix = [py_interpreter, *py_options, py_file] beam_version = ( subprocess.check_output([py_interpreter, "-c", _APACHE_BEAM_VERSION_SCRIPT]).decode().strip() ) self.log.info("Beam version: %s", beam_version) impersonate_service_account = variables.get("impersonate_service_account") if impersonate_service_account: if Version(beam_version) < Version("2.39.0"): raise AirflowException( "The impersonateServiceAccount option requires Apache Beam 2.39.0 or newer." ) self._start_pipeline( variables=variables, command_prefix=command_prefix, process_line_callback=process_line_callback, is_dataflow_job_id_exist_callback=is_dataflow_job_id_exist_callback, ) def start_java_pipeline( self, variables: dict, jar: str, job_class: str | None = None, process_line_callback: Callable[[str], None] | None = None, is_dataflow_job_id_exist_callback: Callable[[], bool] | None = None, ) -> None: """ Start Apache Beam Java pipeline. :param variables: Variables passed to the job. :param jar: Name of the jar for the pipeline :param job_class: Name of the java class for the pipeline. :param process_line_callback: (optional) Callback that can be used to process each line of the stdout and stderr file descriptors. """ if "labels" in variables: variables["labels"] = json.dumps(variables["labels"], separators=(",", ":")) command_prefix = ["java", "-cp", jar, job_class] if job_class else ["java", "-jar", jar] self._start_pipeline( variables=variables, command_prefix=command_prefix, process_line_callback=process_line_callback, is_dataflow_job_id_exist_callback=is_dataflow_job_id_exist_callback, ) def start_go_pipeline( self, variables: dict, go_file: str, process_line_callback: Callable[[str], None] | None = None, should_init_module: bool = False, ) -> None: """ Start Apache Beam Go pipeline with a source file. :param variables: Variables passed to the job. :param go_file: Path to the Go file with your beam pipeline. :param process_line_callback: (optional) Callback that can be used to process each line of the stdout and stderr file descriptors. :param should_init_module: If False (default), will just execute a `go run` command. If True, will init a module and dependencies with a ``go mod init`` and ``go mod tidy``, useful when pulling source with GCSHook. :return: """ if shutil.which("go") is None: raise AirflowConfigException( "You need to have Go installed to run beam go pipeline. See https://go.dev/doc/install " "installation guide. If you are running airflow in Docker see more info at " "'https://airflow.apache.org/docs/docker-stack/recipes.html'." ) try: from airflow.providers.google.go_module_utils import init_module, install_dependencies except ImportError: from airflow.exceptions import AirflowOptionalProviderFeatureException raise AirflowOptionalProviderFeatureException( "Failed to import apache-airflow-google-provider. To start a go pipeline, please install the" " google provider." 
) if "labels" in variables: variables["labels"] = json.dumps(variables["labels"], separators=(",", ":")) working_directory = os.path.dirname(go_file) basename = os.path.basename(go_file) if should_init_module: init_module("main", working_directory) install_dependencies(working_directory) command_prefix = ["go", "run", basename] self._start_pipeline( variables=variables, command_prefix=command_prefix, process_line_callback=process_line_callback, working_directory=working_directory, ) def start_go_pipeline_with_binary( self, variables: dict, launcher_binary: str, worker_binary: str, process_line_callback: Callable[[str], None] | None = None, ) -> None: """ Start Apache Beam Go pipeline with an executable binary. :param variables: Variables passed to the job. :param launcher_binary: Path to the binary compiled for the launching platform. :param worker_binary: Path to the binary compiled for the worker platform. :param process_line_callback: (optional) Callback that can be used to process each line of the stdout and stderr file descriptors. """ job_variables = copy.deepcopy(variables) if "labels" in job_variables: job_variables["labels"] = json.dumps(job_variables["labels"], separators=(",", ":")) job_variables["worker_binary"] = worker_binary command_prefix = [launcher_binary] self._start_pipeline( variables=job_variables, command_prefix=command_prefix, process_line_callback=process_line_callback, )
BeamHook
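A hedged sketch of driving the BeamHook record above: launch a Python pipeline on the DirectRunner. `BeamRunnerType` is the runner helper exposed by the same module; the pipeline path and options are placeholders.

from airflow.providers.apache.beam.hooks.beam import BeamHook, BeamRunnerType

hook = BeamHook(runner=BeamRunnerType.DirectRunner)
hook.start_python_pipeline(
    variables={"output": "/tmp/wordcount-out"},  # rendered as --output=... by beam_options_to_args
    py_file="/opt/pipelines/wordcount.py",       # placeholder path
    py_options=[],
    py_requirements=["apache-beam"],             # triggers a throwaway virtualenv, per the hook
    py_system_site_packages=False,
)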
python
apache__airflow
task-sdk/src/airflow/sdk/definitions/param.py
{ "start": 10400, "end": 13761 }
class ____(ResolveMixin): """ Dag run parameter reference. This binds a simple Param object to a name within a Dag instance, so that it can be resolved during the runtime via the ``{{ context }}`` dictionary. The ideal use case of this class is to implicitly convert args passed to a method decorated by ``@dag``. It can be used to parameterize a Dag. You can overwrite its value by setting it on conf when you trigger your DagRun. This can also be used in templates by accessing ``{{ context.params }}``. **Example**: with DAG(...) as dag: EmailOperator(subject=dag.param('subject', 'Hi from Airflow!')) :param current_dag: Dag being used for parameter. :param name: key value which is used to set the parameter :param default: Default value used if no parameter was set. """ def __init__(self, current_dag: DAG, name: str, default: Any = NOTSET): if default is not NOTSET: current_dag.params[name] = default self._name = name self._default = default self.current_dag = current_dag def iter_references(self) -> Iterable[tuple[Operator, str]]: return () def resolve(self, context: Context) -> Any: """Pull DagParam value from DagRun context. This method is run during ``op.execute()``.""" with contextlib.suppress(KeyError): if context["dag_run"].conf: return context["dag_run"].conf[self._name] if self._default is not NOTSET: return self._default with contextlib.suppress(KeyError): return context["params"][self._name] raise RuntimeError(f"No value could be resolved for parameter {self._name}") def serialize(self) -> dict: """Serialize the DagParam object into a dictionary.""" return { "dag_id": self.current_dag.dag_id, "name": self._name, "default": self._default, } @classmethod def deserialize(cls, data: dict, dags: dict) -> DagParam: """ Deserializes the dictionary back into a DagParam object. :param data: The serialized representation of the DagParam. :param dags: A dictionary of available Dags to look up the Dag. """ dag_id = data["dag_id"] # Retrieve the current Dag from the provided Dags dictionary current_dag = dags.get(dag_id) if not current_dag: raise ValueError(f"Dag with id {dag_id} not found.") return cls(current_dag=current_dag, name=data["name"], default=data["default"]) def process_params( dag: DAG, task: Operator, dagrun_conf: dict[str, Any] | None, *, suppress_exception: bool, ) -> dict[str, Any]: """Merge, validate params, and convert them into a simple dict.""" from airflow.sdk.configuration import conf dagrun_conf = dagrun_conf or {} params = ParamsDict(suppress_exception=suppress_exception) with contextlib.suppress(AttributeError): params.update(dag.params) if task.params: params.update(task.params) if conf.getboolean("core", "dag_run_conf_overrides_params") and dagrun_conf: logger.debug("Updating task params (%s) with DagRun.conf (%s)", params, dagrun_conf) params.update(dagrun_conf) return params.validate()
DagParam
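A hedged sketch of the resolution order the DagParam record above implements: a key present in a triggered run's conf wins, then the default captured at definition time, then context["params"]. The import path is an assumption about the current Airflow layout.

from airflow import DAG  # assumed import path; the task SDK also exposes DAG

with DAG(dag_id="demo") as dag:
    subject = dag.param("subject", "Hi from Airflow!")  # returns a DagParam

# At runtime, DagParam.resolve(context) consults, in order:
# 1. context["dag_run"].conf["subject"]   (a triggered run's conf wins)
# 2. the default captured above ("Hi from Airflow!")
# 3. context["params"]["subject"]
# and raises RuntimeError if none of the three yields a value.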
python
bokeh__bokeh
src/bokeh/models/layouts.py
{ "start": 19167, "end": 19767 }
class ____(FlexBox): ''' Lay out child components in a single vertical column. Children can be specified as positional arguments, as a single argument that is a sequence, or using the ``children`` keyword argument. ''' # explicit __init__ to support Init signatures def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) def _sphinx_height_hint(self) -> int|None: if any(x._sphinx_height_hint() is None for x in self.children): return None return sum(x._sphinx_height_hint() for x in self.children)
Column
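A minimal sketch of the Column model from the record above, stacking two figures vertically; the figure sizes and data are arbitrary.

from bokeh.models import Column
from bokeh.plotting import figure, show

p1 = figure(width=300, height=200)
p1.line([1, 2, 3], [1, 4, 9])
p2 = figure(width=300, height=200)
p2.scatter([1, 2, 3], [3, 2, 1])

# children can also be passed positionally: Column(p1, p2)
show(Column(children=[p1, p2]))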
python
astropy__astropy
astropy/modeling/tests/test_compound.py
{ "start": 13756, "end": 40190 }
class ____(Model): mean = Parameter(default=0, fixed=True) @staticmethod def evaluate(mean): return mean def test_inherit_constraints(): """ Various tests for copying of constraint values between compound models and their members. Regression test for https://github.com/astropy/astropy/issues/3481 """ model = Gaussian1D(bounds={"stddev": (0, 0.3)}, fixed={"mean": True}) + Gaussian1D( fixed={"mean": True} ) # Lots of assertions in this test as there are multiple interfaces to # parameter constraints assert "stddev_0" in model.bounds assert model.bounds["stddev_0"] == (0, 0.3) assert model.stddev_0.bounds == (0, 0.3) assert "mean_0" in model.fixed assert model.fixed["mean_0"] is True assert model.mean_0.fixed is True assert "mean_1" in model.fixed assert model.fixed["mean_1"] is True assert model.mean_1.fixed is True assert model.stddev_0 is model[0].stddev # Great, all the constraints were inherited properly # Now what about if we update them through the sub-models? model.stddev_0.bounds = (0, 0.4) assert model[0].stddev.bounds == (0, 0.4) assert model[0].bounds["stddev"] == (0, 0.4) model.stddev_0.bounds = (0.1, 0.5) assert model[0].stddev.bounds == (0.1, 0.5) assert model[0].bounds["stddev"] == (0.1, 0.5) model[1].mean.fixed = False assert model.mean_1.fixed is False assert model[1].mean.fixed is False # Now turn off syncing of constraints assert model.bounds["stddev_0"] == (0.1, 0.5) model.sync_constraints = False model[0].stddev.bounds = (0, 0.2) assert model.bounds["stddev_0"] == (0.1, 0.5) model.sync_constraints = True assert model.bounds["stddev_0"] == (0, 0.2) def test_compound_custom_inverse(): """ Test that a compound model with a custom inverse has that inverse applied when the inverse of another model, of which it is a component, is computed. 
Regression test for https://github.com/astropy/astropy/issues/3542 """ poly = Polynomial1D(1, c0=1, c1=2) scale = Scale(1) shift = Shift(1) model1 = poly | scale model1.inverse = poly # model1 now has a custom inverse (the polynomial itself, ignoring the # trivial scale factor) model2 = shift | model1 assert_allclose(model2.inverse(1), (poly | shift.inverse)(1)) # Make sure an inverse is not allowed if the models were combined with the # wrong operator, or if one of the models doesn't have an inverse defined MESSAGE = ( r"No analytical or user-supplied inverse transform has been implemented for" r" this model" ) with pytest.raises(NotImplementedError, match=MESSAGE): (shift + model1).inverse with pytest.raises(NotImplementedError, match=MESSAGE): (model1 & poly).inverse def test_pickle_compound(): """ Regression test for https://github.com/astropy/astropy/issues/3867#issuecomment-114547228 """ # Test pickling a compound model instance g1 = Gaussian1D(1.0, 0.0, 0.1) g2 = Gaussian1D([2.0, 3.0], [0.0, 0.0], [0.2, 0.3]) m = g1 + g2 m2 = pickle.loads(pickle.dumps(m)) assert m.param_names == m2.param_names assert m.__class__.__name__ == m2.__class__.__name__ assert np.all(m.parameters == m2.parameters) assert np.all(m(0) == m2(0)) def test_update_parameters(): offx = Shift(1) scl = Scale(2) m = offx | scl assert m(1) == 4 offx.offset = 42 assert m(1) == 86 m.factor_1 = 100 assert m(1) == 4300 m2 = m | offx assert m2(1) == 4342 def test_name(): offx = Shift(1) scl = Scale(2) m = offx | scl scl.name = "scale" assert m.submodel_names == ("None_0", "scale") assert m.name is None m.name = "M" assert m.name == "M" m1 = m.rename("M1") assert m.name == "M1" assert m1.name == "M1" def test_name_index(): g1 = Gaussian1D(1, 1, 1) g2 = Gaussian1D(1, 2, 1) g = g1 + g2 MESSAGE = r"No component with name 'bozo' found" with pytest.raises(IndexError, match=MESSAGE): g["bozo"] g1.name = "bozo" assert g["bozo"].mean == 1 g2.name = "bozo" MESSAGE = r"Multiple components found using 'bozo' as name.*" with pytest.raises(IndexError, match=MESSAGE): g["bozo"] @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_tabular_in_compound(): """ Issue #7411 - evaluate should not change the shape of the output. """ t = Tabular1D(points=([1, 5, 7],), lookup_table=[12, 15, 19], bounds_error=False) rot = Rotation2D(2) p = Polynomial1D(1) x = np.arange(12).reshape((3, 4)) # Create a compound model which does not execute Tabular.__call__, # but model.evaluate and is followed by a Rotation2D which # checks the exact shapes. 
model = p & t | rot x1, y1 = model(x, x) assert x1.ndim == 2 assert y1.ndim == 2 def test_bounding_box(): g = Gaussian2D() + Gaussian2D(2, 0.5, 0.1, 2, 3, 0) g.bounding_box = ((0, 1), (0, 0.5)) y, x = np.mgrid[0:10, 0:10] y = y / 3.0 x = x / 3.0 val = g(x, y, with_bounding_box=True) # fmt: off compare = np.array( [ [2.93738984, 2.93792011, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [2.87857153, 2.88188761, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [2.70492922, 2.71529265, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [2.45969972, 2.47912103, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan], [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan] ] ) # fmt: on mask = ~np.isnan(val) assert_allclose(val[mask], compare[mask]) val2 = g(x + 2, y + 2, with_bounding_box=True) assert np.isnan(val2).sum() == 100 # val3 = g(.1, .1, with_bounding_box=True) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_bounding_box_with_units(): points = np.arange(5) * u.pix lt = np.arange(5) * u.AA t = Tabular1D(points, lt) assert t(1 * u.pix, with_bounding_box=True) == 1.0 * u.AA @pytest.mark.parametrize("poly", [Chebyshev1D(5), Legendre1D(5), Polynomial1D(5)]) def test_compound_with_polynomials_1d(poly): """ Tests that polynomials are offset when used in compound models. 
Issue #3699 """ poly.parameters = [1, 2, 3, 4, 1, 2] shift = Shift(3) model = poly | shift x = np.linspace(-5, 5, 10) result_compound = model(x) result = shift(poly(x)) assert_allclose(result, result_compound) assert model.param_names == ( "c0_0", "c1_0", "c2_0", "c3_0", "c4_0", "c5_0", "offset_1", ) def test_replace_submodel(): """ Replace a model in a Compound model """ S1 = Shift(2, name="shift2") | Scale(3, name="scale3") # First shift then scale S2 = Scale(2, name="scale2") | Shift(3, name="shift3") # First scale then shift m = S1 & S2 assert m(1, 2) == (9, 7) m2 = m.replace_submodel("scale3", Scale(4, name="scale4")) assert m2(1, 2) == (12, 7) assert m(1, 2) == (9, 7) # Check the inverse has been updated assert m2.inverse(12, 7) == (1, 2) # Produce the same result by replacing a single model with a compound m3 = m.replace_submodel("shift2", Shift(2) | Scale(2)) assert m(1, 2) == (9, 7) assert m3(1, 2) == (18, 7) # Check the inverse has been updated assert m3.inverse(18, 7) == (1, 2) # Test with arithmetic model compunding operator m = S1 + S2 assert m(1) == 14 m2 = m.replace_submodel("scale2", Scale(4, name="scale4")) assert m2(1) == 16 # Test with fix_inputs() R = fix_inputs(Rotation2D(angle=90, name="rotate"), {0: 1}) m4 = S1 | R assert_allclose(m4(0), (-6, 1)) m5 = m4.replace_submodel("rotate", Rotation2D(180)) assert_allclose(m5(0), (-1, -6)) # Check we get a value error when model name doesn't exist MESSAGE = r"No submodels found named not_there" with pytest.raises(ValueError, match=MESSAGE): m2 = m.replace_submodel("not_there", Scale(2)) # And now a model set P = Polynomial1D(degree=1, n_models=2, name="poly") S = Shift([1, 2], n_models=2) m = P | S assert_array_equal(m([0, 1]), (1, 2)) MESSAGE = r"New and old models must have equal values for n_models" with pytest.raises(ValueError, match=MESSAGE): m2 = m.replace_submodel("poly", Polynomial1D(degree=1, c0=1)) m2 = m.replace_submodel("poly", Polynomial1D(degree=1, c0=[1, 2], n_models=2)) assert_array_equal(m2([0, 1]), (2, 4)) # Ensure previous _user_inverse doesn't stick around S1 = Shift(1) S2 = Shift(2) S3 = Shift(3, name="S3") S23 = S2 | S3 S23.inverse = Shift(-4.9) m = S1 & S23 # This should delete the S23._user_inverse m2 = m.replace_submodel("S3", Shift(4)) assert m2(1, 2) == (2, 8) assert m2.inverse(2, 8) == (1, 2) @pytest.mark.parametrize( "expr", [ lambda m1, m2: m1 + m2, lambda m1, m2: m1 - m2, lambda m1, m2: m1 * m2, lambda m1, m2: m1 / m2, ], ) def test_compound_evaluate(expr): """ Tests that compound evaluate function produces the same result as the models with the operator applied """ x = np.linspace(-5, 5, 10) # Some evaluate functions assume that inputs are numpy arrays or quantities including Const1D p1 = np.array([1, 2, 3, 4, 1, 2]) p2 = np.array([1, 0, 0.5]) model1 = Polynomial1D(5) model2 = Gaussian1D(2, 1, 5) compound = expr(model1, model2) assert_array_equal( compound.evaluate(x, *p1, *p2), expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)), ) def test_compound_evaluate_power(): """ Tests that compound evaluate function produces the same result as the models with the power operator applied """ x = np.linspace(-5, 5, 10) p1 = np.array([1, 0, 0.2]) p2 = np.array([3]) model1 = Gaussian1D(2, 1, 5) model2 = Const1D(2) compound = model1**model2 assert_array_equal( compound.evaluate(x, *p1, *p2), model1.evaluate(x, *p1) ** model2.evaluate(x, *p2), ) def test_compound_evaluate_double_shift(): x = np.linspace(-5, 5, 10) y = np.linspace(-5, 5, 10) m1 = Gaussian2D(1, 0, 0, 1, 1, 1) m2 = Shift(1) m3 = Shift(2) m = 
Gaussian2D(1, 0, 0, 1, 1, 1) & Shift(1) & Shift(2) assert_array_equal( m.evaluate(x, y, x - 10, y + 20, 1, 0, 0, 1, 1, 1, 1, 2), [ m1.evaluate(x, y, 1, 0, 0, 1, 1, 1), m2.evaluate(x - 10, 1), m3.evaluate(y + 20, 2), ], ) @pytest.mark.parametrize( "expr", [ lambda m1, m2: m1 + m2, lambda m1, m2: m1 - m2, lambda m1, m2: m1 * m2, lambda m1, m2: m1 / m2, ], ) def test_compound_evaluate_named_param(expr): """ Tests that compound evaluate function produces the same result as the models with the operator applied """ x = np.linspace(-5, 5, 10) p1 = np.array([1, 0, 0.2]) p2 = np.array([3, 0.5, 0.5]) model1 = Gaussian1D(2, 1, 5) model2 = Gaussian1D(2, 1, 5) compound = expr(model1, model2) assert_array_equal( compound.evaluate(x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]), expr(model1.evaluate(x, *p1), model2.evaluate(x, *p2)), ) def test_compound_evaluate_name_param_power(): """ Tests that compound evaluate function produces the same result as the models with the power operator applied """ x = np.linspace(-5, 5, 10) p1 = np.array([1, 0, 0.2]) p2 = np.array([3]) model1 = Gaussian1D(2, 1, 5) model2 = Const1D(2) compound = model1**model2 assert_array_equal( compound.evaluate(x, *p2, amplitude_0=p1[0], mean_0=p1[1], stddev_0=p1[2]), model1.evaluate(x, *p1) ** model2.evaluate(x, *p2), ) def test_compound_evaluate_and(): """ Tests that compound evaluate function produces the same result as the models with the operator applied """ x = np.linspace(-5, 5, 10) p1 = np.array([1, 0.1, 0.5]) p2 = np.array([3]) model1 = Gaussian1D() model2 = Shift() compound = model1 & model2 assert_array_equal( compound.evaluate(x, x, *p1, p2), [model1.evaluate(x, *p1), model2.evaluate(x, p2)], ) def test_compound_evaluate_or(): """ Tests that compound evaluate function produces the same result as the models with the operator applied """ x = np.linspace(-5, 5, 10) p1 = np.array([0.5]) p2_amplitude = np.array([3]) p2_mean = np.array([0]) p2_std = np.array([0.1]) model1 = Shift(0.5) model2 = Gaussian1D(1, 0, 0.5) compound = model1 | model2 assert_array_equal( compound.evaluate(x, p1, p2_amplitude, p2_mean, p2_std), model2.evaluate(model1.evaluate(x, p1), p2_amplitude, p2_mean, p2_std), ) def test_compound_evaluate_fix_inputs_by_keyword(): """ Tests that compound evaluate function produces the same result as the models fix_inputs operator is applied when using the keyword """ y, x = np.mgrid[:10, :10] model_params = [3, 0, 0.1, 1, 0.5, 0] model = Gaussian2D(1, 2, 0, 0.5) compound = fix_inputs(model, {"x": x + 5}) assert_array_equal( compound.evaluate(x, y, *model_params), model.evaluate(x + 5, y, *model_params), ) def test_compound_evaluate_fix_inputs_by_position(): """ Tests that compound evaluate function produces the same result as the models fix_inputs operator is applied when using the input index """ y, x = np.mgrid[:10, :10] model_params = [3, 0, 0.1, 1, 0.5, 0] model = Gaussian2D(1, 2, 0, 0.5) compound = fix_inputs(model, {0: x + 5}) assert_array_equal( compound.evaluate(x, y, *model_params), model.evaluate(x + 5, y, *model_params), ) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_fit_multiplied_compound_model_with_mixed_units(): """ Regression test for issue #12320 """ fitter = LevMarLSQFitter() x = np.linspace(0, 1, 101) * u.s y = np.linspace(5, 10, 101) * u.m * u.kg / u.s m1 = Linear1D(slope=5 * u.m / u.s / u.s, intercept=1.0 * u.m / u.s) m2 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg) truth = m1 * m2 # We need to fix some of the parameters to avoid degeneracies 
truth.slope_1.fixed = True truth.intercept_0.fixed = True fit = fitter(truth, x, y) unfit_output = truth(x) fit_output = fit(x) assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s) # The unfit model is 10 kg m^2 / s for x=0s and goes up to 60 kg m^2 / s # for x=1s, whereas the actual data being fit goes from 5 to 10, so we need # to correct this. assert_allclose(unfit_output / 10 + 4 * u.kg * u.m / u.s, fit_output) assert_quantity_allclose(fit.slope_0, 1 * u.m / u.s / u.s) assert_quantity_allclose(fit.intercept_0, 1.0 * u.m / u.s) assert_quantity_allclose(fit.slope_1, 0 * u.kg / u.s) assert_quantity_allclose(fit.intercept_1, 5 * u.kg) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_fit_multiplied_recursive_compound_model_with_mixed_units(): """ Regression test for issue #12320 """ fitter = LevMarLSQFitter() x = np.linspace(0, 1, 101) * u.s y = np.linspace(5, 10, 101) * u.m * u.m * u.kg / u.s m1 = Linear1D(slope=5 * u.m / u.s / u.s, intercept=1.0 * u.m / u.s) m2 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg) m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m) truth = m1 * m2 * m3 # We need to fix some of the parameters to avoid degeneracies truth.slope_1.fixed = True truth.slope_2.fixed = True truth.intercept_0.fixed = True truth.intercept_1.fixed = True fit = fitter(truth, x, y) unfit_output = truth(x) fit_output = fit(x) assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s) # The unfit model is 100 kg m^2 / s for x=0s and goes up to 600 kg m^2 / s # for x=1s, whereas the actual data being fit goes from 5 to 10, so we need # to correct this. assert_allclose(unfit_output / 100 + 4 * u.kg * u.m * u.m / u.s, fit_output) assert_quantity_allclose(fit.slope_0, 1 * u.m / u.s / u.s) assert_quantity_allclose(fit.intercept_0, 1.0 * u.m / u.s) assert_quantity_allclose(fit.slope_1, 0 * u.kg / u.s) assert_quantity_allclose(fit.intercept_1, 10 * u.kg) assert_quantity_allclose(fit.slope_2, 0 * u.m / u.s) assert_quantity_allclose(fit.intercept_2, 0.5 * u.m) x = np.linspace(0, 1, 101) * u.s y = np.linspace(5, 10, 101) * u.m * u.m * u.kg * u.kg / u.s m1 = Linear1D(slope=5 * u.m / u.s / u.s, intercept=1.0 * u.m / u.s) m2 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg) m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m) m4 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg) m11 = m1 * m2 m22 = m3 * m4 truth = m11 * m22 # We need to fix some of the parameters to avoid degeneracies truth.slope_1.fixed = True truth.slope_2.fixed = True truth.slope_3.fixed = True truth.intercept_0.fixed = True truth.intercept_1.fixed = True truth.intercept_2.fixed = True fit = fitter(truth, x, y) unfit_output = truth(x) fit_output = fit(x) assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s) # The unfit model is 1000 kg m^2 / s for x=0s and goes up to 6000 kg m^2 / s # for x=1s, whereas the actual data being fit goes from 5 to 10, so we need # to correct this. 
assert_allclose(unfit_output / 1000 + 4 * u.kg * u.kg * u.m * u.m / u.s, fit_output) assert_quantity_allclose(fit.slope_0, 1 * u.m / u.s / u.s) assert_quantity_allclose(fit.intercept_0, 1.0 * u.m / u.s) assert_quantity_allclose(fit.slope_1, 0 * u.kg / u.s) assert_quantity_allclose(fit.intercept_1, 10 * u.kg) assert_quantity_allclose(fit.slope_2, 0 * u.m / u.s) assert_quantity_allclose(fit.intercept_2, 10 * u.m) assert_quantity_allclose(fit.slope_3, 0 * u.kg / u.s) assert_quantity_allclose(fit.intercept_3, 0.05 * u.kg) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_fit_divided_compound_model_with_mixed_units(): """ Regression test for issue #12320 """ fitter = LevMarLSQFitter() x = np.linspace(0, 1, 101) * u.s y = np.linspace(5, 10, 101) * u.kg * u.m / u.s m1 = Linear1D(slope=5 * u.kg * u.m / u.s, intercept=1.0 * u.kg * u.m) m2 = Linear1D(slope=0.0 * u.s / u.s, intercept=10.0 * u.s) truth = m1 / m2 # We need to fix some of the parameters to avoid degeneracies truth.slope_1.fixed = True truth.intercept_0.fixed = True fit = fitter(truth, x, y) unfit_output = truth(x) fit_output = fit(x) assert unfit_output.unit == fit_output.unit == (u.kg * u.m / u.s) # The unfit model is 0.1 kg m / s for x=0s and goes up to 0.5 kg m / s for # x=1s, whereas the actual data being fit goes from 5 to 10, so we need # to correct this. assert_allclose(unfit_output * 10 + 4 * u.kg * u.m / u.s, fit_output, rtol=1e-4) assert_quantity_allclose(fit.slope_0, 1 * u.kg * u.m / u.s) assert_quantity_allclose(fit.intercept_0, 1.0 * u.kg * u.m) assert_quantity_allclose(fit.slope_1, 0 * u.s / u.s) assert_quantity_allclose(fit.intercept_1, 0.2 * u.s) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_fit_mixed_recursive_compound_model_with_mixed_units(): """ Regression test for issue #12320 """ fitter = LevMarLSQFitter() x = np.linspace(0, 1, 101) * u.s y = np.linspace(5, 10, 101) * u.kg * u.m * u.m / u.s m1 = Linear1D(slope=5 * u.kg * u.m / u.s, intercept=1.0 * u.kg * u.m) m2 = Linear1D(slope=0.0 * u.s / u.s, intercept=10.0 * u.s) m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m) truth = m1 / m2 * m3 # We need to fix some of the parameters to avoid degeneracies truth.slope_1.fixed = True truth.slope_2.fixed = True truth.intercept_0.fixed = True truth.intercept_1.fixed = True fit = fitter(truth, x, y) unfit_output = truth(x) fit_output = fit(x) assert unfit_output.unit == fit_output.unit == (u.kg * u.m * u.m / u.s) # The unfit model is 1 kg m^2 / s for x=0s and goes up to 6 kg m^2 / s for # x=1s, whereas the actual data being fit goes from 5 to 10, so we need # to correct this. 
assert_allclose(unfit_output + 4 * u.kg * u.m * u.m / u.s, fit_output) assert_quantity_allclose(fit.slope_0, 1 * u.kg * u.m / u.s) assert_quantity_allclose(fit.intercept_0, 1.0 * u.kg * u.m) assert_quantity_allclose(fit.slope_1, 0 * u.s / u.s) assert_quantity_allclose(fit.intercept_1, 10 * u.s) assert_quantity_allclose(fit.slope_2, 0 * u.m / u.s) assert_quantity_allclose(fit.intercept_2, 50 * u.m) x = np.linspace(0, 1, 101) * u.s y = np.linspace(5, 10, 101) * u.kg * u.kg * u.m * u.m / u.s m1 = Linear1D(slope=5 * u.kg * u.m / u.s, intercept=1.0 * u.kg * u.m) m2 = Linear1D(slope=0.0 * u.s / u.s, intercept=10.0 * u.s) m3 = Linear1D(slope=0.0 * u.m / u.s, intercept=10.0 * u.m) m4 = Linear1D(slope=0.0 * u.kg / u.s, intercept=10.0 * u.kg) m11 = m1 / m2 m22 = m3 * m4 truth = m11 * m22 # We need to fix some of the parameters to avoid degeneracies truth.slope_1.fixed = True truth.slope_2.fixed = True truth.slope_3.fixed = True truth.intercept_0.fixed = True truth.intercept_1.fixed = True truth.intercept_2.fixed = True fit = fitter(truth, x, y) unfit_output = truth(x) fit_output = fit(x) assert unfit_output.unit == fit_output.unit == (u.kg * u.kg * u.m * u.m / u.s) # The unfit model is 10 kg^2 m^2 / s for x=0s and goes up to 60 kg^2 m^2 / s # for x=1s, whereas the actual data being fit goes from 5 to 10, so we need # to correct this. assert_allclose(unfit_output / 10 + 4 * u.kg * u.kg * u.m * u.m / u.s, fit_output) assert_quantity_allclose(fit.slope_0, 1 * u.kg * u.m / u.s) assert_quantity_allclose(fit.intercept_0, 1.0 * u.kg * u.m) assert_quantity_allclose(fit.slope_1, 0 * u.s / u.s) assert_quantity_allclose(fit.intercept_1, 10 * u.s) assert_quantity_allclose(fit.slope_2, 0 * u.m / u.s) assert_quantity_allclose(fit.intercept_2, 10 * u.m) assert_quantity_allclose(fit.slope_3, 0 * u.kg / u.s) assert_quantity_allclose(fit.intercept_3, 5 * u.kg) def numerical_partial_deriv(model, *inputs, param_idx, delta=1e-5): """ Evaluate the central difference approximation of the derivative for param_idx. Parameters ---------- model The model to evaluate inputs The inputs to the model param_idx The index of the parameter to compute the partial derivative for. delta The step size with which to compute the central difference. """ param = model.parameters param_down = param.copy() param_down[param_idx] = param[param_idx] - delta param_up = param.copy() param_up[param_idx] = param[param_idx] + delta up = model.evaluate(*inputs, *param_up) down = model.evaluate(*inputs, *param_down) return (up - down) / (2 * delta) @pytest.mark.parametrize( "model", [ pytest.param( m, id=m._format_expression(format_leaf=lambda i, l: type(l).__name__) ) for m in [ Gaussian1D(5, 2, 3) + Linear1D(2, 3), Gaussian1D(5, 2, 3) - Linear1D(2, 3), Polynomial1D(2) * Gaussian1D(), Polynomial1D(2) / Gaussian1D(), Polynomial1D(2) + Gaussian1D(), Polynomial2D(2) + Gaussian2D(), ] ], ) @pytest.mark.parametrize("input_ndim", (1, 2)) def test_compound_fit_deriv(model, input_ndim): """ Given some compound models compare the numerical derivatives to analytical ones. 
""" x = np.linspace(1, 5, num=10) y = np.linspace(1, 5, num=10) if input_ndim == 2: x = x.reshape((5, 2)) y = y.reshape((5, 2)) inputs = (x,) if model.n_inputs == 1 else (x, y) numerical = [ numerical_partial_deriv(model, *inputs, param_idx=i) for i in range(len(model.parameters)) ] analytical = model.fit_deriv(*inputs, *model.parameters) numerical = np.asarray(numerical) analytical = np.asarray(analytical) # Reshape output to ravel all but the first dimension since some models do this numerical = numerical.reshape((numerical.shape[0], -1)) analytical = analytical.reshape((analytical.shape[0], -1)) assert_allclose(numerical, analytical) @pytest.mark.skipif(not HAS_SCIPY, reason="requires scipy") def test_fit_compound_polynomial2d(): """ Regression test for a bug that caused compound models with Polynomial2D to not be fittable due to a bug in CompoundModel.fit_deriv """ # Generate fake data rng = np.random.default_rng(0) y, x = np.mgrid[:128, :128] z = 2.0 * x**2 - 0.5 * x**2 + 1.5 * x * y - 1.0 z += rng.normal(0.0, 0.1, z.shape) * 50000.0 z += Gaussian2D(amplitude=50000, x_mean=60, y_mean=60, x_stddev=5, y_stddev=5)(x, y) # Fit the data using astropy.modeling p_init = Polynomial2D(degree=2) + Gaussian2D(amplitude=50000, x_mean=60, y_mean=60) fit_p = DogBoxLSQFitter() # We just make sure the fitting works, as it previously crashed fit_p(p_init, x, y, z)
_ConstraintsTestB
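A hedged sketch of two behaviors the astropy test record above leans on: constraints set on a submodel surface on the compound model under suffixed parameter names, and the `|` operator chains evaluation left to right. Uses real astropy.modeling classes.

import numpy as np
from astropy.modeling.models import Gaussian1D, Shift

g = Gaussian1D(amplitude=2, mean=0, stddev=1, bounds={"stddev": (0, 3)})
m = Shift(1) | g  # evaluate Shift first, then feed its output to the Gaussian

# The submodel's bound reappears on the compound with an index suffix
assert m.bounds["stddev_1"] == (0, 3)

# Shift(1)(-1) == 0, so m(-1) equals the Gaussian's peak value g(0)
assert np.isclose(m(-1), g(0))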