text: string (length 5 to 631k)
id: string (length 14 to 178)
metadata: dict
__index_level_0__: int64 (0 to 647)
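Each row in the dump pairs the raw file contents (text) with a path-derived id, a metadata dict holding the file path, repo id, and token count, and a pandas-style __index_level_0__. Below is a minimal sketch of iterating rows with this shape, assuming they have been loaded into plain Python dicts; the hard-coded list is illustrative only and simply reuses the values of row 563 shown next, it is not the API of any particular loader.

# Minimal sketch: iterate rows shaped like the schema above.
# The single example row reproduces row 563 from this dump; how rows are
# obtained (here, a hard-coded list) is an assumption, not part of the source.
rows = [
    {
        "text": '{"[MASK]": 0, "[UNK]": 1, "[PAD]": 2, "DUMMY": 3, "DUMMY2": 4, "[MASK2]": 5}',
        "id": "transformers/tests/fixtures/test_entity_vocab.json/0",
        "metadata": {
            "file_path": "transformers/tests/fixtures/test_entity_vocab.json",
            "repo_id": "transformers",
            "token_count": 45,
        },
        "__index_level_0__": 563,
    },
]

for row in rows:
    meta = row["metadata"]
    # Report which source file the row holds and how large its text payload is.
    print(f"{meta['file_path']} ({meta['token_count']} tokens, {len(row['text'])} chars)")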
{"[MASK]": 0, "[UNK]": 1, "[PAD]": 2, "DUMMY": 3, "DUMMY2": 4, "[MASK2]": 5}
transformers/tests/fixtures/test_entity_vocab.json/0
{ "file_path": "transformers/tests/fixtures/test_entity_vocab.json", "repo_id": "transformers", "token_count": 45 }
563
[ { "repo_id": "hf-audio/xcodec-hubert-librispeech", "bandwidth": 0.5, "codes": [ [ [ 590, 590, 306, 306, 590, 1006, 826, 916, 590, 826, 826, 844, 844, 746, 392 ] ] ], "decoded": [ [ [ -0.000644786050543189, 0.00013058912009000778, 0.0006741542601957917, 0.00034384033642709255, -0.00010847719386219978, 0.00014182832092046738, 6.248801946640015e-05, -0.0003324758727103472, -0.0005267199594527483, -0.0010175877250730991, -0.0011251929681748152, -0.0006446582265198231, -0.00047831423580646515, -0.00023265788331627846, -0.0001271876972168684, -1.8235761672258377e-05, -5.327444523572922e-05, -0.0001765203196555376, -0.0004240723792463541, -0.0002993491943925619, -0.0002642744220793247, -0.0003135430160909891, -0.0002605144400149584, -0.00026806839741766453, -2.2151507437229156e-06, 0.00020786607638001442, 0.00017593754455447197, -0.00016461312770843506, -0.0003985136281698942, -0.00025607552379369736, -0.00013757869601249695, -6.365473382174969e-05, -0.00020918482914566994, -0.0002473779022693634, -0.00014893501065671444, 0.0001332222018390894, 0.00040268548764288425, 0.0002646979410201311, 7.32732005417347e-05, -0.00025837705470621586, -0.0007715015672147274, -0.0012102078180760145, -0.0009501995518803596, -0.00017246650531888008, 0.0003386975731700659, 0.0003189577255398035, 0.00021003605797886848, 0.0001099675428122282, 0.00024433922953903675, 3.217277117073536e-05 ] ] ], "codec_error": 0.005472081247717142, "codec_tol": 1e-05, "dec_tol": 0.001 }, { "repo_id": "hf-audio/xcodec-hubert-librispeech", "bandwidth": 1.0, "codes": [ [ [ 590, 590, 306, 306, 590, 1006, 826, 916, 590, 826, 826, 844, 844, 746, 392 ], [ 748, 375, 156, 327, 327, 156, 448, 448, 327, 346, 1023, 346, 346, 1023, 1023 ] ] ], "decoded": [ [ [ -0.00022301427088677883, 0.00018040905706584454, 0.00034872093237936497, 0.00010567391291260719, -0.0002222477924078703, -0.00012606964446604252, -7.731537334620953e-05, -0.00012137391604483128, -0.0001762432511895895, -0.0007736883126199245, -0.0010079797357320786, -0.00046425615437328815, -7.964251562952995e-05, -3.2845884561538696e-05, -1.0334886610507965e-05, 8.499552495777607e-05, -0.00010045897215604782, -0.00029115937650203705, -0.00044626137241721153, -0.00014261016622185707, -0.00036265188828110695, -0.0005839171353727579, -0.00024048984050750732, 1.8859747797250748e-05, 5.6158751249313354e-06, 0.00017515220679342747, 0.0004101542290300131, 0.0001747957430779934, -0.0003047185018658638, -0.00032516056671738625, -0.0002171192318201065, -0.0002566229086369276, -0.00022618728689849377, 8.058198727667332e-05, 0.00029059057123959064, 0.000731412204913795, 0.0009413085645064712, 0.0005381645169109106, -7.746554911136627e-05, -0.0005039803218096495, -0.0008559448178857565, -0.0010777676943689585, -0.0005720553454011679, 0.00046507292427122593, 0.0009388792095705867, 0.0007773893885314465, 0.000560535816475749, 0.00030490709468722343, 0.00010391278192400932, -0.0004857585299760103 ] ] ], "codec_error": 0.005680468864738941, "codec_tol": 1e-05, "dec_tol": 0.001 }, { "repo_id": "hf-audio/xcodec-hubert-librispeech", "bandwidth": 1.5, "codes": [ [ [ 590, 590, 306, 306, 590, 1006, 826, 916, 590, 826, 826, 844, 844, 746, 392 ], [ 748, 375, 156, 327, 327, 156, 448, 448, 327, 346, 1023, 346, 346, 1023, 1023 ], [ 351, 835, 351, 992, 992, 817, 127, 832, 855, 177, 817, 177, 177, 97, 472 ] ] ], "decoded": [ [ [ -8.131377398967743e-05, 0.0003722899127751589, 0.0005264813080430031, 0.00024720048531889915, -0.00012781796976923943, -6.020977161824703e-05, 2.0435545593500137e-06, -9.971903637051582e-06, 
-5.366327241063118e-05, -0.0007150094024837017, -0.0009912890382111073, -0.00038447463884949684, 5.855737254023552e-05, 0.00012949109077453613, 0.00017356942407786846, 0.0002417820505797863, -4.827766679227352e-05, -0.00032059219665825367, -0.0004915285389870405, -0.00013681501150131226, -0.00028307782486081123, -0.00039494410157203674, 5.228118970990181e-05, 0.0003737024962902069, 0.0003404058516025543, 0.0004928638227283955, 0.000665064319036901, 0.00023967307060956955, -0.000433565117418766, -0.0005353728774935007, -0.00038189860060811043, -0.00032747630029916763, -0.00019068107940256596, 0.0002735431771725416, 0.0006756627699360251, 0.0012527555227279663, 0.0013769968645647168, 0.000728685175999999, -0.0001932412851601839, -0.0009031407535076141, -0.0014542911667376757, -0.0017141078133136034, -0.001031694933772087, 0.00032188743352890015, 0.0010538509814068675, 0.0010152260074391961, 0.0008743192302063107, 0.0006233796011656523, 0.0002936923410743475, -0.0004914039745926857 ] ] ], "codec_error": 0.005860992707312107, "codec_tol": 1e-05, "dec_tol": 0.001 }, { "repo_id": "hf-audio/xcodec-hubert-librispeech", "bandwidth": 2.0, "codes": [ [ [ 590, 590, 306, 306, 590, 1006, 826, 916, 590, 826, 826, 844, 844, 746, 392 ], [ 748, 375, 156, 327, 327, 156, 448, 448, 327, 346, 1023, 346, 346, 1023, 1023 ], [ 351, 835, 351, 992, 992, 817, 127, 832, 855, 177, 817, 177, 177, 97, 472 ], [ 156, 1008, 546, 266, 856, 116, 173, 531, 894, 938, 87, 531, 745, 747, 747 ] ] ], "decoded": [ [ [ 6.87665306031704e-05, 0.0005173773970454931, 0.0006601555505767465, 0.0004073025193065405, 2.4633249267935753e-05, 6.317137740552425e-05, 0.00011769519187510014, 0.00010632933117449284, 6.606383249163628e-05, -0.0006399150006473064, -0.0008867487777024508, -0.00026029557920992374, 0.00015681260265409946, 0.00021465751342475414, 0.00026818737387657166, 0.00039722840301692486, 0.00010386388748884201, -0.0001172386109828949, -0.0002974425442516804, 6.535928696393967e-05, -0.00014087161980569363, -0.0002665710635483265, 0.00022345990873873234, 0.0005574806127697229, 0.0005362457595765591, 0.0006526130018755794, 0.0007966818520799279, 0.0003330092877149582, -0.00044982251711189747, -0.0006805681623518467, -0.0005900394171476364, -0.0005267017986625433, -0.00036593549884855747, 0.00016947905533015728, 0.0006881445879116654, 0.001375970197841525, 0.0014863089891150594, 0.0008116817334666848, -8.753198198974133e-05, -0.0008385318797081709, -0.0014865880366414785, -0.0017615130636841059, -0.0009944913908839226, 0.00041615008376538754, 0.0011417579371482134, 0.001129985903389752, 0.0010677824029698968, 0.0007883547805249691, 0.00044182222336530685, -0.0003320761024951935 ] ] ], "codec_error": 0.005977352149784565, "codec_tol": 1e-05, "dec_tol": 0.001 }, { "repo_id": "hf-audio/xcodec-hubert-librispeech", "bandwidth": 4.0, "codes": null, "decoded": [ [ [ 4.919269122183323e-05, 0.00044998968951404095, 0.0005170812364667654, 0.00038593681529164314, 0.00012237182818353176, 6.045890040695667e-05, 7.303990423679352e-05, 0.00023534847423434258, 0.00028711999766528606, -0.0004980487283319235, -0.0007807251531630754, -0.0001900549978017807, 0.00020040222443640232, 0.00022384990006685257, 0.00028798868879675865, 0.0005522638093680143, 0.0003211870789527893, 5.3691910579800606e-05, -0.0002666471991688013, 3.6196084693074226e-05, -0.00030040694400668144, -0.0006298250518739223, -0.00017189816571772099, 0.0004776653368026018, 0.0007132112514227629, 0.0007824968779459596, 0.0010266141034662724, 0.0007283660816028714, -0.00011806422844529152, 
-0.0006026057526469231, -0.0005629444494843483, -0.00034018163569271564, -0.0001810593530535698, 0.00023290864191949368, 0.0007565215928480029, 0.001519701792858541, 0.0016510691493749619, 0.0009673768654465675, 0.00013116677291691303, -0.0004444648511707783, -0.001082052243873477, -0.001428384566679597, -0.0006310269236564636, 0.0007636576192453504, 0.0013155697379261255, 0.001216168631799519, 0.0012932788813486695, 0.0010688184993341565, 0.0005435235798358917, -0.00032686395570635796 ] ] ], "codec_error": 0.00608655484393239, "codec_tol": 1e-05, "dec_tol": 0.001 }, { "repo_id": "hf-audio/xcodec-hubert-general", "bandwidth": 0.5, "codes": [ [ [ 935, 433, 126, 803, 850, 448, 917, 387, 387, 592, 592, 855, 917, 572, 28 ] ] ], "decoded": [ [ [ -0.00021017552353441715, 0.0004965162370353937, 0.0001386473886668682, -0.00020841951481997967, -8.969777263700962e-05, -0.0002771597355604172, -0.00027932715602219105, 5.5042095482349396e-05, 0.00026329560205340385, 0.00017548329196870327, 0.00038894289173185825, 0.0005539481062442064, 0.00014357827603816986, -0.00031660543754696846, -0.0005882629193365574, -0.00044182687997817993, -0.0001490083523094654, -5.8116158470511436e-05, 4.0457816794514656e-05, -7.796147838234901e-05, 3.5779085010290146e-06, 0.00017821392975747585, 9.860633872449398e-05, 0.00010030693374574184, 1.2427102774381638e-05, -0.0001045640092343092, -9.76896844804287e-05, -5.82672655582428e-05, 0.00017144251614809036, 7.190275937318802e-06, -0.00019244407303631306, 5.330890417098999e-05, 0.0001915388274937868, 2.933409996330738e-05, -9.671831503510475e-05, -0.00038430141285061836, -0.000596470432356, -0.00054143276065588, -0.0003454047255218029, -0.0001294529065489769, -0.00019761803559958935, -0.00011440459638834, 1.8423888832330704e-05, 5.5767595767974854e-05, 0.00012666056863963604, -9.990530088543892e-06, -8.841603994369507e-05, -7.79847614467144e-05, -0.0001224260777235031, -9.860680438578129e-05 ] ] ], "codec_error": 0.0038952771574258804, "codec_tol": 1e-06, "dec_tol": 0.0001 }, { "repo_id": "hf-audio/xcodec-hubert-general", "bandwidth": 1.0, "codes": [ [ [ 935, 433, 126, 803, 850, 448, 917, 387, 387, 592, 592, 855, 917, 572, 28 ], [ 739, 882, 882, 49, 459, 64, 189, 459, 459, 459, 143, 551, 550, 760, 808 ] ] ], "decoded": [ [ [ -0.0002046455629169941, 0.00046922592446208, 0.00011031189933419228, -0.0002996474504470825, -0.00021691364236176014, -0.00039181788451969624, -0.0003864085301756859, -4.2735831812024117e-05, 0.00022559752687811852, 0.00019318307749927044, 0.00036341347731649876, 0.00047189113683998585, 5.879811942577362e-05, -0.00035421736538410187, -0.0005878964439034462, -0.00047568860463798046, -0.00024429219774901867, -8.333590812981129e-05, 9.222817607223988e-05, -2.709287218749523e-05, 2.5267712771892548e-05, 0.00020193355157971382, 0.00010740640573203564, 1.998012885451317e-05, -0.00012808036990463734, -0.00025810301303863525, -0.00024647475220263004, -0.0001851061824709177, 6.030406802892685e-05, -3.3717602491378784e-05, -0.00014680484309792519, 5.045789293944836e-05, 3.881426528096199e-05, -0.00020871451124548912, -0.0002821478992700577, -0.0005010757595300674, -0.0007123809773474932, -0.0006850638892501593, -0.0005063675343990326, -0.00030805286951363087, -0.0003586956299841404, -0.00024597207084298134, -0.0001272936351597309, -0.0001404962968081236, -9.55783762037754e-05, -0.00024282699450850487, -0.0003365979064255953, -0.000235437648370862, -0.00011085602454841137, -1.4352379366755486e-05 ] ] ], "codec_error": 0.003511926392093301, "codec_tol": 1e-06, 
"dec_tol": 0.0001 }, { "repo_id": "hf-audio/xcodec-hubert-general", "bandwidth": 1.5, "codes": [ [ [ 935, 433, 126, 803, 850, 448, 917, 387, 387, 592, 592, 855, 917, 572, 28 ], [ 739, 882, 882, 49, 459, 64, 189, 459, 459, 459, 143, 551, 550, 760, 808 ], [ 710, 536, 176, 531, 623, 626, 100, 833, 796, 311, 457, 382, 360, 176, 410 ] ] ], "decoded": [ [ [ -0.0001305723562836647, 0.0007112827152013779, 0.0003338572569191456, -6.938702426850796e-05, 0.0001109603326767683, -7.854425348341465e-05, -5.859602242708206e-05, 0.00043661193922162056, 0.0007921892683953047, 0.0007424359209835529, 0.0009273507166653872, 0.0010653517674654722, 0.0005372778978198767, -4.121800884604454e-05, -0.0003301908727735281, -0.00012553343549370766, 0.00018500350415706635, 0.000404976774007082, 0.0006721802055835724, 0.0005569383502006531, 0.0005787969566881657, 0.0007312376983463764, 0.0005475706420838833, 0.00038789398968219757, 0.00024302303791046143, 0.0001421694178134203, 0.00014890613965690136, 0.000260709086433053, 0.0005868605803698301, 0.0004271466750651598, 0.00019052578136324883, 0.000402234960347414, 0.00040456512942910194, 8.718366734683514e-05, 8.386559784412384e-07, -0.00021422700956463814, -0.00043249805457890034, -0.00034649111330509186, -9.317276999354362e-05, 0.00012367917224764824, -5.164183676242828e-06, 4.87668439745903e-05, 0.0001070518046617508, 2.7181115001440048e-05, 0.0001189960166811943, 4.686368629336357e-05, 7.493654265999794e-06, 0.00016385293565690517, 0.0002651643007993698, 0.0002767094410955906 ] ] ], "codec_error": 0.0034364312887191772, "codec_tol": 1e-06, "dec_tol": 0.0001 }, { "repo_id": "hf-audio/xcodec-hubert-general", "bandwidth": 2.0, "codes": [ [ [ 935, 433, 126, 803, 850, 448, 917, 387, 387, 592, 592, 855, 917, 572, 28 ], [ 739, 882, 882, 49, 459, 64, 189, 459, 459, 459, 143, 551, 550, 760, 808 ], [ 710, 536, 176, 531, 623, 626, 100, 833, 796, 311, 457, 382, 360, 176, 410 ], [ 791, 515, 953, 596, 454, 753, 295, 454, 454, 115, 515, 816, 36, 75, 226 ] ] ], "decoded": [ [ [ -0.00014006253331899643, 0.0007989001460373402, 0.00046538468450307846, 8.566956967115402e-05, 0.0002664625644683838, 5.368725396692753e-05, 4.951213486492634e-05, 0.000605646288022399, 0.001051453989930451, 0.0011005131527781487, 0.001340654562227428, 0.0014765722444280982, 0.0008794022724032402, 0.00019451347179710865, -0.0001761973835527897, 1.8655788153409958e-05, 0.0003523188643157482, 0.0006215828470885754, 0.0009482598397880793, 0.0008765892125666142, 0.0009128025267273188, 0.0010658367536962032, 0.0008650952950119972, 0.0006931314710527658, 0.0005274452269077301, 0.00040836725383996964, 0.0003828315529972315, 0.00045729358680546284, 0.0008139281999319792, 0.0007075911853462458, 0.0004986736457794905, 0.0007380517199635506, 0.0007470641285181046, 0.0003727404400706291, 0.00020861998200416565, -4.448019899427891e-05, -0.00026486185379326344, -0.00014658598229289055, 0.00014992570504546165, 0.0003946979995816946, 0.00024374201893806458, 0.000254632206633687, 0.00027234642766416073, 0.00017000176012516022, 0.0002736307214945555, 0.00022304686717689037, 0.00018746289424598217, 0.0003647569101303816, 0.00047829560935497284, 0.0004925637040287256 ] ] ], "codec_error": 0.003246477572247386, "codec_tol": 1e-06, "dec_tol": 0.0001 }, { "repo_id": "hf-audio/xcodec-hubert-general", "bandwidth": 4.0, "codes": null, "decoded": [ [ [ -0.0001746553461998701, 0.0008285697549581528, 0.0005095258820801973, 0.00022105872631072998, 0.00047804228961467743, 0.0002920464612543583, 0.00020101177506148815, 
0.0006921763997524977, 0.001189485308714211, 0.001171361654996872, 0.001430683652870357, 0.0016735134413465858, 0.0011466527357697487, 0.0004955567419528961, 0.00013427832163870335, 0.0003044463228434324, 0.0005627109203487635, 0.0007822064217180014, 0.001134557300247252, 0.0010599115630611777, 0.0011251196265220642, 0.001351013546809554, 0.001148079987615347, 0.0009455143008381128, 0.000764030497521162, 0.0005778523627668619, 0.00042620301246643066, 0.0005069044418632984, 0.0009447732008993626, 0.0009011845104396343, 0.0007778429426252842, 0.0010773082030937076, 0.0009713636245578527, 0.00035730027593672276, 0.00012435344979166985, -0.0001019404735416174, -0.0003732512705028057, -0.00022046314552426338, 0.0002396036870777607, 0.0005130092613399029, 0.00034346291795372963, 0.00045024044811725616, 0.000551396980881691, 0.00038504088297486305, 0.00042170868255198, 0.000304151326417923, 0.00014883745461702347, 0.00035709445364773273, 0.0005582491867244244, 0.0006064062472432852 ] ] ], "codec_error": 0.0029708717484027147, "codec_tol": 1e-06, "dec_tol": 0.0001 }, { "repo_id": "hf-audio/xcodec-hubert-general-balanced", "bandwidth": 0.5, "codes": [ [ [ 361, 327, 361, 220, 296, 448, 794, 794, 220, 215, 215, 523, 794, 572, 837 ] ] ], "decoded": [ [ [ -0.00034493682323955, 0.0004888690891675651, 5.2613399020629004e-05, -0.0003154197765979916, -0.000374013528926298, -0.0006679014186374843, -0.0004468882689252496, 0.00014481281687039882, 0.0004187110753264278, 0.0002444020356051624, 0.0005527980392798781, 0.0007324246689677238, 0.0001953293103724718, -0.0001868946710601449, -0.00039330992149189115, -1.5875579265411943e-05, 0.0003087812219746411, 0.00023550353944301605, 0.00026388472178950906, -1.399356551701203e-05, 5.0710201321635395e-05, 0.00025053517310880125, 0.0001787259243428707, 0.0004594939819071442, 0.000588070775847882, 0.00020284971105866134, -0.0001789979578461498, -0.0004223345313221216, -0.0005676769069395959, -0.0009293632465414703, -0.0010678550461307168, -0.0005041397525928915, 4.17735063820146e-05, 0.0003083720221184194, 0.00022762120352126658, -0.0001844105718191713, -0.0003540224861353636, -0.0004843481001444161, -0.0005098123801872134, -4.7943081881385297e-05, 0.0003332827764097601, 0.0006624205270782113, 0.0007267107721418142, 0.0005331048159860075, 0.0005102302529849112, 0.00026525993598625064, -0.00011996129614999518, -0.0003491249808575958, -0.00035246959305368364, -0.00011564368469407782 ] ] ], "codec_error": 0.0033109495416283607, "codec_tol": 0.0001, "dec_tol": 1e-05 }, { "repo_id": "hf-audio/xcodec-hubert-general-balanced", "bandwidth": 1.0, "codes": [ [ [ 361, 327, 361, 220, 296, 448, 794, 794, 220, 215, 215, 523, 794, 572, 837 ], [ 558, 561, 17, 689, 341, 17, 17, 746, 995, 203, 203, 626, 44, 930, 137 ] ] ], "decoded": [ [ [ 0.00030808174051344395, 0.001045881537720561, 0.0007541970699094236, 0.00033998070284724236, 0.0003453573735896498, 9.488919749855995e-05, 0.00013062538346275687, 0.0006009942735545337, 0.0008533764048479497, 0.0006959228776395321, 0.0010016924934461713, 0.0011683015618473291, 0.0007219507242552936, 0.0004506743571255356, 0.0003839473647531122, 0.0006096045835874975, 0.0006778741371817887, 0.0006203189841471612, 0.0006753152702003717, 0.0004131025052629411, 0.000491982267703861, 0.000795087602455169, 0.0007778703584335744, 0.0009378900285810232, 0.0010353495599702, 0.0006254438776522875, 0.000214845611480996, 0.00012940631131641567, 0.00015907330089248717, -7.865422958275303e-05, -4.9406051402911544e-05, 0.000535525381565094, 0.0008429466979578137, 
0.0009230281575582922, 0.0009178092004731297, 0.0005792290903627872, 0.0003179354825988412, 0.0002350879949517548, 0.0002593859098851681, 0.00047500411164946854, 0.0005929681938141584, 0.0008235352579504251, 0.0009488606592640281, 0.0009453080128878355, 0.0011052204063162208, 0.0009675186011008918, 0.0005994988605380058, 0.00043946868390776217, 0.0004515677283052355, 0.00041880516801029444 ] ] ], "codec_error": 0.0031247916631400585, "codec_tol": 0.0001, "dec_tol": 1e-05 }, { "repo_id": "hf-audio/xcodec-hubert-general-balanced", "bandwidth": 1.5, "codes": [ [ [ 361, 327, 361, 220, 296, 448, 794, 794, 220, 215, 215, 523, 794, 572, 837 ], [ 558, 561, 17, 689, 341, 17, 17, 746, 995, 203, 203, 626, 44, 930, 137 ], [ 551, 408, 315, 232, 209, 733, 935, 96, 644, 260, 204, 1005, 360, 220, 566 ] ] ], "decoded": [ [ [ 0.000503583112731576, 0.0012962579494342208, 0.000953793409280479, 0.000562680943403393, 0.0005354040767997503, 0.0001995227939914912, 0.00021683776867575943, 0.0007633829372934997, 0.0011789660202339292, 0.0011270726099610329, 0.0015703362878412008, 0.0018343244446441531, 0.0013319269055500627, 0.000994652509689331, 0.000904104090295732, 0.0010642888955771923, 0.001001085969619453, 0.0008879639790393412, 0.0009764981223270297, 0.0007446073577739298, 0.0009423579322174191, 0.0014007777208462358, 0.0014580122660845518, 0.001651210943236947, 0.0017139619449153543, 0.0011293691350147128, 0.00046205963008105755, 0.0002840239612851292, 0.00037141164648346603, 0.00018329237354919314, 0.00032734169508330524, 0.0011594139505177736, 0.001596459187567234, 0.0016294002998620272, 0.00158004742115736, 0.0011817252961918712, 0.0007946674595586956, 0.0006213290616869926, 0.0006479363073594868, 0.0008777808980084956, 0.0010338183492422104, 0.001345760771073401, 0.0015597327146679163, 0.0016180119710043073, 0.0018115403363481164, 0.001682001631706953, 0.0012854461092501879, 0.001027835882268846, 0.0009527513175271451, 0.000887712521944195 ] ] ], "codec_error": 0.0029652512166649103, "codec_tol": 0.0001, "dec_tol": 1e-05 }, { "repo_id": "hf-audio/xcodec-hubert-general-balanced", "bandwidth": 2.0, "codes": [ [ [ 361, 327, 361, 220, 296, 448, 794, 794, 220, 215, 215, 523, 794, 572, 837 ], [ 558, 561, 17, 689, 341, 17, 17, 746, 995, 203, 203, 626, 44, 930, 137 ], [ 551, 408, 315, 232, 209, 733, 935, 96, 644, 260, 204, 1005, 360, 220, 566 ], [ 14, 14, 407, 656, 472, 407, 472, 365, 444, 521, 162, 128, 575, 340, 407 ] ] ], "decoded": [ [ [ 0.0005418736836872995, 0.001448116498067975, 0.001061455113813281, 0.0007103311945684254, 0.0007390136015601456, 0.00034474380663596094, 0.0003522688348311931, 0.0008343191584572196, 0.001150410040281713, 0.0010256256209686399, 0.001495253061875701, 0.001773528871126473, 0.0012762193800881505, 0.0009920906741172075, 0.0009303519036620855, 0.0011235735146328807, 0.0010679269907996058, 0.0009568841778673232, 0.0010723505401983857, 0.0007524813408963382, 0.0009166915551759303, 0.0013677531387656927, 0.0013341642916202545, 0.0015379394171759486, 0.0016818979056552052, 0.001159622217528522, 0.0005840237135998905, 0.00046342622954398394, 0.0005499363178387284, 0.00028997453046031296, 0.0003494007105473429, 0.00111175118945539, 0.0014719140017405152, 0.001518196426331997, 0.0015718521317467093, 0.0012554096756502986, 0.0009631850407458842, 0.0008293383289128542, 0.0007663772557862103, 0.0008835261687636375, 0.0009167938260361552, 0.0011743532959371805, 0.0012979755410924554, 0.0013041547499597073, 0.0016306998440995812, 0.001617942820303142, 0.0012842006981372833, 
0.001117410254664719, 0.0010679230326786637, 0.0009179076296277344 ] ] ], "codec_error": 0.002776096574962139, "codec_tol": 0.0001, "dec_tol": 1e-05 }, { "repo_id": "hf-audio/xcodec-hubert-general-balanced", "bandwidth": 4.0, "codes": null, "decoded": null, "codec_error": 0.002473212545737624, "codec_tol": 0.0001, "dec_tol": 1e-05 }, { "repo_id": "hf-audio/xcodec-wavlm-mls", "bandwidth": 0.5, "codes": null, "decoded": [ [ [ -0.0002471721963956952, -0.0007365602068603039, -0.00029661963344551623, -0.0001487718109274283, -0.00025510278646834195, -8.943512511905283e-06, 0.0001678174448898062, 0.0001996321661863476, -4.0839742723619565e-05, -0.00035881157964468, -0.0003721409884747118, -0.00031694662175141275, -0.00031707881134934723, -0.000331440765876323, -4.38143324572593e-05, 0.0003987671807408333, 0.0005647469661198556, 0.000618462567217648, 0.00044193019857630134, -8.946254092734307e-05, -0.00041933005559258163, -0.00044410640839487314, -0.00023401528596878052, -3.552871567080729e-05, 0.00014210691733751446, 0.0002467158483341336, 0.00015603734937030822, 3.193183147232048e-05, -0.00013763393508270383, -0.00035281648160889745, -0.000529766664840281, -0.0006517550209537148, -0.0006077081197872758, -0.00020284725178498775, 0.00034685738501138985, 0.0006811313796788454, 0.000679212505929172, 0.0005077550304122269, 0.0003816273238044232, 0.0002144991885870695, -0.00016964729002211243, -0.00016414248966611922, 7.161950634326786e-05, 0.0002906467125285417, 0.0007565916166640818, 0.0010058481711894274, 0.0012552258558571339, 0.0010698521509766579, 0.0009335971553809941, 0.000667513522785157 ] ] ], "codec_error": 0.002784556010738015, "codec_tol": 1e-05, "dec_tol": 0.0001 }, { "repo_id": "hf-audio/xcodec-wavlm-mls", "bandwidth": 1.0, "codes": null, "decoded": [ [ [ -0.00021897304395679384, -0.0006707283901050687, -0.0003198632912244648, -0.0002019737585214898, -0.00031223104451783, -0.00015073314716573805, 1.3109893188811839e-05, 0.00010109333379659802, -4.4847565732197836e-05, -0.00026841764338314533, -0.0002181670570280403, -0.00029383719083853066, -0.0005045154830440879, -0.0005756605532951653, -0.0002519423433113843, 0.00017382082296535373, 0.0003450627264101058, 0.0004639998951461166, 0.00032735118293203413, -0.0002998501295223832, -0.0007324003963731229, -0.0006358272512443364, -0.00019679185061249882, 9.154963481705636e-05, 0.00023105139553081244, 0.0002694898867048323, 5.3933585149934515e-05, -0.00017367034160997719, -0.000370326975826174, -0.00045389344450086355, -0.0003742754051927477, -0.0003183094668202102, -0.0003107236698269844, -7.117551285773516e-05, 0.00032562544220127165, 0.0005173510289750993, 0.00035740318708121777, 0.0001532924798084423, 8.018290100153536e-05, -8.011072350200266e-05, -0.0004129649023525417, -0.0002168295468436554, 0.00016200236859731376, 0.00034917722223326564, 0.0006240003858692944, 0.0007102257804945111, 0.0006860584835521877, 0.00021981803001835942, -7.106941484380513e-05, -0.00018566018843557686 ] ] ], "codec_error": 0.0025421823374927044, "codec_tol": 1e-05, "dec_tol": 0.0001 }, { "repo_id": "hf-audio/xcodec-wavlm-mls", "bandwidth": 1.5, "codes": null, "decoded": [ [ [ -0.00028943613870069385, -0.0007279455894604325, -0.00037402231828309596, -0.00023792865977156907, -0.0003411142679397017, -0.0001595417852513492, 4.3847772758454084e-05, 3.897479109582491e-05, -0.00015776942018419504, -0.0003887333150487393, -0.0003272934991400689, -0.000308241811580956, -0.0005203168257139623, -0.0006497218855656683, -0.00032806317904032767, 9.003604645840824e-05, 
0.0002507736498955637, 0.00038551652687601745, 0.00031532219145447016, -0.00028256408404558897, -0.000780107919126749, -0.0006798981921747327, -0.000187133060535416, 9.075377602130175e-05, 0.0002059313701465726, 0.000252072379225865, 8.591249934397638e-05, -6.903265602886677e-05, -0.00022177191567607224, -0.0003164776717312634, -0.0002896834921557456, -0.00034714731737039983, -0.0004501724033616483, -0.00014487521548289806, 0.00042451676563359797, 0.0007103644893504679, 0.0005320025375112891, 0.00030199086177162826, 0.00017571817443240434, -7.344436016865075e-05, -0.0004125718551222235, -0.00022480337065644562, 0.00010712102812249213, 0.00021271411969792098, 0.0004759627627208829, 0.0007045314996503294, 0.0008243073243647814, 0.00042970190406776965, 0.00021766081044916064, 3.350689439685084e-05 ] ] ], "codec_error": 0.0024348432198166847, "codec_tol": 1e-05, "dec_tol": 0.0001 }, { "repo_id": "hf-audio/xcodec-wavlm-mls", "bandwidth": 2.0, "codes": null, "decoded": [ [ [ -0.00010455434676259756, -0.0006130463443696499, -0.00019683418213389814, -0.00010099369683302939, -0.00021099163859616965, -2.6220950530841947e-06, 0.00020871838205493987, 0.00018668117991182953, -1.0649124305928126e-05, -0.00018443060980644077, -9.16237331693992e-05, -0.00010015940642915666, -0.0003710154560394585, -0.0004892981378361583, -0.00011564286251086742, 0.00032124019344337285, 0.0004669901682063937, 0.000622692343313247, 0.0005683908239006996, -0.0001299505529459566, -0.0007344742771238089, -0.0006522545008920133, -7.369450759142637e-05, 0.00023016634804662317, 0.00029706733766943216, 0.00040724140126258135, 0.00031984460656531155, 0.00010261082206852734, -0.00017221950110979378, -0.0002610271912999451, -0.00015819921100046486, -0.00027220440097153187, -0.00047697784611955285, -0.00013687692990060896, 0.0004770267987623811, 0.0007236705278046429, 0.0005755429738201201, 0.0005305277300067246, 0.0004971636226400733, 9.585886436980218e-05, -0.00044038970372639596, -0.0002591072116047144, 8.882302790880203e-05, 0.00010468350956216455, 0.0004465210950002074, 0.0008701237966306508, 0.001133413054049015, 0.0006035252590663731, 0.00032269812072627246, 0.0001862043427536264 ] ] ], "codec_error": 0.0022935366723686457, "codec_tol": 1e-05, "dec_tol": 0.0001 }, { "repo_id": "hf-audio/xcodec-wavlm-mls", "bandwidth": 4.0, "codes": null, "decoded": [ [ [ 0.0002609139191918075, -0.0005568607593886554, -8.985280146589503e-06, 8.966325549408793e-05, -7.479086343664676e-05, 0.0002076481468975544, 0.00034824819886125624, 0.000307059584883973, 0.00019627390429377556, -3.6874123907182366e-05, -7.025599188636988e-06, -0.00012438457633834332, -0.00032755275606177747, -0.000411909946706146, -8.821926894597709e-05, 0.0003717693325597793, 0.0004727449850179255, 0.0006257848581299186, 0.0005679113091900945, -0.00014851021114736795, -0.0006242442759685218, -0.0005352640873752534, -1.3237040548119694e-06, 0.0003045561315957457, 0.00041467309347353876, 0.0005932425847277045, 0.00044341094326227903, 0.00023843055532779545, 8.901742694433779e-05, -1.0392006515758112e-05, 0.0001084191317204386, 0.00021004895097576082, 8.228524529840797e-05, 0.0003851399233099073, 0.0008233294356614351, 0.0008892629412002861, 0.000785985728725791, 0.0006910106167197227, 0.0005654219421558082, 0.00034487323137000203, -0.00012858158152084798, -3.434516838751733e-05, 0.0003092561091762036, 0.0003492308605927974, 0.0006020109285600483, 0.0008246254292316735, 0.0010068734409287572, 0.000397632597014308, 0.00017323131032753736, 0.00012472675007302314 ] ] ], 
"codec_error": 0.002119641751050949, "codec_tol": 1e-05, "dec_tol": 0.0001 }, { "repo_id": "hf-audio/xcodec-wavlm-more-data", "bandwidth": 0.5, "codes": [ [ [ 44, 881, 344, 344, 344, 881, 44, 881, 571, 813, 107, 950, 437, 950, 437 ] ] ], "decoded": [ [ [ 8.60798463691026e-05, -0.0003060192975681275, 9.043663885677233e-05, 0.00014830089639872313, -2.100023630191572e-05, 0.00017922179540619254, 0.00023896087077446282, 0.00024265481624752283, 9.400439739692956e-05, -0.0001393423299305141, -0.00017324750660918653, -0.00010024692164734006, -9.44490238907747e-05, -0.00022922895732335746, -1.1478223314043134e-06, 0.0003733094781637192, 0.00048060796689242125, 0.00044257345143705606, 0.00030929266358725727, -2.485524964868091e-05, -0.00015137175796553493, -4.780884046340361e-05, 0.00020679077715612948, 0.00033283786615356803, 0.0003789166221395135, 0.0004155742935836315, 0.00019656820222735405, -6.525123899336904e-05, -0.00014414236648008227, -0.00024506013141945004, -0.0002763999509625137, -0.0002528353070374578, -0.0002784187672659755, -5.5240401707123965e-05, 0.00034663392580114305, 0.0005081702838651836, 0.0003451743978075683, 0.00017948850290849805, 0.0001805563224479556, 5.415735358837992e-05, -0.0002908696769736707, -0.00022546530817635357, -2.816985215758905e-06, 8.796519978204742e-05, 0.00036707063554786146, 0.0004874078440479934, 0.0006295923958532512, 0.00048173684626817703, 0.0004252410144545138, 0.00026322310441173613 ] ] ], "codec_error": 0.002914232201874256, "codec_tol": 1e-05, "dec_tol": 1e-05 }, { "repo_id": "hf-audio/xcodec-wavlm-more-data", "bandwidth": 1.0, "codes": [ [ [ 44, 881, 344, 344, 344, 881, 44, 881, 571, 813, 107, 950, 437, 950, 437 ], [ 659, 335, 335, 801, 30, 726, 647, 721, 562, 421, 421, 797, 797, 797, 797 ] ] ], "decoded": [ [ [ -0.00013853900600224733, -0.0006001053261570632, -0.00014205696061253548, -5.91947537031956e-06, -0.00010432626731926575, 8.372707816306502e-05, 0.00019299553241580725, 0.0002358516794629395, -3.9954891690285876e-05, -0.0002931125636678189, -0.00026569172041490674, -0.00043283088598400354, -0.0006556544685736299, -0.0006934062112122774, -0.00022642247495241463, 0.0002532481448724866, 0.0005737473256886005, 0.0007966211414895952, 0.000520562578458339, -0.00015708617866039276, -0.0004774363187607378, -0.0003260352532379329, 6.291552563197911e-05, 0.0002651271061040461, 0.00031190537265501916, 0.0003755779762286693, 0.00024414766812697053, 7.252671639434993e-05, -6.486603524535894e-05, -0.00016354414401575923, -3.527149237925187e-05, -0.00011908992746612057, -0.0004999941447749734, -0.0004161799151916057, 0.0002218327426817268, 0.0004935782635584474, 0.0002801110385917127, 0.00032458442728966475, 0.0007008450338616967, 0.0006555632571689785, 7.318511052289978e-05, 0.0001435471058357507, 0.0003743935376405716, 0.00017988341278396547, 0.00022015272406861186, 0.00037314006476663053, 0.0005442615947686136, 0.00028906844090670347, 0.0002551917568780482, 0.00038736595888622105 ] ] ], "codec_error": 0.002613937947899103, "codec_tol": 1e-05, "dec_tol": 1e-05 }, { "repo_id": "hf-audio/xcodec-wavlm-more-data", "bandwidth": 1.5, "codes": null, "decoded": [ [ [ 6.279885565163568e-05, -0.00033689799602143466, 0.00011250129318796098, 0.00019457557937130332, 0.0001285735343117267, 0.00032528748852200806, 0.0003915508568752557, 0.0003026904596481472, 0.0002715887385420501, 0.00014779283083043993, 0.00011752712453017011, 5.8236309996573254e-05, -0.00019055884331464767, -0.0003741715627256781, 6.279123772401363e-05, 0.0007348853396251798, 
0.0010394826531410217, 0.0010661018313840032, 0.0007755373371765018, -2.9394934244919568e-05, -0.000576356309466064, -0.00027806576690636575, 0.00037756405072286725, 0.0006358004757203162, 0.0006607291288673878, 0.000555443752091378, 0.00015076500130817294, -7.972745515871793e-05, -7.068378909025341e-05, -0.00018670226563699543, -0.00018936209380626678, -0.00012967083603143692, -0.0003847008920274675, -0.0004100532678421587, 0.0002501893322914839, 0.0007843074854463339, 0.000563940207939595, 0.000467402336653322, 0.0007548240246251225, 0.0005277566961012781, -1.5599987818859518e-05, 0.0002939583209808916, 0.0006670067086815834, 0.0005304471123963594, 0.000824933813419193, 0.0012162369675934315, 0.001134063582867384, 0.0007296190597116947, 0.0008042262634262443, 0.0005679695168510079 ] ] ], "codec_error": 0.002474759239703417, "codec_tol": 1e-05, "dec_tol": 1e-05 }, { "repo_id": "hf-audio/xcodec-wavlm-more-data", "bandwidth": 2.0, "codes": null, "decoded": [ [ [ 0.00025080336490646005, -0.00017259249580092728, 0.00025967558030970395, 0.000364720297511667, 0.0002696491137612611, 0.0004170248284935951, 0.0005731081473641098, 0.0005545281455852091, 0.0004947385750710964, 0.00031294734799303114, 0.0002801477094180882, 0.0001770674716681242, -0.00011514315701788291, -0.00026510338648222387, 0.00021959032164886594, 0.000816573272459209, 0.001120111788623035, 0.0012912392849102616, 0.001166811096481979, 0.0005309308762662113, 8.758961485000327e-06, 0.00021711227600462735, 0.0007150850724428892, 0.0008045516442507505, 0.0007022423087619245, 0.0007424060022458434, 0.0006152429268695414, 0.0004591939796227962, 0.0003924915799871087, 0.00026367188547737896, 0.00017671581008471549, 4.0699342207517475e-05, -0.0001596197544131428, -7.874160655774176e-05, 0.0004457739123608917, 0.0008729892433620989, 0.0007467817049473524, 0.0007103694952093065, 0.0010271944338455796, 0.0008960479754023254, 0.0004278179258108139, 0.0006703450926579535, 0.0008964851731434464, 0.0006187634426169097, 0.0007422066992148757, 0.0010011898120865226, 0.0008749665576033294, 0.00045576749835163355, 0.0005878594820387661, 0.00046867664786987007 ] ] ], "codec_error": 0.0023411870934069157, "codec_tol": 1e-05, "dec_tol": 1e-05 }, { "repo_id": "hf-audio/xcodec-wavlm-more-data", "bandwidth": 4.0, "codes": null, "decoded": [ [ [ 0.0005943458527326584, 0.00017469993326812983, 0.0007098331698216498, 0.0008159588905982673, 0.0008173630340024829, 0.0010675098747015, 0.0011202740715816617, 0.0009797199163585901, 0.0008500582771375775, 0.0006013234378769994, 0.000489078345708549, 0.0004758498689625412, 0.0004311382654123008, 0.00044707287452183664, 0.0008811730076558888, 0.0014070728793740273, 0.0015382873825728893, 0.0014448357978835702, 0.001160905696451664, 0.0004969813744537532, -1.3974204193800688e-06, 0.00016932631842792034, 0.0007671721396036446, 0.0010725297033786774, 0.0010861807968467474, 0.0010590273886919022, 0.0007575892377644777, 0.0003892407985404134, -1.2117772712372243e-05, -0.00036681993515230715, -0.0004377248405944556, -0.00038845473318360746, -0.00036490376805886626, -5.0593931518960744e-05, 0.0005764220259152353, 0.0010159657103940845, 0.000954087299760431, 0.0009282709797844291, 0.0010461328784003854, 0.0007925458485260606, 0.0002687857486307621, 0.00048614019760861993, 0.0009145350777544081, 0.0009633700246922672, 0.0012744814157485962, 0.0016483643557876348, 0.0016050116391852498, 0.0008498440147377551, 0.0005446132854558527, 0.00015282645472325385 ] ] ], "codec_error": 0.0021570040844380856, "codec_tol": 1e-05, 
"dec_tol": 1e-05 } ]
transformers/tests/fixtures/xcodec/integration_tests.json/0
{ "file_path": "transformers/tests/fixtures/xcodec/integration_tests.json", "repo_id": "transformers", "token_count": 55835 }
564
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch AIMv2 model.""" import inspect import tempfile import unittest import numpy as np import requests from parameterized import parameterized from pytest import mark from transformers import Aimv2Config, Aimv2TextConfig, Aimv2VisionConfig from transformers.testing_utils import ( require_flash_attn, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import ( is_torch_available, is_vision_available, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION, ModelTesterMixin, _config_zero_init, _test_eager_matches_sdpa_inference, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( Aimv2Model, Aimv2TextModel, Aimv2VisionModel, ) if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor, AutoProcessor class Aimv2VisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=False, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return Aimv2VisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, ) def create_and_check_model(self, config, pixel_values): model = Aimv2VisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict 
class Aimv2ModelTesterMixin(ModelTesterMixin): """ Subclass of ModelTesterMixin with methods specific to testing Aimv2 models. The SDPA equivalence test is overridden here because Aimv2 models may have test/vision/text+vision inputs, different output logits, and are not supposed to be used or tested with padding_side="left". """ def test_sdpa_can_dispatch_composite_models(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # Load the model with SDPA model_sdpa = model_class.from_pretrained(tmpdirname) # Load model with eager attention model_eager = model_class.from_pretrained( tmpdirname, attn_implementation="eager", ) model_eager = model_eager.eval().to(torch_device) if hasattr(model_sdpa, "vision_model"): self.assertTrue(model_sdpa.vision_model.config._attn_implementation == "sdpa") self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager") if hasattr(model_sdpa, "text_model"): self.assertTrue(model_sdpa.text_model.config._attn_implementation == "sdpa") self.assertTrue(model_eager.text_model.config._attn_implementation == "eager") self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model_eager.config._attn_implementation == "eager") @require_torch class Aimv2VisionModelTest(Aimv2ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Aimv2 does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (Aimv2VisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torchscript = False def setUp(self): self.model_tester = Aimv2VisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Aimv2VisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Aimv2 does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) class Aimv2TextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=False, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=32, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=25, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = 
use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return Aimv2TextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, ) def create_and_check_model(self, config, input_ids, input_mask): model = Aimv2TextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class Aimv2TextModelTest(Aimv2ModelTesterMixin, unittest.TestCase): all_model_classes = (Aimv2TextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False test_resize_embeddings = False test_torchscript = False def setUp(self): self.model_tester = Aimv2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Aimv2TextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Aimv2 does not use inputs_embeds") def test_inputs_embeds(self): pass class Aimv2ModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=False): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = Aimv2TextModelTester(parent, **text_kwargs) self.vision_model_tester = Aimv2VisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, input_ids, attention_mask, pixel_values 
def get_config(self): return Aimv2Config.from_text_vision_configs( self.text_model_tester.get_config(), self.vision_model_tester.get_config(), projection_dim=64 ) def create_and_check_model(self, config, input_ids, attention_mask, pixel_values): model = Aimv2Model(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, } return config, inputs_dict @require_torch class Aimv2ModelTest(Aimv2ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): additional_model_inputs = ["pixel_values"] all_model_classes = (Aimv2Model,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": Aimv2Model, "image-feature-extraction": Aimv2VisionModel} if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_torchscript = False test_resize_embeddings = False test_attention_outputs = False _is_composite = True def setUp(self): self.model_tester = Aimv2ModelTester(self) common_properties = ["projection_dim", "logit_scale_init_value"] self.config_tester = ConfigTester( self, config_class=Aimv2Config, has_text_modality=False, common_properties=common_properties ) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() print(config_and_inputs) self.model_tester.create_and_check_model(*config_and_inputs) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Aimv2Model does not have input/output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip("Size mismatch on CUDA") def test_multi_gpu_data_parallel_forward(self): pass # Override as the `logit_scale` parameter initialization is different for Aimv2 def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initialized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save Aimv2Config and check if we can load Aimv2VisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = 
Aimv2VisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save Aimv2Config and check if we can load Aimv2TextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = Aimv2TextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16) model.to(torch_device) dummy_pixel_values = inputs_dict["pixel_values"].to(torch.bfloat16) dummy_input_ids = inputs_dict["input_ids"] outputs = model(pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True) outputs_fa = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True ) self.assertTrue( torch.allclose(outputs.logits_per_image, outputs_fa.logits_per_image, atol=4e-2, rtol=4e-2), f"Image logits max diff: {torch.max(torch.abs(outputs.logits_per_image - outputs_fa.logits_per_image))}", ) self.assertTrue( torch.allclose(outputs.logits_per_text, outputs_fa.logits_per_text, atol=4e-2, rtol=4e-2), f"Text logits max diff: {torch.max(torch.abs(outputs.logits_per_text - outputs_fa.logits_per_text))}", ) @require_flash_attn @require_torch_gpu @mark.flash_attn_test def test_flash_attn_2_inference_equivalence_right_padding(self): for model_class in self.all_model_classes: if not model_class._supports_flash_attn: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16, attn_implementation="eager") model.to(torch_device) dummy_pixel_values = inputs_dict["pixel_values"].to(torch.bfloat16) dummy_input_ids = inputs_dict["input_ids"] dummy_pixel_mask = inputs_dict["attention_mask"] # right padding dummy_pixel_mask[:] = 1 dummy_pixel_mask[:, -1:] = 0 outputs = model(pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True) outputs_fa = model_fa( pixel_values=dummy_pixel_values, input_ids=dummy_input_ids, output_hidden_states=True ) logits_per_image_eager = outputs.logits_per_image[:, :-1] logits_per_text_eager = outputs.logits_per_text[:, :-1] logits_per_image_sdpa = outputs_fa.logits_per_image[:, :-1] logits_per_text_sdpa = outputs_fa.logits_per_text[:, :-1] self.assertTrue( torch.allclose(logits_per_image_eager, logits_per_image_sdpa, atol=4e-2, rtol=4e-2), f"Image logits max diff: {torch.max(torch.abs(logits_per_image_eager - logits_per_image_sdpa))}", ) self.assertTrue( torch.allclose(logits_per_text_eager, 
logits_per_text_sdpa, atol=4e-2, rtol=4e-2), f"Text logits max diff: {torch.max(torch.abs(logits_per_text_eager - logits_per_text_sdpa))}", ) @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) def test_eager_matches_sdpa_inference( self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels, ): "We need to relax a bit the `atols` for fp32 here due to the altup projections" atols = { ("cpu", False, torch.float32): 1e-6, ("cpu", False, torch.float16): 5e-3, ("cpu", False, torch.bfloat16): 3e-2, # this was relaxed ("cpu", True, torch.float32): 1e-6, ("cpu", True, torch.float16): 5e-3, ("cpu", True, torch.bfloat16): 3e-2, # this was relaxed ("cuda", False, torch.float32): 1e-6, ("cuda", False, torch.bfloat16): 3e-2, # this was relaxed ("cuda", False, torch.float16): 5e-3, ("cuda", True, torch.float32): 1e-6, ("cuda", True, torch.bfloat16): 3e-2, # this was relaxed ("cuda", True, torch.float16): 5e-3, } _test_eager_matches_sdpa_inference( self, name, dtype, padding_side, use_attention_mask, output_attentions, enable_kernels, atols=atols ) @require_vision @require_torch class Aimv2ModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "apple/aimv2-large-patch14-224-lit" model = Aimv2Model.from_pretrained(model_name, device_map=torch_device) processor = AutoProcessor.from_pretrained(model_name) image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt" ).to(model.device) # Forward pass with torch.no_grad(): outputs = model(**inputs) # Verify the logits self.assertEqual( outputs.logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) # handle device expected_logits = torch.tensor([[33.3550, 26.4255]]).to(model.device) torch.testing.assert_close(outputs.logits_per_image, expected_logits, atol=1e-3, rtol=1e-3) @require_vision @require_torch class Aimv2VisionModelIntegrationTests(unittest.TestCase): @slow def test_inference(self): model_name = "apple/aimv2-large-patch14-224" model = Aimv2VisionModel.from_pretrained(model_name, device_map=torch_device) processor = AutoImageProcessor.from_pretrained(model_name) image = Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) inputs = processor(image, return_tensors="pt").to(model.device) with torch.no_grad(): output = model(**inputs) # Verify logits shape self.assertEqual(output.last_hidden_state.shape, torch.Size([1, 256, 1024])) # Verify logits slice # fmt: off expected_logits = torch.tensor( [[ 0.0510, 0.0806, -0.0990, -0.0154], [ 2.7850, -2.5143, -0.3320, 2.4196], [ 2.8179, -2.4089, -0.2770, 2.3218], [ 2.7641, -2.4114, -0.3684, 2.2998], [ 2.7972, -2.3180, -0.4490, 2.2302], [ 2.8584, -2.5322, -0.2302, 2.4936], [-2.7849, 2.4121, 1.3670, -1.5514]]).to(model.device) # fmt: on output_slice = output.last_hidden_state.squeeze(0)[0:7, 0:4] self.assertTrue(torch.allclose(output_slice, expected_logits, atol=1e-3)) @slow def test_inference_for_native_resolution(self): model_name = "apple/aimv2-large-patch14-native" model = Aimv2VisionModel.from_pretrained(model_name, device_map="auto") processor = AutoImageProcessor.from_pretrained(model_name) image = 
Image.open(requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw) inputs = processor(image, return_tensors="pt").to(model.device) with torch.no_grad(): output = model(**inputs) # Verify logits shape self.assertEqual(output.last_hidden_state.shape, torch.Size([1, 1530, 1024])) # Verify logits slice # fmt: off expected_logits = torch.tensor( [[-1.3342, 0.3720, 0.0963, 0.4159], [-1.5328, 0.4677, 0.0936, 0.4321], [-0.3775, -0.2758, -0.0803, -0.5367], [-1.3877, 0.5561, -1.9064, -1.1766], [-0.5148, 0.0108, -0.4515, -0.6402], [-0.3400, -0.1711, -0.1855, -0.4219], [-1.2877, -0.0585, -0.1646, 0.7420]]).to(model.device) # fmt: on output_slice = output.last_hidden_state.squeeze(0)[0:7, 0:4] self.assertTrue(torch.allclose(output_slice, expected_logits, atol=1e-3))
transformers/tests/models/aimv2/test_modeling_aimv2.py/0
{ "file_path": "transformers/tests/models/aimv2/test_modeling_aimv2.py", "repo_id": "transformers", "token_count": 12409 }
565
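A minimal standalone sketch of the zero-shot scoring flow that the Aimv2 integration test above verifies, with an extra softmax step to turn logits_per_image into label probabilities; the checkpoint, prompts, and image URL come from the test, while the softmax step and the label names in the final print are illustrative additions.

import requests
import torch
from PIL import Image

from transformers import Aimv2Model, AutoProcessor

ckpt = "apple/aimv2-large-patch14-224-lit"
model = Aimv2Model.from_pretrained(ckpt).eval()
processor = AutoProcessor.from_pretrained(ckpt)

image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt"
)

with torch.no_grad():
    # (num_images, num_texts) image-text similarity logits, the tensor checked in the test
    logits_per_image = model(**inputs).logits_per_image

# Softmax over the text prompts turns the similarity logits into zero-shot label probabilities
probs = logits_per_image.softmax(dim=-1)
print(dict(zip(["cat", "dog"], probs[0].tolist())))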
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import pytest from packaging import version from transformers import AutoTokenizer, BertConfig, is_torch_available from transformers.cache_utils import EncoderDecoderCache from transformers.models.auto import get_values from transformers.testing_utils import ( CaptureLogger, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLMHeadModel, BertModel, logging, ) class BertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, 
token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): """ Returns a tiny configuration by default. """ return BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = BertModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = BertLMHeadModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_model_for_causal_lm_as_decoder( 
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = BertLMHeadModel(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, encoder_hidden_states=encoder_hidden_states, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = BertLMHeadModel(config=config).to(torch_device).eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForNextSentencePrediction(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def 
create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = BertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = BertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = BertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = BertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class BertModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( BertModel, BertLMHeadModel, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": BertModel, "fill-mask": BertForMaskedLM, "question-answering": BertForQuestionAnswering, "text-classification": BertForSequenceClassification, "text-generation": BertLMHeadModel, "token-classification": BertForTokenClassification, "zero-shot": BertForSequenceClassification, } if is_torch_available() else {} ) fx_compatible = True model_split_percents = [0.5, 0.8, 0.9] # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): 
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = BertModelTester(self) self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_model_3d_mask_shapes(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # manipulate input_mask config_and_inputs = list(config_and_inputs) batch_size, seq_length = config_and_inputs[3].shape config_and_inputs[3] = random_attention_mask([batch_size, seq_length, seq_length]) self.model_tester.create_and_check_model(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_model_as_decoder_with_3d_input_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() batch_size, seq_length = input_mask.shape input_mask = random_attention_mask([batch_size, seq_length, seq_length]) batch_size, seq_length = encoder_attention_mask.shape encoder_attention_mask = random_attention_mask([batch_size, seq_length, seq_length]) self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_for_causal_lm_as_decoder(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() 
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_decoder_model_past_with_large_inputs_relative_pos_emb(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() config_and_inputs[0].position_embedding_type = "relative_key" self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_warning_if_padding_and_no_attention_mask(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.model_tester.prepare_config_and_inputs() # Set pad tokens in the input_ids input_ids[0, 0] = config.pad_token_id # Check for warnings if the attention_mask is missing. logger = logging.get_logger("transformers.modeling_utils") # clear cache so we can test the warning is emitted (from `warning_once`). 
logger.warning_once.cache_clear() with CaptureLogger(logger) as cl: model = BertModel(config=config) model.to(torch_device) model.eval() model(input_ids, attention_mask=None, token_type_ids=token_type_ids) self.assertIn("We strongly recommend passing in an `attention_mask`", cl.out) @slow def test_model_from_pretrained(self): model_name = "google-bert/bert-base-uncased" model = BertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class BertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = BertModel.from_pretrained("google-bert/bert-base-uncased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor([[[0.4249, 0.1008, 0.7531], [0.3771, 0.1188, 0.7467], [0.4152, 0.1098, 0.7108]]]) torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_no_head_relative_embedding_key(self): model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.0756, 0.3142, -0.5128], [0.3761, 0.3462, -0.5477], [0.2052, 0.3760, -0.1240]]] ) torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_no_head_relative_embedding_key_query(self): model = BertModel.from_pretrained("zhiheng-huang/bert-base-uncased-embedding-relative-key-query") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): output = model(input_ids, attention_mask=attention_mask)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[0.6496, 0.3784, 0.8203], [0.8148, 0.5656, 0.2636], [-0.0681, 0.5597, 0.7045]]] ) torch.testing.assert_close(output[:, 1:4, 1:4], expected_slice, rtol=1e-4, atol=1e-4) def test_sdpa_ignored_mask(self): pkv = [] model = BertModel.from_pretrained("hf-internal-testing/tiny-random-BertModel", attn_implementation="eager") model_sdpa = BertModel.from_pretrained("hf-internal-testing/tiny-random-BertModel", attn_implementation="sdpa") model = model.eval() model_sdpa = model_sdpa.eval() for _ in range(model.config.num_hidden_layers): num_heads = model.config.num_attention_heads head_dim = model.config.hidden_size // model.config.num_attention_heads pkv.append([torch.rand(1, num_heads, 3, head_dim), torch.rand(1, num_heads, 3, head_dim)]) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel") inp = tokenizer("I am in Paris and", return_tensors="pt") del inp["attention_mask"] with torch.no_grad(): res_eager = model(**inp) res_sdpa = model_sdpa(**inp) self.assertTrue( torch.allclose(res_eager.last_hidden_state, res_sdpa.last_hidden_state, atol=1e-5, rtol=1e-4) ) # Case where query length != kv_length. 
Note that model needs to be a decoder so we can use cache model.config.is_decoder = True model_sdpa.config.is_decoder = True res_eager = model(**inp, past_key_values=EncoderDecoderCache.from_legacy_cache(pkv), use_cache=True) res_sdpa = model_sdpa(**inp, past_key_values=EncoderDecoderCache.from_legacy_cache(pkv), use_cache=True) self.assertTrue( torch.allclose(res_eager.last_hidden_state, res_sdpa.last_hidden_state, atol=1e-5, rtol=1e-4) ) @slow @pytest.mark.torch_export_test def test_export(self): if version.parse(torch.__version__) < version.parse("2.4.0"): self.skipTest(reason="This test requires torch >= 2.4 to run.") bert_model = "google-bert/bert-base-uncased" device = "cpu" attn_implementation = "sdpa" max_length = 512 tokenizer = AutoTokenizer.from_pretrained(bert_model) inputs = tokenizer( "the man worked as a [MASK].", return_tensors="pt", padding="max_length", max_length=max_length, ) model = BertForMaskedLM.from_pretrained( bert_model, device_map=device, attn_implementation=attn_implementation, use_cache=True, ) logits = model(**inputs).logits eg_predicted_mask = tokenizer.decode(logits[0, 6].topk(5).indices) self.assertEqual(eg_predicted_mask.split(), ["carpenter", "waiter", "barber", "mechanic", "salesman"]) exported_program = torch.export.export( model, args=(inputs["input_ids"],), kwargs={"attention_mask": inputs["attention_mask"]}, strict=True, ) result = exported_program.module().forward(inputs["input_ids"], inputs["attention_mask"]) ep_predicted_mask = tokenizer.decode(result.logits[0, 6].topk(5).indices) self.assertEqual(eg_predicted_mask, ep_predicted_mask)
transformers/tests/models/bert/test_modeling_bert.py/0
{ "file_path": "transformers/tests/models/bert/test_modeling_bert.py", "repo_id": "transformers", "token_count": 14119 }
566
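The test_export check in the BERT file above hardcodes the [MASK] position (index 6). A short sketch of the same masked-LM decoding with the mask position located programmatically; the checkpoint and prompt come from the test, the rest is illustrative.

import torch

from transformers import AutoTokenizer, BertForMaskedLM

ckpt = "google-bert/bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = BertForMaskedLM.from_pretrained(ckpt).eval()

inputs = tokenizer("the man worked as a [MASK].", return_tensors="pt")
# Find the [MASK] token instead of relying on a fixed index
mask_pos = (inputs["input_ids"] == tokenizer.mask_token_id).nonzero(as_tuple=True)[1].item()

with torch.no_grad():
    logits = model(**inputs).logits

top5 = logits[0, mask_pos].topk(5).indices
# The integration test expects: carpenter, waiter, barber, mechanic, salesman
print(tokenizer.decode(top5))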
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import unittest

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import require_sacremoses, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_sacremoses
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "microsoft/biogpt"
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        cls.merges_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(cls.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(cls.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt"""
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
transformers/tests/models/biogpt/test_tokenization_biogpt.py/0
{ "file_path": "transformers/tests/models/biogpt/test_tokenization_biogpt.py", "repo_id": "transformers", "token_count": 1549 }
567
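The toy BPE fixture built in setUpClass of the BioGPT tokenizer test above can be reproduced outside the test harness. This standalone sketch (requires sacremoses) writes the same vocab and merges files into a throwaway directory and checks the tokenization asserted in test_full_tokenizer; only the temporary-directory handling is new.

import json
import os
import tempfile

from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>",
         "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>"]
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["vocab_file"])
merges_file = os.path.join(tmpdir, VOCAB_FILES_NAMES["merges_file"])
with open(vocab_file, "w") as fp:
    fp.write(json.dumps(dict(zip(vocab, range(len(vocab))))))
with open(merges_file, "w") as fp:
    fp.write("\n".join(merges))

tokenizer = BioGptTokenizer(vocab_file, merges_file)
print(tokenizer.tokenize("lower"))                                  # ["low", "er</w>"]
print(tokenizer.convert_tokens_to_ids(["low", "er</w>", "<unk>"]))  # [14, 15, 20]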
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import pytest from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class BlipProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = BlipProcessor @classmethod def setUpClass(cls): cls.tmpdirname = tempfile.mkdtemp() image_processor = BlipImageProcessor() tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel") processor = BlipProcessor(image_processor, tokenizer) processor.save_pretrained(cls.tmpdirname) def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor @classmethod def tearDownClass(cls): shutil.rmtree(cls.tmpdirname, ignore_errors=True) def test_save_load_pretrained_additional_features(self): with tempfile.TemporaryDirectory() as tmpdir: processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()) processor.save_pretrained(tmpdir) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)") image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0) processor = BlipProcessor.from_pretrained( tmpdir, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast) self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string()) self.assertIsInstance(processor.image_processor, BlipImageProcessor) def test_image_processor(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor) image_input = self.prepare_image_inputs() input_feat_extract = image_processor(image_input, return_tensors="np") input_processor = processor(images=image_input, return_tensors="np") for key in input_feat_extract: self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str, return_token_type_ids=False) for key in encoded_tok: self.assertListEqual(encoded_tok[key], encoded_processor[key]) def test_processor(self): image_processor = self.get_image_processor() 
tokenizer = self.get_tokenizer() processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor) input_str = "lower newer" image_input = self.prepare_image_inputs() inputs = processor(text=input_str, images=image_input) self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"]) # test if it raises when no input is passed with pytest.raises(ValueError): processor() def test_tokenizer_decode(self): image_processor = self.get_image_processor() tokenizer = self.get_tokenizer() processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor) @require_torch @require_vision def test_unstructured_kwargs_batched(self): if "image_processor" not in self.processor_class.attributes: self.skipTest(f"image_processor attribute not present in {self.processor_class}") image_processor = self.get_component("image_processor") tokenizer = self.get_component("tokenizer") processor = self.processor_class(tokenizer=tokenizer, image_processor=image_processor) self.skip_processor_without_typed_kwargs(processor) input_str = ["lower newer", "upper older longer string"] image_input = self.prepare_image_inputs(batch_size=2) inputs = processor( text=input_str, images=image_input, return_tensors="pt", crop_size={"height": 214, "width": 214}, size={"height": 214, "width": 214}, padding="longest", max_length=76, ) self.assertEqual(inputs["pixel_values"].shape[2], 214) self.assertEqual(len(inputs["input_ids"][0]), 24)
transformers/tests/models/blip/test_processing_blip.py/0
{ "file_path": "transformers/tests/models/blip/test_processing_blip.py", "repo_id": "transformers", "token_count": 2299 }
568
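A sketch of the processor composition that the BLIP processor test above verifies, using a synthetic image in place of the mixin's prepare_image_inputs; the tiny tokenizer checkpoint comes from setUpClass, the image size is arbitrary.

import numpy as np
from PIL import Image

from transformers import BertTokenizer, BlipImageProcessor, BlipProcessor

processor = BlipProcessor(
    image_processor=BlipImageProcessor(),
    tokenizer=BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel"),
)

# A random RGB image stands in for the test fixture's prepared image inputs
image = Image.fromarray(np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8))

inputs = processor(text="lower newer", images=image, return_tensors="pt")
# Mirrors the keys asserted in test_processor
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']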
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

if is_torch_available():
    import torch

    from transformers import CamembertModel


@require_torch
@require_sentencepiece
@require_tokenizers
class CamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = CamembertModel.from_pretrained("almanach/camembert-base", attn_implementation="eager")
        model.to(torch_device)

        input_ids = torch.tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            device=torch_device,
            dtype=torch.long,
        )  # J'aime le camembert !
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"]
        expected_shape = torch.Size((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            device=torch_device,
            dtype=torch.float,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)

    @slow
    def test_output_embeds_base_model_sdpa(self):
        input_ids = torch.tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            device=torch_device,
            dtype=torch.long,
        )  # J'aime le camembert !

        expected_slice = torch.tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            device=torch_device,
            dtype=torch.float,
        )

        model = CamembertModel.from_pretrained("almanach/camembert-base", attn_implementation="sdpa").to(torch_device)
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()

        torch.testing.assert_close(output[:, :3, :3], expected_slice, rtol=1e-4, atol=1e-4)
transformers/tests/models/camembert/test_modeling_camembert.py/0
{ "file_path": "transformers/tests/models/camembert/test_modeling_camembert.py", "repo_id": "transformers", "token_count": 1257 }
569
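The CamemBERT integration tests above feed hardcoded input ids. A sketch of the same forward pass driven by the tokenizer instead; the sentence is the one referenced in the test's comment, and since the tokenizer may produce slightly different ids than the hardcoded ones, the hidden-state slice is printed rather than asserted against expected_slice.

import torch

from transformers import AutoTokenizer, CamembertModel

ckpt = "almanach/camembert-base"
tokenizer = AutoTokenizer.from_pretrained(ckpt)
model = CamembertModel.from_pretrained(ckpt).eval()

inputs = tokenizer("J'aime le camembert !", return_tensors="pt")
with torch.no_grad():
    last_hidden_state = model(**inputs).last_hidden_state

print(last_hidden_state.shape)       # (1, seq_len, 768)
print(last_hidden_state[:, :3, :3])  # analogous to the slice compared in the test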
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch ColQwen2 model.""" import unittest from typing import ClassVar import pytest import torch from datasets import load_dataset from tests.test_configuration_common import ConfigTester from tests.test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from transformers import is_torch_available from transformers.models.colqwen2.configuration_colqwen2 import ColQwen2Config from transformers.models.colqwen2.modeling_colqwen2 import ColQwen2ForRetrieval, ColQwen2ForRetrievalOutput from transformers.models.colqwen2.processing_colqwen2 import ColQwen2Processor from transformers.testing_utils import ( Expectations, cleanup, require_bitsandbytes, require_torch, require_vision, slow, torch_device, ) if is_torch_available(): import torch class ColQwen2ForRetrievalModelTester: def __init__( self, parent, ignore_index=-100, pad_token_id=2, projector_hidden_act="gelu", seq_length=11, vision_feature_select_strategy="default", vision_feature_layer=-1, projection_dim=32, is_training=False, use_cache=False, vlm_config={ "_name_or_path": "Qwen/Qwen2-VL-2B-Instruct", "bos_token_id": 0, "eos_token_id": 1, "vision_start_token_id": 3, "image_token_id": 4, "video_token_id": 5, "hidden_size": 64, "intermediate_size": 2, "max_window_layers": 2, "model_type": "qwen2_vl", "num_attention_heads": 2, "num_hidden_layers": 2, "num_key_value_heads": 2, "rms_norm_eps": 1e-06, "rope_scaling": {"mrope_section": [4, 6, 6], "rope_type": "default", "type": "default"}, "sliding_window": 32768, "tie_word_embeddings": True, "vision_config": { "depth": 2, "embed_dim": 32, "hidden_act": "quick_gelu", "hidden_size": 64, "mlp_ratio": 4, "num_heads": 4, "patch_size": 14, "in_chans": 3, "spatial_merge_size": 1, "temporal_patch_size": 2, }, "vision_end_token_id": 151653, "vision_token_id": 151654, "vocab_size": 99, }, embedding_dim=32, initializer_range=0.02, ): self.parent = parent self.ignore_index = ignore_index self.pad_token_id = pad_token_id # `image_token_index` is set to 0 to pass "resize_embeddings" test, do not modify self.image_token_index = 0 self.image_token_id = vlm_config["image_token_id"] self.video_token_id = vlm_config["video_token_id"] self.pad_token_id = vlm_config["eos_token_id"] self.vision_start_token_id = vlm_config["vision_start_token_id"] self.projector_hidden_act = projector_hidden_act self.vision_feature_select_strategy = vision_feature_select_strategy self.vision_feature_layer = vision_feature_layer self.image_size = 56 self.num_image_tokens = 4 self.seq_length = seq_length + self.num_image_tokens self.projection_dim = projection_dim self.num_hidden_layers = vlm_config["num_hidden_layers"] self.vocab_size = vlm_config["vocab_size"] self.hidden_size = vlm_config["hidden_size"] self.num_attention_heads = vlm_config["num_attention_heads"] self.is_training = is_training self.batch_size = 3 self.num_channels = vlm_config["vision_config"]["in_chans"] 
self.encoder_seq_length = self.seq_length self.use_cache = use_cache self.vlm_config = vlm_config self.embedding_dim = embedding_dim self.initializer_range = initializer_range def get_config(self): return ColQwen2Config( vlm_config=self.vlm_config, embedding_dim=self.embedding_dim, initializer_range=self.initializer_range, ) def prepare_config_and_inputs(self): config = self.get_config() patch_size = config.vlm_config.vision_config.patch_size temporal_patch_size = config.vlm_config.vision_config.temporal_patch_size # NOTE: Assume all inputs are square images of the same size. num_patches = (self.image_size // patch_size) ** 2 pixel_values = floats_tensor( [ self.batch_size * num_patches, self.num_channels * (patch_size**2) * temporal_patch_size, ] ) # Hardcoded image grid size: do not change unless you modified image size or patch size! image_grid_thw = torch.tensor([1, 4, 4]).repeat(self.batch_size, 1) # NOTE: The following adjustment ensures correct behavior with DDP on multiple GPUs. # Line is copied from `src/transformers/models/colqwen2/processing_colqwen2.py` offsets = image_grid_thw[:, 1] * image_grid_thw[:, 2] # (batch_size,) pixel_values = list( torch.split(pixel_values, offsets.tolist()) ) # [(num_patches_image_0, pixel_values), ..., (num_patches_image_n, pixel_values)] pixel_values = torch.nn.utils.rnn.pad_sequence( pixel_values, batch_first=True ) # (batch_size, max_num_patches, pixel_values) return config, pixel_values, image_grid_thw def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, image_grid_thw = config_and_inputs input_ids = ( ids_tensor( shape=[self.batch_size, self.seq_length], vocab_size=config.vlm_config.vocab_size - 1, ) + 1 ) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) input_ids[:, -1] = self.pad_token_id input_ids[:, : self.num_image_tokens] = self.image_token_id input_ids[input_ids == self.video_token_id] = self.pad_token_id input_ids[input_ids == self.image_token_id] = self.pad_token_id input_ids[input_ids == self.vision_start_token_id] = self.pad_token_id inputs_dict = { "input_ids": input_ids, "pixel_values": pixel_values, "image_grid_thw": image_grid_thw, "attention_mask": attention_mask, "labels": input_ids, } return config, inputs_dict @require_torch class ColQwen2ForRetrievalModelTest(ModelTesterMixin, unittest.TestCase): """ Model tester for `ColQwen2ForRetrieval`. 
""" all_model_classes = (ColQwen2ForRetrieval,) if is_torch_available() else () fx_compatible = False test_torchscript = False test_pruning = False test_resize_embeddings = True test_head_masking = False def setUp(self): self.model_tester = ColQwen2ForRetrievalModelTester(self) self.config_tester = ConfigTester(self, config_class=ColQwen2Config, has_text_modality=False) def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["pixel_values"] wte = model.get_input_embeddings() inputs["inputs_embeds"] = wte(input_ids) with torch.no_grad(): model(**inputs) # overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs # while some other models require pixel_values to be present def test_inputs_embeds_matches_input_ids(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) input_ids = inputs["input_ids"] del inputs["input_ids"] del inputs["pixel_values"] inputs_embeds = model.get_input_embeddings()(input_ids) with torch.no_grad(): out_ids = model(input_ids=input_ids, **inputs)[0] out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0] self.assertTrue(torch.allclose(out_embeds, out_ids)) @slow @require_vision def test_colqwen2_forward_inputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) with torch.no_grad(): outputs = model(**inputs, return_dict=True) self.assertIsInstance(outputs, ColQwen2ForRetrievalOutput) @unittest.skip(reason="Some undefined behavior encountered with test versions of Qwen2-VL. Skip for now.") def test_model_parallelism(self): pass @unittest.skip(reason="Pass because ColQwen2 requires `attention_mask is not None`") def test_sdpa_can_dispatch_on_flash(self): pass @unittest.skip(reason="Pass because ColQwen2 requires `attention_mask is not None`") @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass @require_torch class ColQwen2ModelIntegrationTest(unittest.TestCase): model_name: ClassVar[str] = "vidore/colqwen2-v1.0-hf" def setUp(self): self.processor = ColQwen2Processor.from_pretrained(self.model_name) def tearDown(self): cleanup(torch_device, gc_collect=True) @require_bitsandbytes @slow def test_model_integration_test(self): """ Test if the model is able to retrieve the correct pages for a small and easy dataset. 
""" model = ColQwen2ForRetrieval.from_pretrained( self.model_name, dtype=torch.float16, load_in_8bit=True, ).eval() # Load the test dataset ds = load_dataset("hf-internal-testing/document-visual-retrieval-test", split="test") # Preprocess the examples batch_images = self.processor(images=ds["image"]).to(torch_device) batch_queries = self.processor(text=ds["query"]).to(torch_device) # Run inference with torch.inference_mode(): image_embeddings = model(**batch_images).embeddings query_embeddings = model(**batch_queries).embeddings # Compute retrieval scores scores = self.processor.score_retrieval( query_embeddings=query_embeddings, passage_embeddings=image_embeddings, ) # (num_queries, num_passages) assert scores.ndim == 2, f"Expected 2D tensor, got {scores.ndim}" assert scores.shape == (len(ds), len(ds)), f"Expected shape {(len(ds), len(ds))}, got {scores.shape}" # Check if the maximum scores per row are in the diagonal of the matrix score self.assertTrue((scores.argmax(axis=1) == torch.arange(len(ds), device=scores.device)).all()) # Further validation: fine-grained check, with a hardcoded score from the original Hf implementation. expectations = Expectations( { ("cuda", 7): [ [15.0938, 8.3203, 15.0391], [9.6328, 16.9062, 10.5312], [15.6562, 12.2656, 20.2969], ], ("cuda", 8): [ [15.0703, 8.7422, 15.0312], [9.5078, 16.8906, 10.6250], [15.6484, 12.3984, 20.4688], ], } ) expected_scores = torch.tensor(expectations.get_expectation(), dtype=scores.dtype) assert torch.allclose(scores, expected_scores, atol=1e-3), f"Expected scores {expected_scores}, got {scores}"
transformers/tests/models/colqwen2/test_modeling_colqwen2.py/0
{ "file_path": "transformers/tests/models/colqwen2/test_modeling_colqwen2.py", "repo_id": "transformers", "token_count": 5938 }
570
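A sketch of the retrieval flow exercised by test_model_integration_test in the ColQwen2 file above, loaded in full precision and without the 8-bit quantization or GPU-specific expectation tables; the checkpoint and dataset names come from the test.

import torch
from datasets import load_dataset

from transformers.models.colqwen2.modeling_colqwen2 import ColQwen2ForRetrieval
from transformers.models.colqwen2.processing_colqwen2 import ColQwen2Processor

ckpt = "vidore/colqwen2-v1.0-hf"
processor = ColQwen2Processor.from_pretrained(ckpt)
model = ColQwen2ForRetrieval.from_pretrained(ckpt).eval()

ds = load_dataset("hf-internal-testing/document-visual-retrieval-test", split="test")
batch_images = processor(images=ds["image"])
batch_queries = processor(text=ds["query"])

with torch.inference_mode():
    image_embeddings = model(**batch_images).embeddings
    query_embeddings = model(**batch_queries).embeddings

# (num_queries, num_passages); each query should score highest on its own page
scores = processor.score_retrieval(query_embeddings=query_embeddings, passage_embeddings=image_embeddings)
print(scores.argmax(dim=1))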
# Copyright 2022 The OpenBMB Team and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import unittest

from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow

from ...test_tokenization_common import TokenizerTesterMixin


@require_jieba
class CPMAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    from_pretrained_id = "openbmb/cpm-ant-10b"
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        cls.vocab_file = os.path.join(cls.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(cls.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_jieba_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
transformers/tests/models/cpmant/test_tokenization_cpmant.py/0
{ "file_path": "transformers/tests/models/cpmant/test_tokenization_cpmant.py", "repo_id": "transformers", "token_count": 1096 }
571
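A standalone version of test_pre_tokenization from the CPM-Ant file above, usable outside the test harness; it requires the jieba package, downloads the tokenizer files of the 10B checkpoint, and all expected values come from the test.

from transformers.models.cpmant.tokenization_cpmant import CpmAntTokenizer

tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")

text = "今天天气真好!"
tokens = tokenizer.tokenize(text)
print(tokens)  # ["今天", "天气", "真", "好", "!"]

# Prepend the BOS token before converting to ids, as the test does
ids = tokenizer.convert_tokens_to_ids([tokenizer.bos_token] + tokens)
print(ids)                    # [6, 9802, 14962, 2082, 831, 244]
print(tokenizer.decode(ids))  # round-trips back to "今天天气真好!"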
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Dinat model.""" import collections import unittest from transformers import DinatConfig from transformers.testing_utils import require_natten, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import DinatBackbone, DinatForImageClassification, DinatModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class DinatModelTester: def __init__( self, parent, batch_size=13, image_size=64, patch_size=4, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 4, 8], kernel_size=3, dilations=[[3], [1, 2], [1]], mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, num_labels=10, out_features=["stage1", "stage2"], out_indices=[1, 2], ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.kernel_size = kernel_size self.dilations = dilations self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.num_labels = num_labels self.out_features = out_features self.out_indices = out_indices def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return DinatConfig( num_labels=self.num_labels, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, kernel_size=self.kernel_size, dilations=self.dilations, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, patch_norm=self.patch_norm, 
layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, ) def create_and_check_model(self, config, pixel_values, labels): model = DinatModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = (config.image_size // config.patch_size) // (2 ** (len(config.depths) - 1)) expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, expected_height, expected_width, expected_dim) ) def create_and_check_for_image_classification(self, config, pixel_values, labels): model = DinatForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) # test greyscale images config.num_channels = 1 model = DinatForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_backbone(self, config, pixel_values, labels): model = DinatBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify hidden states self.parent.assertEqual(len(result.feature_maps), len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, model.channels[0], 16, 16]) # verify channels self.parent.assertEqual(len(model.channels), len(config.out_features)) # verify backbone works with out_features=None config.out_features = None model = DinatBackbone(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # verify feature maps self.parent.assertEqual(len(result.feature_maps), 1) self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, model.channels[-1], 4, 4]) # verify channels self.parent.assertEqual(len(model.channels), 1) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_natten @require_torch class DinatModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DinatModel, DinatForImageClassification, DinatBackbone, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"image-feature-extraction": DinatModel, "image-classification": DinatForImageClassification} if is_torch_available() else {} ) fx_compatible = False test_torchscript = False test_pruning = False test_resize_embeddings = False test_head_masking = False test_torch_exportable = True def setUp(self): self.model_tester = DinatModelTester(self) self.config_tester = ConfigTester( self, config_class=DinatConfig, embed_dim=37, common_properties=["patch_size", "num_channels"] ) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_backbone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_backbone(*config_and_inputs) @unittest.skip(reason="Dinat does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Dinat does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_attention_outputs(self): self.skipTest(reason="Dinat's attention operation is handled entirely by NATTEN.") def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # Dinat has a different seq_length patch_size = ( config.patch_size if isinstance(config.patch_size, collections.abc.Iterable) else (config.patch_size, config.patch_size) ) height = image_size[0] // patch_size[0] width = image_size[1] // patch_size[1] self.assertListEqual( list(hidden_states[0].shape[-3:]), [height, width, self.model_tester.embed_dim], ) if model_class.__name__ != "DinatBackbone": reshaped_hidden_states = outputs.reshaped_hidden_states self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape reshaped_hidden_states = ( reshaped_hidden_states[0].view(batch_size, num_channels, height, width).permute(0, 2, 3, 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-3:]), [height, width, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = ( self.model_tester.image_size if isinstance(self.model_tester.image_size, collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) @slow def test_model_from_pretrained(self): model_name = "shi-labs/dinat-mini-in1k-224" model = DinatModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @require_natten @require_vision @require_torch class DinatModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("shi-labs/dinat-mini-in1k-224") 
if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = DinatForImageClassification.from_pretrained("shi-labs/dinat-mini-in1k-224").to(torch_device) image_processor = self.default_image_processor image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.1545, -0.7667, 0.4642]).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4) @require_torch @require_natten class DinatBackboneTest(unittest.TestCase, BackboneTesterMixin): all_model_classes = (DinatBackbone,) if is_torch_available() else () config_class = DinatConfig def setUp(self): self.model_tester = DinatModelTester(self)
transformers/tests/models/dinat/test_modeling_dinat.py/0
{ "file_path": "transformers/tests/models/dinat/test_modeling_dinat.py", "repo_id": "transformers", "token_count": 6149 }
572
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Doge model.""" import unittest from transformers import AutoTokenizer, DogeConfig, is_torch_available, set_seed from transformers.testing_utils import ( require_read_token, require_torch, require_torch_accelerator, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DogeForCausalLM, DogeForSequenceClassification, DogeModel, ) class DogeModelTester: def __init__( self, parent, batch_size=8, seq_length=16, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=128, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=64, hidden_act="silu", max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.pad_token_id = pad_token_id self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels def get_config(self): return DogeConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def 
create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels): model = DogeModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = DogeModel(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, encoder_hidden_states, encoder_attention_mask, ): model = DogeForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = DogeForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next tokens and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next attention mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, 
"attention_mask": input_mask} return config, inputs_dict @require_torch class DogeModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( DogeModel, DogeForCausalLM, DogeForSequenceClassification, ) if is_torch_available() else () ) all_generative_model_classes = (DogeForCausalLM,) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": DogeModel, "text-classification": DogeForSequenceClassification, "text-generation": DogeForCausalLM, "zero-shot": DogeForSequenceClassification, } if is_torch_available() else {} ) has_attentions = False test_headmasking = False test_pruning = False test_torchscript = False fx_compatible = False # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] # used in `test_torch_compile_for_training` _torch_compile_train_cls = DogeForCausalLM if is_torch_available() else None def setUp(self): self.model_tester = DogeModelTester(self) self.config_tester = ConfigTester(self, config_class=DogeConfig, hidden_size=32) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_doge_sequence_classification_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = DogeForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_doge_sequence_classification_model_for_single_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "single_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size) model = DogeForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) def test_doge_sequence_classification_model_for_multi_label(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.num_labels = 3 config.problem_type = "multi_label_classification" input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) sequence_labels = ids_tensor( [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size ).to(torch.float) model = DogeForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels) self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels)) @unittest.skip(reason="Doge buffers include complex numbers, which breaks this test") def test_save_load_fast_init_from_base(self): pass @require_torch_accelerator class 
DogeIntegrationTest(unittest.TestCase): # This variable is used to determine which CUDA device we are using for our runners (A10 or T4) # Depending on the hardware we get different logits / generations cuda_compute_capability_major_version = None @classmethod def setUpClass(cls): if is_torch_available() and torch.cuda.is_available(): # 8 is for A100 / A10 and 7 for T4 cls.cuda_compute_capability_major_version = torch.cuda.get_device_capability()[0] @slow @require_read_token def test_Doge_20M_hard(self): """ An integration test for Doge-20M. It tests against a long output so that subtle numerical differences become visible. """ EXPECTED_TEXT = "Here's everything I know about dogs. Dogs is the best animal in the world. It is a very popular and popular dog in the United States. It is a very popular" tokenizer = AutoTokenizer.from_pretrained("SmallDoge/Doge-20M") model = DogeForCausalLM.from_pretrained("SmallDoge/Doge-20M", device_map="auto", dtype=torch.bfloat16) input_text = ["Here's everything I know about dogs. Dogs is the best animal in the"] set_seed(0) model_inputs = tokenizer(input_text, return_tensors="pt").to(model.device) generated_ids = model.generate(**model_inputs, max_new_tokens=20, do_sample=False) generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(generated_text, EXPECTED_TEXT)
transformers/tests/models/doge/test_modeling_doge.py/0
{ "file_path": "transformers/tests/models/doge/test_modeling_doge.py", "repo_id": "transformers", "token_count": 6329 }
573
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from tests.models.superglue.test_image_processing_superglue import ( SuperGlueImageProcessingTest, SuperGlueImageProcessingTester, ) from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available if is_torch_available(): import numpy as np import torch from transformers.models.efficientloftr.modeling_efficientloftr import KeypointMatchingOutput if is_vision_available(): from transformers import EfficientLoFTRImageProcessor def random_array(size): return np.random.randint(255, size=size) def random_tensor(size): return torch.rand(size) class EfficientLoFTRImageProcessingTester(SuperGlueImageProcessingTester): """Tester for EfficientLoFTRImageProcessor""" def __init__( self, parent, batch_size=6, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_grayscale=True, ): super().__init__( parent, batch_size, num_channels, image_size, min_resolution, max_resolution, do_resize, size, do_grayscale ) def prepare_keypoint_matching_output(self, pixel_values): """Prepare a fake output for the keypoint matching model with random matches between 50 keypoints per image.""" max_number_keypoints = 50 batch_size = len(pixel_values) keypoints = torch.zeros((batch_size, 2, max_number_keypoints, 2)) matches = torch.full((batch_size, 2, max_number_keypoints), -1, dtype=torch.int) scores = torch.zeros((batch_size, 2, max_number_keypoints)) for i in range(batch_size): random_number_keypoints0 = np.random.randint(10, max_number_keypoints) random_number_keypoints1 = np.random.randint(10, max_number_keypoints) random_number_matches = np.random.randint(5, min(random_number_keypoints0, random_number_keypoints1)) keypoints[i, 0, :random_number_keypoints0] = torch.rand((random_number_keypoints0, 2)) keypoints[i, 1, :random_number_keypoints1] = torch.rand((random_number_keypoints1, 2)) random_matches_indices0 = torch.randperm(random_number_keypoints1, dtype=torch.int)[:random_number_matches] random_matches_indices1 = torch.randperm(random_number_keypoints0, dtype=torch.int)[:random_number_matches] matches[i, 0, random_matches_indices1] = random_matches_indices0 matches[i, 1, random_matches_indices0] = random_matches_indices1 scores[i, 0, random_matches_indices1] = torch.rand((random_number_matches,)) scores[i, 1, random_matches_indices0] = torch.rand((random_number_matches,)) return KeypointMatchingOutput(keypoints=keypoints, matches=matches, matching_scores=scores) @require_torch @require_vision class EfficientLoFTRImageProcessingTest(SuperGlueImageProcessingTest, unittest.TestCase): image_processing_class = EfficientLoFTRImageProcessor if is_vision_available() else None def setUp(self) -> None: super().setUp() self.image_processor_tester = EfficientLoFTRImageProcessingTester(self)
transformers/tests/models/efficientloftr/test_image_processing_efficientloftr.py/0
{ "file_path": "transformers/tests/models/efficientloftr/test_image_processing_efficientloftr.py", "repo_id": "transformers", "token_count": 1442 }
574
# Copyright 2022 Meta Platforms authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch FLAVA model.""" import inspect import os import random import tempfile import unittest import numpy as np import requests from transformers import ( FlavaConfig, FlavaImageCodebookConfig, FlavaImageConfig, FlavaMultimodalConfig, FlavaTextConfig, ) from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FlavaForPreTraining, FlavaImageCodebook, FlavaImageModel, FlavaModel, FlavaMultimodalModel, FlavaTextModel, ) else: FlavaModel = None FlavaForPreTraining = None torch = {} if is_vision_available(): from PIL import Image from transformers import FlavaProcessor class FlavaImageModelTester: def __init__( self, parent, batch_size=12, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=30, patch_size=2, num_channels=3, qkv_bias=True, mask_token=True, vocab_size=99, ): self.parent = parent self.batch_size = batch_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.mask_token = mask_token self.vocab_size = vocab_size def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) num_patches = self.image_size // self.patch_size bool_masked_pos = ( torch.rand((self.batch_size, num_patches, num_patches), device=pixel_values.device) < 0.9 ).long() config = self.get_config() return config, pixel_values, bool_masked_pos def get_config(self): return FlavaImageConfig( hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, qkv_bias=self.qkv_bias, 
mask_token=self.mask_token, vocab_size=self.vocab_size, ) def create_and_check_model(self, config, pixel_values, bool_masked_pos): model = FlavaImageModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values, bool_masked_pos) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) image_size = (self.image_size, self.image_size) patch_size = (self.patch_size, self.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, bool_masked_pos = config_and_inputs inputs_dict = {"pixel_values": pixel_values, "bool_masked_pos": bool_masked_pos} return config, inputs_dict @require_torch class FlavaImageModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as FLAVA does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (FlavaImageModel,) if is_torch_available() else () test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = FlavaImageModelTester(self) self.config_tester = ConfigTester(self, config_class=FlavaImageConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip("Flava does not use input_ids") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True # in FLAVA, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions 
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # FLAVA has a different seq_length image_size = (self.model_tester.image_size, self.model_tester.image_size) patch_size = (self.model_tester.patch_size, self.model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_length = num_patches + 1 self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @slow def test_model_from_pretrained(self): model_name = "facebook/flava-full" model = FlavaImageModel.from_pretrained(model_name) self.assertIsNotNone(model) class FlavaTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, vocab_size=102, type_vocab_size=2, max_position_embeddings=512, position_embedding_type="absolute", hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.0, 
attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, qkv_bias=True, ): self.parent = parent self.batch_size = batch_size self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.seq_length = seq_length self.vocab_size = vocab_size self.type_vocab_size = type_vocab_size self.max_position_embeddings = max_position_embeddings self.position_embedding_type = position_embedding_type self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.pad_token_id = pad_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = self.get_config() return config, input_ids, token_type_ids, input_mask def get_config(self): return FlavaTextConfig( vocab_size=self.vocab_size, type_vocab_size=self.type_vocab_size, max_position_embeddings=self.max_position_embeddings, position_embedding_type=self.position_embedding_type, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, pad_token_id=self.pad_token_id, qkv_bias=self.qkv_bias, ) def create_and_check_model(self, config, input_ids, token_type_ids, input_mask): model = FlavaTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, token_type_ids=token_type_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class FlavaTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (FlavaTextModel,) if is_torch_available() else () test_pruning = False test_head_masking = False test_torchscript = False def setUp(self): self.model_tester = FlavaTextModelTester(self) self.config_tester = ConfigTester(self, config_class=FlavaTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="FLAVA does not use input_embeds") def test_inputs_embeds(self): # FLAVA does not use inputs_embeds pass @slow def test_model_from_pretrained(self): model_name = "facebook/flava-full" model = FlavaTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class FlavaMultimodalModelTester: def __init__( self, parent, batch_size=12, seq_length=44, use_input_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, qkv_bias=True, ce_ignore_index=-100, use_cls_token=True, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.use_input_mask = use_input_mask self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.qkv_bias = qkv_bias self.ce_ignore_index = ce_ignore_index self.use_cls_token = use_cls_token def prepare_config_and_inputs(self): hidden_states = floats_tensor([self.batch_size, self.seq_length - 1, self.hidden_size]) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, hidden_states, input_mask def get_config(self): return FlavaMultimodalConfig( hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, qkv_bias=self.qkv_bias, use_cls_token=self.use_cls_token, ce_ignore_index=self.ce_ignore_index, ) def create_and_check_model(self, config, hidden_states, input_mask): model = FlavaMultimodalModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(hidden_states, attention_mask=input_mask) result = model(hidden_states) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def 
prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, hidden_states, input_mask = config_and_inputs inputs_dict = {"hidden_states": hidden_states, "attention_mask": input_mask} return config, inputs_dict @require_torch class FlavaMultimodalModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (FlavaMultimodalModel,) if is_torch_available() else () test_pruning = False test_head_masking = False test_resize_embeddings = False test_torchscript = False def setUp(self): self.model_tester = FlavaMultimodalModelTester(self) self.config_tester = ConfigTester( self, config_class=FlavaMultimodalConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["hidden_states"] self.assertListEqual(arg_names[:1], expected_arg_names) @unittest.skip("FLAVA does not have input embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip def test_training(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="FLAVA does not use input_embeds") def test_inputs_embeds(self): pass @slow def test_model_from_pretrained(self): model_name = "facebook/flava-full" model = FlavaMultimodalModel.from_pretrained(model_name) self.assertIsNotNone(model) class FlavaImageCodebookTester: def __init__( self, parent, batch_size=12, image_size=112, num_channels=3, hidden_size=32, num_groups=2, vocab_size=99, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.hidden_size = hidden_size self.num_groups = num_groups self.vocab_size = vocab_size def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) config = self.get_config() return config, pixel_values def get_config(self): return FlavaImageCodebookConfig( hidden_size=self.hidden_size, num_groups=self.num_groups, vocab_size=self.vocab_size ) def create_and_check_model(self, config, pixel_values): model = FlavaImageCodebook(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values) self.parent.assertEqual( result.shape, (self.batch_size, config.vocab_size, self.image_size // 8, self.image_size // 8) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class 
FlavaImageCodebookTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (FlavaImageCodebook,) if is_torch_available() else () test_pruning = False test_head_masking = False test_resize_embeddings = False test_torchscript = False has_attentions = False def setUp(self): self.model_tester = FlavaImageCodebookTester(self) self.config_tester = ConfigTester(self, config_class=FlavaImageCodebookConfig, has_text_modality=False) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) @unittest.skip(reason="Flava does not output attentions") def test_attention_outputs(self): pass @unittest.skip(reason="No embedding in multimodal model") def test_model_get_set_embeddings(self): pass @unittest.skip def test_training(self): pass @unittest.skip def test_hidden_states_output(self): pass @unittest.skip(reason="FlavaImageCodebook has no attentions") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="FLAVA does not use input_embeds") def test_inputs_embeds(self): pass @unittest.skip def test_model_outputs_equivalence(self): pass @slow def test_model_from_pretrained(self): model_name = "facebook/flava-full" model = FlavaImageCodebook.from_pretrained(model_name) self.assertIsNotNone(model) class FlavaModelTester: model_class = FlavaModel def __init__( self, parent, text_kwargs=None, image_kwargs=None, multimodal_kwargs=None, image_codebook_kwargs=None, is_training=True, hidden_size=32, projection_dim=32, initializer_range=0.02, layer_norm_eps=1e-12, ): if text_kwargs is None: text_kwargs = {} if image_kwargs is None: image_kwargs = {} if multimodal_kwargs is None: multimodal_kwargs = {} if image_codebook_kwargs is None: image_codebook_kwargs = {} self.parent = parent self.image_model_tester = FlavaImageModelTester(parent, **image_kwargs) self.text_model_tester = FlavaTextModelTester(parent, **text_kwargs) self.multimodal_model_tester = FlavaMultimodalModelTester(parent, **multimodal_kwargs) self.image_codebook_tester = FlavaImageCodebookTester(parent, **image_codebook_kwargs) self.is_training = is_training self.config_tester = ConfigTester(self, config_class=FlavaConfig, hidden_size=37) self.hidden_size = hidden_size self.projection_dim = projection_dim self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test def test_config(self): self.config_tester.run_common_tests() def prepare_config_and_inputs_for_common(self): _, 
pixel_values, bool_masked_pos = self.image_model_tester.prepare_config_and_inputs() _, input_ids, token_type_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() config = self.get_config() return config, { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "bool_masked_pos": bool_masked_pos, } def get_config(self): return FlavaConfig.from_configs( self.image_model_tester.get_config(), self.text_model_tester.get_config(), self.multimodal_model_tester.get_config(), self.image_codebook_tester.get_config(), hidden_size=self.hidden_size, projection_dim=self.projection_dim, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, ) def create_and_check_model(self, config, inputs): self._test_model(config, inputs, test_image=True) self._test_model(config, inputs, test_text=True) self._test_model(config, inputs, test_image=True, test_text=True) def _test_model(self, config, inputs, test_image=False, test_text=False): model = self.model_class(config).to(torch_device).eval() with torch.no_grad(): result = model( input_ids=inputs["input_ids"] if test_text else None, attention_mask=inputs["attention_mask"] if test_text else None, token_type_ids=inputs["token_type_ids"] if test_text else None, pixel_values=inputs["pixel_values"] if test_image else None, bool_masked_pos=inputs["bool_masked_pos"] if test_image else None, ) image_size = (self.image_model_tester.image_size, self.image_model_tester.image_size) patch_size = (self.image_model_tester.patch_size, self.image_model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) if test_image: self.parent.assertEqual( result.image_embeddings.shape, (self.image_model_tester.batch_size, num_patches + 1, self.image_model_tester.hidden_size), ) else: self.parent.assertIsNone(result.image_embeddings) if test_text: self.parent.assertEqual( result.text_embeddings.shape, ( self.text_model_tester.batch_size, self.text_model_tester.seq_length, self.text_model_tester.hidden_size, ), ) else: self.parent.assertIsNone(result.text_embeddings) if test_image and test_text: self.parent.assertEqual( result.multimodal_embeddings.shape, ( self.multimodal_model_tester.batch_size, self.text_model_tester.seq_length + num_patches + 2, self.multimodal_model_tester.hidden_size, ), ) else: self.parent.assertIsNone(result.multimodal_embeddings) @require_torch class FlavaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FlavaModel,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": FlavaModel} if is_torch_available() else {} class_for_tester = FlavaModelTester test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False def setUp(self): self.model_tester = self.class_for_tester(self) common_properties = ["projection_dim", "logit_scale_init_value", "init_codebook"] self.config_tester = ConfigTester( self, config_class=FlavaConfig, has_text_modality=False, common_properties=common_properties ) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_model(*config_and_inputs) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="tested in individual model tests") def test_inputs_embeds(self): pass 
@unittest.skip(reason="tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="FlavaModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass # override as the `logit_scale` parameter initialization is different for FLAVA def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initialized as per the original implementation if name == "logit_scale" or name == "flava.logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False configs_no_init.return_loss = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] pixel_values = inputs_dict["pixel_values"] # FLAVA needs pixel_values if "input_ids_masked" in inputs_dict: # For pretraining inputs = (input_ids, inputs_dict["input_ids_masked"], pixel_values) else: inputs = (input_ids, pixel_values) traced_model = torch.jit.trace(model, inputs) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() # Non persistent buffers won't be in original state dict loaded_model_state_dict.pop("text_model.embeddings.token_type_ids", None) non_persistent_buffers = {} for key in loaded_model_state_dict: if key not in model_state_dict: non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_image_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save FlavaConfig and check if we can load 
FlavaImageConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) image_config = FlavaImageConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.image_config.to_dict(), image_config.to_dict()) # Save FlavaConfig and check if we can load FlavaTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = FlavaTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) # Save FlavaConfig and check if we can load FlavaMultimodalConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) multimodal_config = FlavaMultimodalConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.multimodal_config.to_dict(), multimodal_config.to_dict()) # overwrite from common since FlavaModel/TFFlavaModel return FLAVAOutput/TFFLAVAOutput @slow def test_model_from_pretrained(self): model_name = "facebook/flava-full" model = FlavaModel.from_pretrained(model_name) self.assertIsNotNone(model) class FlavaForPreTrainingTester(FlavaModelTester): model_class = FlavaForPreTraining def prepare_config_and_inputs_for_common(self): _, pixel_values, bool_masked_pos = self.image_model_tester.prepare_config_and_inputs() _, input_ids, token_type_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() config = self.get_config() input_ids_masked = input_ids.detach().clone() input_ids_masked[:, 1:3] = 100 mlm_labels = input_ids.detach().clone() mlm_labels[:, :] = config.ce_ignore_index mlm_labels[:, 1:3] = input_ids[:, 1:3] mim_labels = torch.randint( 0, self.image_model_tester.vocab_size, bool_masked_pos.size(), device=bool_masked_pos.device ).long() mim_labels[bool_masked_pos.ne(True)] = config.ce_ignore_index itm_labels = torch.ones(mlm_labels.size(0), device=bool_masked_pos.device).long() return config, { "input_ids": input_ids, "input_ids_masked": input_ids_masked, "token_type_ids": token_type_ids, "attention_mask": attention_mask, "pixel_values": pixel_values, "bool_masked_pos": bool_masked_pos, "mlm_labels": mlm_labels, "mim_labels": mim_labels, "itm_labels": itm_labels, "return_loss": True, } def _test_model(self, config, inputs, test_image=False, test_text=False): model = self.model_class(config).to(torch_device).eval() with torch.no_grad(): result = model( input_ids=inputs["input_ids"] if test_text else None, input_ids_masked=inputs["input_ids_masked"] if test_text else None, attention_mask=inputs["attention_mask"] if test_text else None, token_type_ids=inputs["token_type_ids"] if test_text else None, pixel_values=inputs["pixel_values"] if test_image else None, bool_masked_pos=inputs["bool_masked_pos"] if test_image else None, mlm_labels=inputs["mlm_labels"], mim_labels=inputs["mim_labels"], itm_labels=inputs["itm_labels"], return_loss=inputs["return_loss"], ) image_size = (self.image_model_tester.image_size, self.image_model_tester.image_size) patch_size = (self.image_model_tester.patch_size, self.image_model_tester.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) if test_image: self.parent.assertEqual( result.image_embeddings.shape, (self.image_model_tester.batch_size, num_patches + 1, self.image_model_tester.hidden_size), ) if not test_text: self.parent.assertEqual( result.loss_info.mim.dim(), 0, ) self.parent.assertEqual( result.mim_logits.shape, (inputs["bool_masked_pos"].sum().item(), self.image_model_tester.vocab_size), ) else: 
self.parent.assertIsNone(result.image_embeddings) if test_text: self.parent.assertEqual( result.text_embeddings.shape, ( self.text_model_tester.batch_size, self.text_model_tester.seq_length, self.text_model_tester.hidden_size, ), ) if not test_image: self.parent.assertEqual(result.loss_info.mlm.dim(), 0) self.parent.assertEqual( result.mlm_logits.shape, ( (inputs["mlm_labels"] != self.multimodal_model_tester.ce_ignore_index).sum().item(), self.text_model_tester.vocab_size, ), ) else: self.parent.assertIsNone(result.text_embeddings) if test_image and test_text: self.parent.assertEqual( result.multimodal_masked_embeddings.shape, ( self.multimodal_model_tester.batch_size, self.text_model_tester.seq_length + num_patches + 2, self.multimodal_model_tester.hidden_size, ), ) self.parent.assertEqual( result.itm_logits.shape, (self.text_model_tester.batch_size, 2), ) self.parent.assertEqual( result.mmm_text_logits.shape, ( (inputs["mlm_labels"] != self.multimodal_model_tester.ce_ignore_index).sum().item(), self.text_model_tester.vocab_size, ), ) self.parent.assertEqual( result.mmm_image_logits.shape, (inputs["bool_masked_pos"].sum().item(), self.image_model_tester.vocab_size), ) self.parent.assertEqual( result.contrastive_logits_per_image.shape, (self.image_model_tester.batch_size, self.text_model_tester.batch_size), ) self.parent.assertEqual( result.contrastive_logits_per_text.shape, (self.text_model_tester.batch_size, self.image_model_tester.batch_size), ) for item in [ result.loss_info.global_contrastive, result.loss_info.itm, result.loss_info.mmm_text, result.loss_info.mmm_image, ]: self.parent.assertEqual(item.dim(), 0) for item in [result.loss_info.mim, result.loss_info.mlm]: self.parent.assertIsNone(item) else: self.parent.assertIsNone(result.multimodal_masked_embeddings) for item in [ result.loss_info.global_contrastive, result.loss_info.itm, result.loss_info.mmm_text, result.loss_info.mmm_image, ]: self.parent.assertIsNone(item) self.parent.assertIsNone(result.multimodal_embeddings) @require_torch class FlavaForPreTrainingTest(FlavaModelTest): all_model_classes = (FlavaForPreTraining,) if is_torch_available() else () class_for_tester = FlavaForPreTrainingTester test_torchscript = False @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch class FlavaModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "facebook/flava-full" model = FlavaModel.from_pretrained(model_name).to(torch_device) processor = FlavaProcessor.from_pretrained(model_name) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=[image, image], padding="max_length", max_length=77, return_tensors="pt", 
).to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs, return_dict=True) # verify the embeddings self.assertAlmostEqual(outputs.image_embeddings.sum().item(), -1352.53540, places=4) self.assertAlmostEqual(outputs.text_embeddings.sum().item(), -198.98225, places=4) self.assertAlmostEqual(outputs.multimodal_embeddings.sum().item(), -4030.4604492, places=4) @require_vision @require_torch class FlavaForPreTrainingIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "facebook/flava-full" model = FlavaForPreTraining.from_pretrained(model_name).to(torch_device) processor = FlavaProcessor.from_pretrained(model_name) torch.manual_seed(1) random.seed(1) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=[image, image], padding="max_length", max_length=77, return_tensors="pt", return_codebook_pixels=True, return_image_mask=True, ) # Create a clone of the input_ids tensor that will be its masked version inputs["input_ids_masked"] = inputs["input_ids"].clone() # Mask the tokens "a" & "cat" from the "a photo of a cat" text using the special 103 value inputs["input_ids_masked"][0, 4:6] = 103 # MLM labels. It is a cloned version of input_ids where all values are -100 (i.e., ignored) # except those that are masked, whose original values are stored inputs["mlm_labels"] = inputs["input_ids"].clone() inputs["mlm_labels"][:, :] = -100 inputs["mlm_labels"][0, 4:6] = inputs["input_ids"][0, 4:6] inputs = inputs.to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.contrastive_logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( outputs.contrastive_logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[16.1291, 8.4033], [16.1291, 8.4033]], device=torch_device) torch.testing.assert_close(outputs.contrastive_logits_per_image, expected_logits, rtol=1e-3, atol=1e-3) self.assertAlmostEqual(outputs.loss_info.mmm_text.item(), 2.0727925, places=4) self.assertAlmostEqual(outputs.loss_info.mmm_image.item(), 7.0282096, places=4) self.assertAlmostEqual(outputs.loss.item(), 11.3792324, places=4) @slow def test_inference_with_itm_labels(self): model_name = "facebook/flava-full" model = FlavaForPreTraining.from_pretrained(model_name).to(torch_device) processor = FlavaProcessor.from_pretrained(model_name) torch.manual_seed(1) random.seed(1) image = prepare_img() inputs = processor( text=["a photo of a cat", "a photo of a dog"], images=[image, image], padding="max_length", max_length=77, return_tensors="pt", return_codebook_pixels=True, return_image_mask=True, ) # Create a clone of the input_ids tensor that will be its masked version inputs["input_ids_masked"] = inputs["input_ids"].clone() # Mask the tokens "a" & "cat" from the "a photo of a cat" text using the special 103 value inputs["input_ids_masked"][0, 4:6] = 103 # MLM labels. It is a cloned version of input_ids where all values are -100 (i.e., ignored) # except those that are masked, whose original values are stored inputs["mlm_labels"] = inputs["input_ids"].clone() inputs["mlm_labels"][:, :] = -100 inputs["mlm_labels"][0, 4:6] = inputs["input_ids"][0, 4:6] # Manually create the itm_labels tensor that indicates if the image-text match. 
# In this case, the first pair matches and the second does not inputs["itm_labels"] = torch.tensor([1, 0]) inputs = inputs.to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits self.assertEqual( outputs.contrastive_logits_per_image.shape, torch.Size((torch.count_nonzero(inputs["itm_labels"]).item(), inputs.input_ids.shape[0])), ) self.assertEqual( outputs.contrastive_logits_per_text.shape, torch.Size((torch.count_nonzero(inputs["itm_labels"]).item(), inputs.pixel_values.shape[0])), ) expected_logits = torch.tensor([[16.1291, 8.4033], [16.1291, 8.4033]], device=torch_device) torch.testing.assert_close(outputs.contrastive_logits_per_image, expected_logits, rtol=1e-3, atol=1e-3) self.assertAlmostEqual(outputs.loss_info.mmm_text.item(), 2.0727925, places=4) self.assertAlmostEqual(outputs.loss_info.mmm_image.item(), 6.8965902, places=4) self.assertAlmostEqual(outputs.loss.item(), 9.6084213, places=4)
transformers/tests/models/flava/test_modeling_flava.py/0
{ "file_path": "transformers/tests/models/flava/test_modeling_flava.py", "repo_id": "transformers", "token_count": 24848 }
575
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import shutil import tempfile import unittest import numpy as np from parameterized import parameterized from transformers import GemmaTokenizerFast, SiglipImageProcessorFast, is_speech_available from transformers.testing_utils import require_sentencepiece, require_torch, require_torchaudio, require_vision from .test_feature_extraction_gemma3n import floats_list if is_speech_available(): from transformers.models.gemma3n import Gemma3nAudioFeatureExtractor, Gemma3nProcessor # TODO: omni-modal processor can't run tests from `ProcessorTesterMixin` @require_torch @require_torchaudio @require_vision @require_sentencepiece class Gemma3nProcessorTest(unittest.TestCase): def setUp(self): # TODO: update to google? self.model_id = "hf-internal-testing/namespace-google-repo_name-gemma-3n-E4B-it" self.tmpdirname = tempfile.mkdtemp(suffix="gemma3n") self.maxDiff = None def get_tokenizer(self, **kwargs): return GemmaTokenizerFast.from_pretrained(self.model_id, **kwargs) def get_feature_extractor(self, **kwargs): return Gemma3nAudioFeatureExtractor.from_pretrained(self.model_id, **kwargs) def get_image_processor(self, **kwargs): return SiglipImageProcessorFast.from_pretrained(self.model_id, **kwargs) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_save_load_pretrained_default(self): # NOTE: feature_extractor and image_processor both use the same filename, preprocessor_config.json, when saved to # disk, but the files are overwritten by processor.save_pretrained(). This test does not attempt to address # this potential issue, and as such, does not guarantee content accuracy. 
tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() image_processor = self.get_image_processor() processor = Gemma3nProcessor( tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor ) processor.save_pretrained(self.tmpdirname) processor = Gemma3nProcessor.from_pretrained(self.tmpdirname) self.assertIsInstance(processor.tokenizer, GemmaTokenizerFast) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab()) # `disable_grouping` is a new attribute that got added on main while gemma3n was being released - so was # not part of the saved processor del processor.feature_extractor.disable_grouping self.assertIsInstance(processor.feature_extractor, Gemma3nAudioFeatureExtractor) self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string()) def test_save_load_pretrained_additional_features(self): tokenizer = self.get_tokenizer() feature_extractor = self.get_feature_extractor() image_processor = self.get_image_processor() processor = Gemma3nProcessor( tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor ) processor.save_pretrained(self.tmpdirname) tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS-BOS)", eos_token="(EOS-EOS)") feature_extractor_add_kwargs = self.get_feature_extractor(dither=5.0, padding_value=1.0) processor = Gemma3nProcessor.from_pretrained( self.tmpdirname, bos_token="(BOS-BOS)", eos_token="(EOS-EOS)", dither=5.0, padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab()) self.assertIsInstance(processor.tokenizer, GemmaTokenizerFast) # `disable_grouping` is a new attribute that got added on main while gemma3n was being released - so was # not part of the saved processor del processor.feature_extractor.disable_grouping self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string()) self.assertIsInstance(processor.feature_extractor, Gemma3nAudioFeatureExtractor) @parameterized.expand([256, 512, 768, 1024]) def test_image_processor(self, image_size: int): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() image_processor = self.get_image_processor() processor = Gemma3nProcessor( tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor ) raw_image = np.random.randint(0, 256, size=(image_size, image_size, 3), dtype=np.uint8) input_image_processor = image_processor(raw_image, return_tensors="pt") input_processor = processor(text="Describe:", images=raw_image, return_tensors="pt") for key in input_image_processor: self.assertAlmostEqual(input_image_processor[key].sum(), input_processor[key].sum(), delta=1e-2) if "pixel_values" in key: # NOTE: all images should be re-scaled to 768x768 self.assertEqual(input_image_processor[key].shape, (1, 3, 768, 768)) self.assertEqual(input_processor[key].shape, (1, 3, 768, 768)) def test_audio_feature_extractor(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() image_processor = self.get_image_processor() processor = Gemma3nProcessor( tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor ) raw_speech = floats_list((3, 1000)) input_feat_extract = feature_extractor(raw_speech, return_tensors="pt") input_processor = processor(text="Transcribe:", audio=raw_speech, return_tensors="pt") for key in input_feat_extract: self.assertAlmostEqual(input_feat_extract[key].sum(), 
input_processor[key].sum(), delta=1e-2) def test_tokenizer(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() image_processor = self.get_image_processor() processor = Gemma3nProcessor( tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor ) input_str = "This is a test string" encoded_processor = processor(text=input_str) encoded_tok = tokenizer(input_str) for key in encoded_tok: self.assertListEqual(encoded_tok[key], encoded_processor[key][0]) def test_tokenizer_decode(self): feature_extractor = self.get_feature_extractor() tokenizer = self.get_tokenizer() image_processor = self.get_image_processor() processor = Gemma3nProcessor( tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor ) predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] decoded_processor = processor.batch_decode(predicted_ids) decoded_tok = tokenizer.batch_decode(predicted_ids) self.assertListEqual(decoded_tok, decoded_processor)
transformers/tests/models/gemma3n/test_processing_gemma3n.py/0
{ "file_path": "transformers/tests/models/gemma3n/test_processing_gemma3n.py", "repo_id": "transformers", "token_count": 2913 }
576
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Idefics2 model.""" import copy import tempfile import unittest from io import BytesIO import pytest import requests from transformers import ( AutoProcessor, Idefics2Config, Idefics2ForConditionalGeneration, Idefics2Model, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( Expectations, cleanup, require_bitsandbytes, require_flash_attn, require_torch, require_torch_gpu, require_torch_multi_accelerator, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch if is_vision_available(): from PIL import Image class Idefics2VisionText2TextModelTester: def __init__( self, parent, is_training=True, batch_size=2, num_images=2, seq_length=10, vision_config={ "image_size": 12, "patch_size": 12, "num_channels": 3, "hidden_size": 32, "num_hidden_layers": 2, "num_attention_heads": 4, "intermediate_size": 32, "dropout": 0.1, "attention_dropout": 0.1, "initializer_range": 0.02, }, perceiver_config={ "hidden_act": "silu", "resampler_n_latents": 2, "resampler_depth": 2, "resampler_n_heads": 2, "num_key_value_heads": 1, "resampler_head_dim": 12, "attention_dropout": 0.0, }, text_config={ "vocab_size": 100, "hidden_size": 64, "intermediate_size": 56, "num_hidden_layers": 3, "num_attention_heads": 2, "num_key_value_heads": 2, "hidden_act": "silu", "max_position_embeddings": 256, "initializer_range": 0.02, "rms_norm_eps": 1e-6, "pad_token_id": 0, # None in the original configuration_mistral, we set it to the unk_token_id "bos_token_id": 1, "eos_token_id": 2, "image_token_id": 99, "tie_word_embeddings": False, "rope_theta": 10000.0, "sliding_window": 32, "attention_dropout": 0.0, }, use_cache=False, tie_word_embeddings=False, image_token_id=99, ): self.parent = parent self.pad_token_id = text_config["pad_token_id"] self.is_training = is_training self.batch_size = batch_size self.num_images = num_images self.num_channels = 3 self.seq_length = seq_length self.use_cache = use_cache self.image_token_id = image_token_id self.tie_word_embeddings = tie_word_embeddings # Hack - add properties here so use common tests self.vocab_size = text_config["vocab_size"] self.num_hidden_layers = text_config["num_hidden_layers"] self.num_attention_heads = text_config["num_attention_heads"] self.hidden_size = text_config["hidden_size"] self.vision_config = vision_config self.perceiver_config = perceiver_config self.text_config = text_config def get_config(self): return Idefics2Config( use_cache=self.use_cache, image_token_id=self.image_token_id, tie_word_embeddings=self.tie_word_embeddings, vision_config=self.vision_config, perceiver_config=self.perceiver_config, text_config=self.text_config, vocab_size=self.vocab_size, ) def prepare_config_and_inputs(self): pixel_values = 
floats_tensor( [ self.batch_size, self.num_images, self.vision_config["num_channels"], self.vision_config["image_size"], self.vision_config["image_size"], ] ) config = self.get_config() return config, pixel_values def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 1 # For simplicity just set the last n tokens to the image token n_image_tokens_per_batch = self.num_images * self.perceiver_config["resampler_n_latents"] input_ids[input_ids == self.image_token_id] = self.pad_token_id input_ids[:, -n_image_tokens_per_batch:] = self.image_token_id attention_mask = input_ids.ne(1).to(torch_device) inputs_dict = { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class Idefics2ModelTest(ModelTesterMixin, unittest.TestCase): """ Model tester for `Idefics2`. """ all_model_classes = (Idefics2Model,) if is_torch_available() else () fx_compatible = False test_torchscript = False test_pruning = False test_resize_embeddings = True test_head_masking = False _is_composite = True def setUp(self): self.model_tester = Idefics2VisionText2TextModelTester(self) self.config_tester = ConfigTester( self, config_class=Idefics2Config, has_text_modality=False, common_properties=["image_token_id"] ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="input_embeds cannot be passed in without input_ids") def test_inputs_embeds(self): pass @unittest.skip(reason="input_embeds cannot be passed in without input_ids") def test_inputs_embeds_matches_input_ids(self): pass @unittest.skip(reason="Model does not support padding right") def test_flash_attn_2_generate_padding_right(self): pass @unittest.skip(reason="Model does not support padding right") def test_flash_attn_2_inference_padding_right(self): pass # We need to override as we need to prepare such that the image token is the last token def test_resize_tokens_embeddings(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.text_config.vocab_size # Retrieve the embeddings and clone them model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Ignore copy # Check that the
model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2) n_images = self.model_tester.num_images * self.model_tester.perceiver_config["resampler_n_latents"] model.image_token_id = model_vocab_size - 15 - 1 inputs_dict["input_ids"][:, -n_images:] = model.image_token_id # make sure that decoder_input_ids are resized as well if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_vocab_size = config.text_config.vocab_size model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1) self.assertTrue(model.config.text_config.vocab_size + 10, model_vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) self.assertTrue(model_embed.weight.shape[0], model.config.text_config.vocab_size) self.assertTrue(model.config.text_config.vocab_size, model.vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) # Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size target_dimension = 128 model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0], target_dimension) with self.assertRaisesRegex( ValueError, "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. 
Please make sure to pass an integer", ): model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3) # We need to override as we need to prepare such that the image token is the last token def test_resize_embeddings_untied(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() original_config.tie_word_embeddings = False for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.text_config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2) n_images = self.model_tester.num_images * self.model_tester.perceiver_config["resampler_n_latents"] model.image_token_id = model_vocab_size - 15 - 1 inputs_dict["input_ids"][:, -n_images:] = model.image_token_id # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def test_sdpa_can_dispatch_composite_models(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model_sdpa.vision_model.config._attn_implementation == "sdpa") self.assertTrue(model_sdpa.connector.perceiver_resampler.config._attn_implementation == "sdpa") model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager") self.assertTrue(model_eager.connector.perceiver_resampler.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = 
submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") @require_torch class Idefics2ForConditionalGenerationModelTest(GenerationTesterMixin, ModelTesterMixin, unittest.TestCase): """ Model tester for `Idefics2ForConditionalGeneration`. """ all_model_classes = (Idefics2ForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = {"image-text-to-text": Idefics2ForConditionalGeneration} if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = True test_head_masking = False test_torchscript = False def setUp(self): self.model_tester = Idefics2VisionText2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Idefics2Config, has_text_modality=False) @unittest.skip(reason="input_embeds cannot be passed in without input_ids") def test_inputs_embeds(self): pass @unittest.skip(reason="Model does not support padding right") def test_flash_attn_2_generate_padding_right(self): pass @unittest.skip(reason="Model does not support padding right") def test_flash_attn_2_inference_padding_right(self): pass @unittest.skip(reason="Contrastive search is not implemented for VLMs that do cross-attn") def test_contrastive_generate(self): pass @unittest.skip(reason="Contrastive search is not implemented for VLMs that do cross-attn") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip(reason="Contrastive search is not implemented for VLMs that do cross-attn") def test_contrastive_generate_low_memory(self): pass @unittest.skip( reason="Prompt lookup decoding needs a way to indicate `bad_word_ids` that should not be suggested as candidates" ) def test_prompt_lookup_decoding_matches_greedy_search(self): pass @pytest.mark.generate @slow @unittest.skip( reason="Idefics2 doesn't support SDPA for all backbones, vision backbones has only eager/FA2 attention" ) def test_eager_matches_sdpa_generate(self): pass # We need to override as we need to prepare such that the image token is the last token def test_resize_tokens_embeddings(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_vocab_size = config.text_config.vocab_size # Retrieve the embeddings and clone them model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass
successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2) n_images = self.model_tester.num_images * self.model_tester.perceiver_config["resampler_n_latents"] model.model.image_token_id = model_vocab_size - 15 - 1 inputs_dict["input_ids"][:, -n_images:] = model.model.image_token_id model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) model_vocab_size = config.text_config.vocab_size model.resize_token_embeddings(model_vocab_size + 10, pad_to_multiple_of=1) self.assertTrue(model.config.text_config.vocab_size + 10, model_vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) self.assertTrue(model_embed.weight.shape[0], model.config.text_config.vocab_size) self.assertTrue(model.config.text_config.vocab_size, model.vocab_size) model_embed = model.resize_token_embeddings(model_vocab_size + 13, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0] // 64, 0) # Check that resizing a model to a multiple of pad_to_multiple leads to a model of exactly that size target_dimension = 128 model_embed = model.resize_token_embeddings(target_dimension, pad_to_multiple_of=64) self.assertTrue(model_embed.weight.shape[0], target_dimension) with self.assertRaisesRegex( ValueError, "Asking to pad the embedding matrix to a multiple of `1.3`, which is not and integer. 
Please make sure to pass an integer", ): model.resize_token_embeddings(model_vocab_size, pad_to_multiple_of=1.3) # We need to override as we need to prepare such that the image token is the last token def test_resize_embeddings_untied(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() original_config.tie_word_embeddings = False for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.text_config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary - 1 and the image token should be the last token inputs_dict["input_ids"].clamp_(max=model_vocab_size - 15 - 2) n_images = self.model_tester.num_images * self.model_tester.perceiver_config["resampler_n_latents"] model.model.image_token_id = model_vocab_size - 15 - 1 inputs_dict["input_ids"][:, -n_images:] = model.model.image_token_id # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def test_inputs_embeds_matches_input_ids_with_generate(self): # overwrite because IDEFICS needs ids and embeds at the input to be not None config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) pad_token_id = config.pad_token_id if config.pad_token_id is not None else 1 wte = model.get_input_embeddings() input_ids = inputs["input_ids"] # some models infer position ids/attn mask differently when input ids # by check if pad_token let's make sure no padding is in input ids not_pad_token_id = pad_token_id + 1 if max(0, pad_token_id - 1) == 0 else pad_token_id - 1 input_ids[input_ids == pad_token_id] = not_pad_token_id del inputs["input_ids"] inputs_embeds = wte(input_ids) out_ids = model.generate(input_ids=input_ids, **inputs, max_new_tokens=2) out_embeds = model.generate(input_ids=input_ids, inputs_embeds=inputs_embeds, **inputs, max_new_tokens=2) torch.testing.assert_close(out_embeds, out_ids) @require_torch class 
Idefics2ForConditionalGenerationIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b-base") self.image1 = Image.open( BytesIO( requests.get( "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" ).content ) ) self.image2 = Image.open( BytesIO(requests.get("https://cdn.britannica.com/59/94459-050-DBA42467/Skyline-Chicago.jpg").content) ) self.image3 = Image.open( BytesIO( requests.get( "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg" ).content ) ) def tearDown(self): cleanup(torch_device, gc_collect=True) @slow @require_torch_multi_accelerator def test_integration_test(self): model = Idefics2ForConditionalGeneration.from_pretrained( "HuggingFaceM4/idefics2-8b-base", dtype=torch.bfloat16, device_map="auto", ) # Create inputs text = "<image>In this image, we see" images = self.image1 inputs = self.processor(text=text, images=images, return_tensors="pt", padding=True) inputs.to(torch_device) generated_ids = model.generate(**inputs, max_new_tokens=10) generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True) # Batch affects generated text. Single batch output: ['In this image, we see the Statue of Liberty in the foreground and'] expected_generated_text = "In this image, we see the Statue of Liberty, the New York City" self.assertEqual(generated_texts[0], expected_generated_text) @slow @require_bitsandbytes def test_integration_test_4bit(self): # Let' s make sure we test the preprocessing to replace what is used model = Idefics2ForConditionalGeneration.from_pretrained( "HuggingFaceM4/idefics2-8b-base", load_in_4bit=True, ) # Create pixel inputs text = ["<image>In this image, we see", "bla, bla <image><image>"] images = [[self.image1], [self.image2, self.image3]] inputs = self.processor(text=text, images=images, padding=True, return_tensors="pt").to(torch_device) generated_ids = model.generate(**inputs, max_new_tokens=10) generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True) expected_generated_texts = Expectations( { ("xpu", 3): "In this image, we see the Statue of Liberty, the Hudson River,", ("cuda", None): "In this image, we see the Statue of Liberty, the Hudson River,", ("rocm", (9, 5)): "In this image, we see the Statue of Liberty, the New York City", } ) EXPECTED_GENERATED_TEXT = expected_generated_texts.get_expectation() self.assertEqual(generated_texts[0], EXPECTED_GENERATED_TEXT) @slow @require_bitsandbytes def test_integration_test_4bit_batch2(self): # Let' s make sure we test the preprocessing to replace what is used model = Idefics2ForConditionalGeneration.from_pretrained( "HuggingFaceM4/idefics2-8b-base", load_in_4bit=True, ) from datasets import load_dataset dataset = load_dataset("nielsr/docvqa_1200_examples", split="test") text = [f"<image>{dataset[40]['query']['en']}", f"<image>{dataset[41]['query']['en']}"] images = [[dataset[40]["image"]], [dataset[41]["image"]]] inputs = self.processor(text=text, images=images, padding=True, return_tensors="pt").to(torch_device) generated_ids = model.generate(**inputs, max_new_tokens=64) batched_generated_texts = self.processor.batch_decode(generated_ids, skip_special_tokens=True) text = f"<image>{dataset[40]['query']['en']}" images = dataset[40]["image"] inputs = self.processor(text=text, images=images, padding=True, return_tensors="pt").to(torch_device) generated_ids = model.generate(**inputs, 
max_new_tokens=64) generated_text_0 = self.processor.batch_decode(generated_ids, skip_special_tokens=True) text = f"<image>{dataset[41]['query']['en']}" images = dataset[41]["image"] inputs = self.processor(text=text, images=images, padding=True, return_tensors="pt").to(torch_device) generated_ids = model.generate(**inputs, max_new_tokens=64) generated_text_1 = self.processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(batched_generated_texts[0], generated_text_0[0]) self.assertEqual(batched_generated_texts[1], generated_text_1[0]) @require_flash_attn @require_torch_gpu @require_bitsandbytes def test_flash_attn_2_eager_equivalence(self): # Create inputs text = "<image>In this image, we see" images = self.image1 inputs = self.processor(text=text, images=images, return_tensors="pt", padding=True) inputs.to(torch_device) # Eager model model_eager = Idefics2ForConditionalGeneration.from_pretrained( "HuggingFaceM4/idefics2-8b-base", attn_implementation="eager", load_in_4bit=True, ) generated_ids_eager = model_eager.generate(**inputs, max_new_tokens=10) generated_texts_eager = self.processor.batch_decode(generated_ids_eager, skip_special_tokens=True) del model_eager # Flash Attention 2 model model_flash_attention_2 = Idefics2ForConditionalGeneration.from_pretrained( "HuggingFaceM4/idefics2-8b-base", attn_implementation="flash_attention_2", load_in_4bit=True, ) generated_ids_flash_attention_2 = model_flash_attention_2.generate(**inputs, max_new_tokens=10) generated_texts_flash_attention_2 = self.processor.batch_decode( generated_ids_flash_attention_2, skip_special_tokens=True ) self.assertEqual(generated_texts_eager[0], generated_texts_flash_attention_2[0])
transformers/tests/models/idefics2/test_modeling_idefics2.py/0
{ "file_path": "transformers/tests/models/idefics2/test_modeling_idefics2.py", "repo_id": "transformers", "token_count": 14062 }
577
# Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Lfm2 model.""" import unittest import pytest from transformers import is_torch_available from transformers.testing_utils import ( require_read_token, require_torch, require_torch_accelerator, slow, ) from ...causal_lm_tester import CausalLMModelTest, CausalLMModelTester if is_torch_available(): from transformers import Lfm2Config, Lfm2ForCausalLM, Lfm2Model class Lfm2ModelTester(CausalLMModelTester): if is_torch_available(): config_class = Lfm2Config base_model_class = Lfm2Model causal_lm_class = Lfm2ForCausalLM def __init__( self, parent, layer_types=["full_attention", "conv"], ): super().__init__(parent) self.layer_types = layer_types @require_torch class Lfm2ModelTest(CausalLMModelTest, unittest.TestCase): all_model_classes = (Lfm2Model, Lfm2ForCausalLM) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": Lfm2Model, "text-generation": Lfm2ForCausalLM, } if is_torch_available() else {} ) test_headmasking = False test_pruning = False fx_compatible = False model_tester_class = Lfm2ModelTester # used in `test_torch_compile_for_training` _torch_compile_train_cls = Lfm2ForCausalLM if is_torch_available() else None @unittest.skip( "Lfm2 alternates between attention and conv layers, so attentions are only returned for attention layers" ) def test_attention_outputs(self): pass @unittest.skip("Lfm2 has a special cache format as it alternates between attention and conv layers") def test_past_key_values_format(self): pass @unittest.skip("Lfm2 has a special cache format which is not compatible with contrastive search") def test_contrastive_generate(self): pass @unittest.skip("Lfm2 has a special cache format which is not compatible with contrastive search") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip("Lfm2 has a special cache format which is not compatible with contrastive search") def test_contrastive_generate_low_memory(self): pass @unittest.skip( "Lfm2 has a special cache format which is not compatible with compile as it has static address for conv cache" ) @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass @require_torch_accelerator @require_read_token @slow class Lfm2IntegrationTest(unittest.TestCase): pass
transformers/tests/models/lfm2/test_modeling_lfm2.py/0
{ "file_path": "transformers/tests/models/lfm2/test_modeling_lfm2.py", "repo_id": "transformers", "token_count": 1183 }
578
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import LongformerConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, ) from transformers.models.longformer.modeling_longformer import LongformerSelfAttention class LongformerModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, attention_window=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.attention_window = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but LongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window + 1` locations # (assuming no token with global attention, otherwise the last dimension of attentions # is x + self.attention_window + 1, where x is the number of tokens with global attention) self.key_length = self.attention_window + 2 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if 
self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return LongformerConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, attention_window=self.attention_window, ) def get_pipeline_config(self): config = self.get_config() config.vocab_size = 300 return config def create_and_check_attention_mask_determinism( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerModel(config=config) model.to(torch_device) model.eval() attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) output_with_mask = model(input_ids, attention_mask=attention_mask)["last_hidden_state"] output_without_mask = model(input_ids)["last_hidden_state"] self.parent.assertTrue(torch.allclose(output_with_mask[0, 0, :5], output_without_mask[0, 0, :5], atol=1e-4)) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_model_with_global_attention_mask( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerModel(config=config) model.to(torch_device) model.eval() global_attention_mask = input_mask.clone() global_attention_mask[:, input_mask.shape[-1] // 2] = 0 global_attention_mask = global_attention_mask.to(torch_device) result = model( input_ids, attention_mask=input_mask, global_attention_mask=global_attention_mask, token_type_ids=token_type_ids, ) result = model(input_ids, token_type_ids=token_type_ids, global_attention_mask=global_attention_mask) result = model(input_ids, global_attention_mask=global_attention_mask) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = LongformerForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, global_attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = LongformerForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = LongformerForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = LongformerForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, global_attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs global_attention_mask = torch.zeros_like(input_ids) global_attention_mask[:, -1] = 1 inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask, "global_attention_mask": global_attention_mask, } return config, inputs_dict def prepare_config_and_inputs_for_question_answering(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs # Replace sep_token_id by some random id input_ids[input_ids == config.sep_token_id] = torch.randint(0, config.vocab_size, (1,)).item() # Make sure there are exactly three sep_token_id input_ids[:, -3:] = config.sep_token_id input_mask = 
torch.ones_like(input_ids) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels @require_torch class LongformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_pruning = False # pruning is not supported test_torchscript = False all_model_classes = ( ( LongformerModel, LongformerForMaskedLM, LongformerForSequenceClassification, LongformerForQuestionAnswering, LongformerForTokenClassification, LongformerForMultipleChoice, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": LongformerModel, "fill-mask": LongformerForMaskedLM, "question-answering": LongformerForQuestionAnswering, "text-classification": LongformerForSequenceClassification, "token-classification": LongformerForTokenClassification, "zero-shot": LongformerForSequenceClassification, } if is_torch_available() else {} ) # Need to use `0.6` instead of `0.5` for `test_disk_offload` model_split_percents = [0.6, 0.7, 0.9] # TODO: Fix the failed tests def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if ( pipeline_test_case_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast") ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def setUp(self): self.model_tester = LongformerModelTester(self) self.config_tester = ConfigTester(self, config_class=LongformerConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_attention_mask_determinism(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_attention_mask_determinism(*config_and_inputs) def test_model_global_attention_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_global_attention_mask(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_question_answering() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) @unittest.skip(reason="Longformer cannot keep gradients in attention or hidden states") def test_retain_grad_hidden_states_attentions(self): return @unittest.skip(reason="LongFormer calculates global attn only when attn_mask has non-zero elements") def 
test_batching_equivalence(self): return @require_torch @require_sentencepiece @require_tokenizers class LongformerModelIntegrationTest(unittest.TestCase): def _get_hidden_states(self): return torch.tensor( [ [ [ 4.98332758e-01, 2.69175139e00, -7.08081422e-03, 1.04915401e00, -1.83476661e00, 7.67220476e-01, 2.98580543e-01, 2.84803992e-02, ], [ -7.58357372e-01, 4.20635998e-01, -4.04739919e-02, 1.59924145e-01, 2.05135748e00, -1.15997978e00, 5.37166397e-01, 2.62873606e-01, ], [ -1.69438001e00, 4.17574660e-01, -1.49196962e00, -1.76483717e00, -1.94566312e-01, -1.71183858e00, 7.72903565e-01, -1.11557056e00, ], [ 5.44028163e-01, 2.05466114e-01, -3.63045868e-01, 2.41865062e-01, 3.20348382e-01, -9.05611176e-01, -1.92690727e-01, -1.19917547e00, ], ] ], dtype=torch.float32, device=torch_device, ) def test_diagonalize(self): hidden_states = self._get_hidden_states() hidden_states = hidden_states.reshape((1, 8, 4)) # set seq length = 8, hidden dim = 4 chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2) window_overlap_size = chunked_hidden_states.shape[2] self.assertTrue(window_overlap_size == 4) padded_hidden_states = LongformerSelfAttention._pad_and_diagonalize(chunked_hidden_states) self.assertTrue(padded_hidden_states.shape[-1] == chunked_hidden_states.shape[-1] + window_overlap_size - 1) # first row => [0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000] torch.testing.assert_close( padded_hidden_states[0, 0, 0, :4], chunked_hidden_states[0, 0, 0], rtol=1e-3, atol=1e-3 ) self.assertTrue( torch.allclose( padded_hidden_states[0, 0, 0, 4:], torch.zeros((3,), device=torch_device, dtype=torch.float32), atol=1e-3, ) ) # last row => [0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629] torch.testing.assert_close( padded_hidden_states[0, 0, -1, 3:], chunked_hidden_states[0, 0, -1], rtol=1e-3, atol=1e-3 ) self.assertTrue( torch.allclose( padded_hidden_states[0, 0, -1, :3], torch.zeros((3,), device=torch_device, dtype=torch.float32), atol=1e-3, ) ) def test_pad_and_transpose_last_two_dims(self): hidden_states = self._get_hidden_states() self.assertEqual(hidden_states.shape, (1, 4, 8)) padding = (0, 0, 0, 1) padded_hidden_states = LongformerSelfAttention._pad_and_transpose_last_two_dims(hidden_states, padding) self.assertEqual(padded_hidden_states.shape, (1, 8, 5)) expected_added_dim = torch.zeros((5,), device=torch_device, dtype=torch.float32) torch.testing.assert_close(expected_added_dim, padded_hidden_states[0, -1, :], rtol=1e-6, atol=1e-6) torch.testing.assert_close( hidden_states[0, -1, :], padded_hidden_states.view(1, -1)[0, 24:32], rtol=1e-6, atol=1e-6 ) def test_chunk(self): hidden_states = self._get_hidden_states() batch_size = 1 seq_length = 8 hidden_size = 4 hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size)) chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2) # expected slices across chunk and seq length dim expected_slice_along_seq_length = torch.tensor( [0.4983, -0.7584, -1.6944], device=torch_device, dtype=torch.float32 ) expected_slice_along_chunk = torch.tensor( [0.4983, -1.8348, -0.7584, 2.0514], device=torch_device, dtype=torch.float32 ) torch.testing.assert_close( chunked_hidden_states[0, :, 0, 0], expected_slice_along_seq_length, rtol=1e-3, atol=1e-3 ) torch.testing.assert_close(chunked_hidden_states[0, 0, :, 0], expected_slice_along_chunk, rtol=1e-3, atol=1e-3) self.assertEqual(chunked_hidden_states.shape, (1, 3, 4, 4)) def test_mask_invalid_locations(self): hidden_states = 
self._get_hidden_states() batch_size = 1 seq_length = 8 hidden_size = 4 hidden_states = hidden_states.reshape((batch_size, seq_length, hidden_size)) chunked_hidden_states = LongformerSelfAttention._chunk(hidden_states, window_overlap=2) hid_states_1 = chunked_hidden_states.clone() LongformerSelfAttention._mask_invalid_locations(hid_states_1, 1) self.assertTrue(torch.isinf(hid_states_1).sum().item() == 8) hid_states_2 = chunked_hidden_states.clone() LongformerSelfAttention._mask_invalid_locations(hid_states_2, 2) self.assertTrue(torch.isinf(hid_states_2).sum().item() == 24) hid_states_3 = chunked_hidden_states.clone()[:, :, :, :3] LongformerSelfAttention._mask_invalid_locations(hid_states_3, 2) self.assertTrue(torch.isinf(hid_states_3).sum().item() == 24) hid_states_4 = chunked_hidden_states.clone()[:, :, 2:, :] LongformerSelfAttention._mask_invalid_locations(hid_states_4, 2) self.assertTrue(torch.isinf(hid_states_4).sum().item() == 12) def test_layer_local_attn(self): model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") model.eval() layer = model.encoder.layer[0].attention.self.to(torch_device) hidden_states = self._get_hidden_states() batch_size, seq_length, hidden_size = hidden_states.size() attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device) attention_mask[:, -2:] = -10000 is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() output_hidden_states = layer( hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, )[0] self.assertEqual(output_hidden_states.shape, (1, 4, 8)) self.assertTrue( torch.allclose( output_hidden_states[0, 1], torch.tensor( [0.0019, 0.0122, -0.0171, -0.0256, -0.0300, 0.0173, -0.0115, 0.0048], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) def test_layer_global_attn(self): model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") model.eval() layer = model.encoder.layer[0].attention.self.to(torch_device) hidden_states = torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0) batch_size, seq_length, hidden_size = hidden_states.size() attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device) # create attn mask attention_mask[0, -2:] = 10000.0 attention_mask[0, -1:] = -10000.0 attention_mask[1, 1:] = 10000.0 is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() output_hidden_states = layer( hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, )[0] self.assertEqual(output_hidden_states.shape, (2, 4, 8)) self.assertTrue( torch.allclose( output_hidden_states[0, 2], torch.tensor( [-0.0651, -0.0393, 0.0309, -0.0342, -0.0066, -0.0155, -0.0209, -0.0494], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue( torch.allclose( output_hidden_states[1, -2], torch.tensor( [-0.0405, -0.0384, 0.0396, -0.0374, -0.0341, 0.0136, 0.0014, -0.0571], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) def test_layer_attn_probs(self): model = LongformerModel.from_pretrained("patrickvonplaten/longformer-random-tiny") model.eval() layer = model.encoder.layer[0].attention.self.to(torch_device) hidden_states = 
torch.cat([self._get_hidden_states(), self._get_hidden_states() - 0.5], dim=0) batch_size, seq_length, hidden_size = hidden_states.size() attention_mask = torch.zeros((batch_size, seq_length), dtype=torch.float32, device=torch_device) # create attn mask attention_mask[0, -2:] = 10000.0 attention_mask[0, -1:] = -10000.0 attention_mask[1, 1:] = 10000.0 is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() output_hidden_states, local_attentions, global_attentions = layer( hidden_states, attention_mask=attention_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=True, ) self.assertEqual(local_attentions.shape, (2, 4, 2, 8)) self.assertEqual(global_attentions.shape, (2, 2, 3, 4)) # All tokens with global attention have weight 0 in local attentions. self.assertTrue(torch.all(local_attentions[0, 2:4, :, :] == 0)) self.assertTrue(torch.all(local_attentions[1, 1:4, :, :] == 0)) # The weight of all tokens with local attention must sum to 1. self.assertTrue(torch.all(torch.abs(global_attentions[0, :, :2, :].sum(dim=-1) - 1) < 1e-6)) self.assertTrue(torch.all(torch.abs(global_attentions[1, :, :1, :].sum(dim=-1) - 1) < 1e-6)) self.assertTrue( torch.allclose( local_attentions[0, 0, 0, :], torch.tensor( [0.3328, 0.0000, 0.0000, 0.0000, 0.0000, 0.3355, 0.3318, 0.0000], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue( torch.allclose( local_attentions[1, 0, 0, :], torch.tensor( [0.2492, 0.2502, 0.2502, 0.0000, 0.0000, 0.2505, 0.0000, 0.0000], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) # All the global attention weights must sum to 1. self.assertTrue(torch.all(torch.abs(global_attentions.sum(dim=-1) - 1) < 1e-6)) self.assertTrue( torch.allclose( global_attentions[0, 0, 1, :], torch.tensor( [0.2500, 0.2500, 0.2500, 0.2500], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) self.assertTrue( torch.allclose( global_attentions[1, 0, 0, :], torch.tensor( [0.2497, 0.2500, 0.2499, 0.2504], dtype=torch.float32, device=torch_device, ), atol=1e-3, ) ) @slow def test_inference_no_head(self): model = LongformerModel.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) # 'Hello world!' input_ids = torch.tensor([[0, 20920, 232, 328, 1437, 2]], dtype=torch.long, device=torch_device) attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) output = model(input_ids, attention_mask=attention_mask)[0] output_without_mask = model(input_ids)[0] expected_output_slice = torch.tensor([0.0549, 0.1087, -0.1119, -0.0368, 0.0250], device=torch_device) torch.testing.assert_close(output[0, 0, -5:], expected_output_slice, rtol=1e-4, atol=1e-4) torch.testing.assert_close(output_without_mask[0, 0, -5:], expected_output_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_no_head_long(self): model = LongformerModel.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) # 'Hello world! 
' repeated 1000 times input_ids = torch.tensor( [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device ) # long input attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device) global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device) global_attention_mask[:, [1, 4, 21]] = 1 # Set global attention on a few random positions output = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask)[0] expected_output_sum = torch.tensor(74585.8594, device=torch_device) expected_output_mean = torch.tensor(0.0243, device=torch_device) torch.testing.assert_close(output.sum(), expected_output_sum, rtol=1e-4, atol=1e-4) torch.testing.assert_close(output.mean(), expected_output_mean, rtol=1e-4, atol=1e-4) @slow def test_inference_masked_lm_long(self): model = LongformerForMaskedLM.from_pretrained("allenai/longformer-base-4096") model.to(torch_device) # 'Hello world! ' repeated 1000 times input_ids = torch.tensor( [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device ) # long input input_ids = input_ids.to(torch_device) loss, prediction_scores = model(input_ids, labels=input_ids).to_tuple() expected_loss = torch.tensor(0.0074, device=torch_device) expected_prediction_scores_sum = torch.tensor(-6.1048e08, device=torch_device) expected_prediction_scores_mean = torch.tensor(-3.0348, device=torch_device) torch.testing.assert_close(loss, expected_loss, rtol=1e-4, atol=1e-4) torch.testing.assert_close(prediction_scores.sum(), expected_prediction_scores_sum, rtol=1e-4, atol=1e-4) torch.testing.assert_close(prediction_scores.mean(), expected_prediction_scores_mean, rtol=1e-4, atol=1e-4)
transformers/tests/models/longformer/test_modeling_longformer.py/0
{ "file_path": "transformers/tests/models/longformer/test_modeling_longformer.py", "repo_id": "transformers", "token_count": 15694 }
579
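# For quick reference, a minimal usage sketch of the global-attention API exercised by the
# slow integration tests in the file above. The checkpoint name and the `global_attention_mask`
# keyword are taken directly from those tests; the input sentence is arbitrary.
import torch
from transformers import AutoTokenizer, LongformerModel

tokenizer = AutoTokenizer.from_pretrained("allenai/longformer-base-4096")
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")
model.eval()

inputs = tokenizer("Hello world!", return_tensors="pt")
# Mark the first token as globally attending; all other tokens use local sliding-window attention.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1

with torch.no_grad():
    outputs = model(**inputs, global_attention_mask=global_attention_mask)
print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)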
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import math import unittest from transformers import AutoTokenizer, Mamba2Config, is_torch_available from transformers.testing_utils import ( Expectations, require_read_token, require_torch, require_torch_accelerator, slow, torch_device, ) from transformers.utils.import_utils import is_causal_conv1d_available, is_mamba_2_ssm_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Mamba2ForCausalLM, Mamba2Model, ) from transformers.models.mamba2.modeling_mamba2 import Mamba2Cache, Mamba2Mixer class Mamba2ConfigTester(ConfigTester): def _create_config(self, hidden_size: int, num_heads: int, expand: int, head_dim: int): _input_dict = self.inputs_dict.copy() _input_dict["hidden_size"] = hidden_size _input_dict["num_heads"] = num_heads _input_dict["expand"] = expand _input_dict["head_dim"] = head_dim return self.config_class(**_input_dict) def test_hidden_size_compatibility(self): self._create_config(hidden_size=2, num_heads=2, expand=2, head_dim=2) self._create_config(hidden_size=4, num_heads=4, expand=2, head_dim=2) self._create_config(hidden_size=2, num_heads=4, expand=4, head_dim=2) with self.parent.assertRaises(ValueError): self._create_config(hidden_size=2, num_heads=4, expand=2, head_dim=4) with self.parent.assertRaises(ValueError): self._create_config(hidden_size=4, num_heads=2, expand=4, head_dim=2) def run_common_tests(self): self.test_hidden_size_compatibility() return super().run_common_tests() class Mamba2ModelTester: def __init__( self, parent, batch_size=14, num_heads=8, n_groups=8, state_size=2, head_dim=8, conv_kernel=4, chunk_size=8, seq_length=7, is_training=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, hidden_act="silu", hidden_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, num_labels=3, num_choices=4, scope=None, tie_word_embeddings=False, ): self.parent = parent self.num_heads = num_heads self.n_groups = n_groups self.head_dim = head_dim self.state_size = state_size self.conv_kernel = conv_kernel self.chunk_size = chunk_size self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.bos_token_id = vocab_size - 1 self.eos_token_id = vocab_size - 1 self.pad_token_id = vocab_size 
- 1 self.tie_word_embeddings = tie_word_embeddings def get_large_model_config(self): return Mamba2Config.from_pretrained("mistralai/Mamba-Codestral-7B-v0.1") def prepare_config_and_inputs( self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False ): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) # Only left padding is valid attention_mask = torch.ones(size=(self.batch_size, self.seq_length), device=input_ids.device, dtype=torch.long) attention_mask[0, :1] = 0 sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config( gradient_checkpointing=gradient_checkpointing, ) return ( config, input_ids, attention_mask, sequence_labels, token_labels, choice_labels, ) def get_config(self, gradient_checkpointing=False): return Mamba2Config( head_dim=self.head_dim, num_heads=self.num_heads, n_groups=self.n_groups, state_size=self.state_size, conv_kernel=self.conv_kernel, chunk_size=self.chunk_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, activation_function=self.hidden_act, n_positions=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, use_cache=True, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, gradient_checkpointing=gradient_checkpointing, tie_word_embeddings=self.tie_word_embeddings, ) def prepare_config_and_inputs_for_common(self): ( config, input_ids, _, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() inputs_dict = {"input_ids": input_ids} return config, inputs_dict def create_and_check_mamba2_caching(self, config, input_ids, attention_mask, *args): model = Mamba2Model(config=config) model.to(torch_device) model.eval() output_whole = model(input_ids, attention_mask=attention_mask).last_hidden_state outputs = model( input_ids[:, :-1], attention_mask=attention_mask[:, :-1], use_cache=True, cache_position=torch.arange(0, config.conv_kernel, device=input_ids.device), ) output_one = outputs.last_hidden_state # Using the state computed on the first inputs, we will get the same output outputs = model( input_ids[:, -1:], attention_mask=attention_mask[:, -1:], use_cache=True, cache_params=outputs.cache_params, cache_position=torch.arange(config.conv_kernel, config.conv_kernel + 1, device=input_ids.device), ) output_two = outputs.last_hidden_state self.parent.assertTrue( torch.allclose(torch.cat([output_one, output_two], dim=1), output_whole, atol=1e-3, rtol=1e-3) ) def create_and_check_mamba2_slow_vs_fast_forward(self, config, input_ids, *args, gradient_checkpointing=False): model = Mamba2Model(config) model.eval() if not (is_mamba_2_ssm_available() and is_causal_conv1d_available()): self.parent.skipTest( "This test needs the Mamba2 fast path. Skipping as the necessary packages have not been found." ) if torch_device != "cuda": self.parent.skipTest("This test needs the Mamba2 fast path. 
Skipping as we need a cuda capable device.") model.to(torch_device) if gradient_checkpointing: model.gradient_checkpointing_enable() token_emb = model.embeddings(input_ids) outputs_fast = model.layers[0].mixer.cuda_kernels_forward(token_emb) outputs_slow = model.layers[0].mixer.torch_forward(token_emb) self.parent.assertTrue(torch.allclose(outputs_fast, outputs_slow, atol=1e-3, rtol=1e-3)) @require_torch class Mamba2ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Mamba2Model, Mamba2ForCausalLM) if is_torch_available() else () has_attentions = False # Mamba does not support attentions fx_compatible = False # FIXME let's try to support this @molbap test_torchscript = False # FIXME I think this should be doable @molbap @ArthurZucker test_missing_keys = False test_model_parallel = False test_pruning = False test_head_masking = False # Mamba does not have attention heads pipeline_model_mapping = ( {"feature-extraction": Mamba2Model, "text-generation": Mamba2ForCausalLM} if is_torch_available() else {} ) def setUp(self): self.model_tester = Mamba2ModelTester(self) self.config_tester = Mamba2ConfigTester( self, config_class=Mamba2Config, n_embd=37, common_properties=["hidden_size", "num_hidden_layers"] ) def test_mamba2_caching(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mamba2_caching(*config_and_inputs) def test_mamba2_slow_vs_fast_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mamba2_slow_vs_fast_forward(*config_and_inputs) # This test adjusts n_groups to half the original setting and effectively # creates a grouped SSD configuration in the mamba2 layers # See https://github.com/huggingface/transformers/pull/37533/ def test_mamba2_slow_vs_fast_forward_grouped(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs[0].n_groups //= 2 self.model_tester.create_and_check_mamba2_slow_vs_fast_forward(*config_and_inputs) def test_initialization(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() config.rescale_prenorm_residual = True configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if "dt_proj.bias" in name: dt = torch.exp( torch.tensor([0, 1]) * (math.log(config.time_step_max) - math.log(config.time_step_min)) + math.log(config.time_step_min) ).clamp(min=config.time_step_floor) inv_dt = dt + torch.log(-torch.expm1(-dt)) if param.requires_grad: self.assertTrue(param.data.max().item() <= inv_dt[1]) self.assertTrue(param.data.min().item() >= inv_dt[0]) elif "A_log" in name: A = torch.arange(1, config.num_heads + 1) torch.testing.assert_close(param.data, torch.log(A), rtol=1e-5, atol=1e-5) elif "D" in name: if param.requires_grad: # check if it's a ones like torch.testing.assert_close(param.data, torch.ones_like(param.data), rtol=1e-5, atol=1e-5) else: if param.requires_grad: if "mixer.conv1d.weight" in name or "mixer.dt_bias" in name or "mixer.out_proj.weight" in name: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="A large mamba2 would be necessary (and costly) for that") def test_multi_gpu_data_parallel_forward(self): pass def test_model_outputs_equivalence(self): config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, Mamba2Cache): # MODIFIED PART START recursive_check(tuple_object.conv_states, dict_object.conv_states) recursive_check(tuple_object.ssm_states, dict_object.ssm_states) elif isinstance(tuple_object, (list, tuple)): # MODIFIED PART END for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose(tuple_object, dict_object, atol=1e-5), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) @require_torch @slow @require_read_token class Mamba2IntegrationTest(unittest.TestCase): def setUp(self): self.model_id = "mistralai/Mamba-Codestral-7B-v0.1" self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, from_slow=True, legacy=False) self.prompt = ("[INST]Write a hello world program in C++.",) @require_read_token @slow @require_torch def test_simple_generate(self): """ Simple generate test to avoid regressions. Note: state-spaces (cuda) implementation and pure torch implementation have irreconciliable differences as of now, which will cause this test to fail in an environment with state-spaces installed. 
""" tokenizer = self.tokenizer tokenizer.pad_token_id = tokenizer.eos_token_id model = Mamba2ForCausalLM.from_pretrained(self.model_id, dtype=torch.bfloat16) model.to(torch_device) input_ids = tokenizer("[INST]Write a hello world program in C++.[/INST]", return_tensors="pt")["input_ids"].to( torch_device ) out = model.generate(input_ids, do_sample=False, use_cache=True, max_new_tokens=30) output_sentence = tokenizer.decode(out[0]) ground_truth_sentences = Expectations( { ("xpu", 3): """<s>[INST]Write a hello world program in C++.[/INST] Sure, here is a simple "Hello, World!" program written in C++:\n\n```cpp\n#include <iostream>\n""", ("cuda", 7): """<s>[INST]Write a hello world program in C++.[/INST] Sure, here is a simple "Hello, World!" program in C++:\n\n```cpp\n#include <iostream>\n\n""", } ) # fmt: skip ground_truth_sentence = ground_truth_sentences.get_expectation() self.assertEqual(output_sentence, ground_truth_sentence) @require_read_token @slow @require_torch_accelerator def test_batched_equivalence_with_cache(self): """ Verifies that batched generation matches individual generation. Important because of the specific caching mechanism + statefulness of mamba model. Depending on precision and devices, differences can be observed from generation to generation. """ tokenizer = self.tokenizer prompt = [ "[INST]Write C#.[/INST]", "[INST]Write a hello world in C++.[/INST]", "[INST] Write a simple Fibonacci number computation function in Rust that does memoization, with comments, in safe Rust.[/INST]", ] model = Mamba2ForCausalLM.from_pretrained(self.model_id, dtype=torch.bfloat16).to(torch_device) tokenizer.pad_token_id = tokenizer.eos_token_id # batched generation tokenized_prompts = tokenizer(prompt, return_tensors="pt", padding="longest").to(torch_device) batched_gen = model.generate(**tokenized_prompts, max_new_tokens=30, use_cache=True) batched_output = tokenizer.batch_decode(batched_gen, skip_special_tokens=True) # individual generation for index_gen, individual_prompt in enumerate(prompt): inputs = tokenizer(individual_prompt, return_tensors="pt", padding="longest").to(torch_device) individual_gen = model.generate(**inputs, max_new_tokens=30, use_cache=True) individual_output = tokenizer.batch_decode(individual_gen, skip_special_tokens=True)[0] self.assertEqual(individual_output[:100], batched_output[index_gen][:100]) @require_read_token @slow @require_torch_accelerator def test_batched_equivalence_without_cache(self): """ Verifies that batched generation matches individual generation without cache. Important because of the specific caching mechanism + statefulness of mamba model. Depending on precision and devices, differences can be observed from generation to generation. 
""" tokenizer = self.tokenizer prompt = [ "[INST]Write C#.[/INST]", "[INST]Write a hello world in C++.[/INST]", "[INST] Write a simple Fibonacci number computation function in Rust that does memoization, with comments, in safe Rust.[/INST]", ] model = Mamba2ForCausalLM.from_pretrained(self.model_id, dtype=torch.bfloat16).to(torch_device) tokenizer.pad_token_id = tokenizer.eos_token_id # batched generation tokenized_prompts = tokenizer(prompt, return_tensors="pt", padding="longest").to(torch_device) batched_gen = model.generate(**tokenized_prompts, max_new_tokens=30, use_cache=True) batched_output = tokenizer.batch_decode(batched_gen, skip_special_tokens=True) # individual generation for index_gen, individual_prompt in enumerate(prompt): inputs = tokenizer(individual_prompt, return_tensors="pt", padding="longest").to(torch_device) individual_gen = model.generate(**inputs, max_new_tokens=30, use_cache=True) individual_output = tokenizer.batch_decode(individual_gen, skip_special_tokens=True)[0] self.assertEqual(individual_output[:100], batched_output[index_gen][:100]) @slow @require_torch_accelerator def test_mamba2_mixer_train_vs_eval_equivalence(self): # Based on https://github.com/sustcsonglin/flash-linear-attention/issues/63 # Credit to zhixuan-lin B, T, D = 4, 512, 768 dtype = torch.bfloat16 config = Mamba2Config(num_heads=24, head_dim=64, hidden_size=768, expand=2, n_groups=1) torch.manual_seed(42) with torch.autocast(device_type=torch_device, dtype=dtype): with torch.no_grad(): mixer = Mamba2Mixer(config, layer_idx=0).to(torch_device) hidden_states = torch.rand(size=(B, T, D), dtype=dtype, device=torch_device) mixer.train() out_train = mixer(hidden_states) mixer.eval() out_eval = mixer(hidden_states) torch.testing.assert_close(out_train, out_eval, rtol=1e-3, atol=1e-3)
transformers/tests/models/mamba2/test_modeling_mamba2.py/0
{ "file_path": "transformers/tests/models/mamba2/test_modeling_mamba2.py", "repo_id": "transformers", "token_count": 10137 }
580
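# A minimal greedy-generation sketch following Mamba2IntegrationTest.test_simple_generate above;
# the checkpoint id, tokenizer flags, dtype, and generate() arguments are copied from that test.
# Note the checkpoint is gated, so access must be granted (read token) before it can be downloaded.
import torch
from transformers import AutoTokenizer, Mamba2ForCausalLM

model_id = "mistralai/Mamba-Codestral-7B-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_id, from_slow=True, legacy=False)
tokenizer.pad_token_id = tokenizer.eos_token_id

model = Mamba2ForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16)
input_ids = tokenizer("[INST]Write a hello world program in C++.[/INST]", return_tensors="pt")["input_ids"]

# Greedy decoding with the Mamba2 cache enabled, exactly as in the test.
out = model.generate(input_ids, do_sample=False, use_cache=True, max_new_tokens=30)
print(tokenizer.decode(out[0]))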
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Mimi model.""" import inspect import os import tempfile import unittest import numpy as np import pytest from datasets import Audio, load_dataset from pytest import mark from transformers import AutoFeatureExtractor, MimiConfig from transformers.testing_utils import ( is_flaky, is_torch_available, require_flash_attn, require_torch, require_torch_gpu, slow, torch_device, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor if is_torch_available(): import torch from transformers import MimiModel # Copied from transformers.tests.encodec.test_modeling_encodec.prepare_inputs_dict def prepare_inputs_dict( config, input_ids=None, input_values=None, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if input_ids is not None: encoder_dict = {"input_ids": input_ids} else: encoder_dict = {"input_values": input_values} decoder_dict = {"decoder_input_ids": decoder_input_ids} if decoder_input_ids is not None else {} return {**encoder_dict, **decoder_dict} @require_torch class MimiModelTester: def __init__( self, parent, batch_size=5, num_channels=1, is_training=False, intermediate_size=40, hidden_size=32, num_filters=8, num_residual_layers=1, upsampling_ratios=[8, 4], codebook_size=64, vector_quantization_hidden_dimension=64, codebook_dim=64, upsample_groups=32, num_hidden_layers=2, num_attention_heads=2, num_key_value_heads=2, sliding_window=4, use_cache=False, ): self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.is_training = is_training self.intermediate_size = intermediate_size self.hidden_size = hidden_size self.num_filters = num_filters self.num_residual_layers = num_residual_layers self.upsampling_ratios = upsampling_ratios self.codebook_size = codebook_size self.vector_quantization_hidden_dimension = vector_quantization_hidden_dimension self.codebook_dim = codebook_dim self.upsample_groups = upsample_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.sliding_window = sliding_window self.use_cache = use_cache def prepare_config_and_inputs(self, input_values_length=None): input_values = floats_tensor( [ self.batch_size, self.num_channels, self.intermediate_size if input_values_length is None else input_values_length, ], scale=1.0, ) config = self.get_config() inputs_dict = {"input_values": input_values} return config, inputs_dict def prepare_config_and_inputs_for_common(self, input_values_length=None): config, inputs_dict = self.prepare_config_and_inputs(input_values_length=input_values_length) return config, inputs_dict def prepare_config_and_inputs_for_model_class(self, model_class): config, inputs_dict = self.prepare_config_and_inputs() 
inputs_dict["audio_codes"] = ids_tensor([self.batch_size, 1, self.num_channels], self.codebook_size).type( torch.int32 ) return config, inputs_dict def get_config(self): return MimiConfig( audio_channels=self.num_channels, chunk_in_sec=None, hidden_size=self.hidden_size, num_filters=self.num_filters, num_residual_layers=self.num_residual_layers, upsampling_ratios=self.upsampling_ratios, codebook_size=self.codebook_size, vector_quantization_hidden_dimension=self.vector_quantization_hidden_dimension, upsample_groups=self.upsample_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, num_key_value_heads=self.num_key_value_heads, sliding_window=self.sliding_window, codebook_dim=self.codebook_dim, use_cache=self.use_cache, ) def create_and_check_model_forward(self, config, inputs_dict): model = MimiModel(config=config).to(torch_device).eval() input_values = inputs_dict["input_values"] result = model(input_values) self.parent.assertEqual( result.audio_values.shape, (self.batch_size, self.num_channels, self.intermediate_size) ) @require_torch class MimiModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (MimiModel,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_headmasking = False test_resize_embeddings = False test_torchscript = False def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): # model does support returning hidden states inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if "output_attentions" in inputs_dict: inputs_dict.pop("output_attentions") if "output_hidden_states" in inputs_dict: inputs_dict.pop("output_hidden_states") return inputs_dict def setUp(self): self.model_tester = MimiModelTester(self) self.config_tester = ConfigTester( self, config_class=MimiConfig, hidden_size=37, common_properties=[], has_text_modality=False ) def test_config(self): self.config_tester.run_common_tests() def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["input_values", "padding_mask", "num_quantizers"] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) @unittest.skip(reason="The MimiModel does not have `inputs_embeds` logics") def test_inputs_embeds(self): pass @unittest.skip(reason="The MimiModel does not have `inputs_embeds` logics") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="The MimiModel does not have the usual `attention` logic") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="The MimiModel does not have the usual `attention` logic") def test_torchscript_output_attentions(self): pass @unittest.skip(reason="The MimiModel does not have the usual `hidden_states` logic") def test_torchscript_output_hidden_state(self): pass # Copied from transformers.tests.encodec.test_modeling_encodec.MimiModelTest._create_and_check_torchscript def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") 
configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) main_input_name = model_class.main_input_name try: main_input = inputs[main_input_name] model(main_input) traced_model = torch.jit.trace(model, main_input) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict: if key not in model_state_dict: non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): if layer_name in loaded_model_state_dict: p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. 
# (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() @unittest.skip(reason="The MimiModel does not have the usual `attention` logic") def test_attention_outputs(self): pass @unittest.skip(reason="The MimiModel does not have the usual `hidden_states` logic") def test_hidden_states_output(self): pass # Copied from transformers.tests.encodec.test_modeling_encodec.MimiModelTest.test_determinism def test_determinism(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_determinism(first, second): # outputs are not tensors but list (since each sequence don't have the same frame_length) out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): first = model(**self._prepare_for_class(inputs_dict, model_class))[0] second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_determinism(tensor1, tensor2) else: check_determinism(first, second) # Copied from transformers.tests.encodec.test_modeling_encodec.MimiModelTest.test_model_outputs_equivalence def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs) self.assertTrue(isinstance(tuple_output, tuple)) self.assertTrue(isinstance(dict_output, dict)) for tuple_value, dict_value in zip(tuple_output, dict_output.values()): self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_value), set_nan_tensor_to_zero(dict_value), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_value - dict_value))}. Tuple has `nan`:" f" {torch.isnan(tuple_value).any()} and `inf`: {torch.isinf(tuple_value)}. Dict has" f" `nan`: {torch.isnan(dict_value).any()} and `inf`: {torch.isinf(dict_value)}." 
), ) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = ["conv", "input_proj", "output_proj"] if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # Copied from transformers.tests.encodec.test_modeling_encodec.MimiModelTest.test_identity_shortcut def test_identity_shortcut(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() config.use_conv_shortcut = False self.model_tester.create_and_check_model_forward(config, inputs_dict) @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow @is_flaky() def test_flash_attn_2_inference_equivalence(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_fa = model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation="flash_attention_2" ) model_fa.to(torch_device) model = model_class.from_pretrained(tmpdirname, dtype=torch.bfloat16) model.to(torch_device) dummy_input = inputs_dict[model.main_input_name][:1] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) outputs = model(dummy_input) outputs_fa = model_fa(dummy_input) logits = outputs[1] logits_fa = outputs_fa[1] assert torch.allclose(logits_fa, logits, atol=4e-2, rtol=4e-2) @unittest.skip(reason="The MimiModel does not support right padding") def test_flash_attn_2_inference_equivalence_right_padding(self): pass @unittest.skip(reason="The MimiModel does not have support dynamic compile yet") @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass # Copied from transformers.tests.encodec.test_modeling_encodec.normalize def normalize(arr): norm = np.linalg.norm(arr) normalized_arr = arr / norm return normalized_arr # Copied from transformers.tests.encodec.test_modeling_encodec.compute_rmse def compute_rmse(arr1, arr2): arr1_normalized = normalize(arr1) arr2_normalized = normalize(arr2) return np.sqrt(((arr1_normalized - arr2_normalized) ** 2).mean()) @slow @require_torch class MimiIntegrationTest(unittest.TestCase): def test_integration_using_cache_decode(self): expected_rmse = { "8": 0.0018785292, "32": 0.0012330565, } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") model_id = "kyutai/mimi" model = MimiModel.from_pretrained(model_id, use_cache=True).to(torch_device) processor = AutoFeatureExtractor.from_pretrained(model_id) librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) audio_sample = librispeech_dummy[-1]["audio"]["array"] inputs = processor( raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt", ).to(torch_device) for num_codebooks, 
expected_rmse in expected_rmse.items(): with torch.no_grad(): # use max bandwidth for best possible reconstruction encoder_outputs = model.encode(inputs["input_values"], num_quantizers=int(num_codebooks)) audio_codes = encoder_outputs[0] decoder_outputs_first_part = model.decode(audio_codes[:, :, : audio_codes.shape[2] // 2]) decoder_outputs_second_part = model.decode( audio_codes[:, :, audio_codes.shape[2] // 2 :], decoder_past_key_values=decoder_outputs_first_part.decoder_past_key_values, ) audio_output_entire_context = model.decode(audio_codes)[0] audio_output_concat_context = torch.cat( [decoder_outputs_first_part[0], decoder_outputs_second_part[0]], dim=2 ) # make sure audios are more or less equal # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0 rmse = compute_rmse( audio_output_concat_context.squeeze().cpu().numpy(), audio_output_entire_context.squeeze().cpu().numpy(), ) self.assertTrue(rmse < 1e-3) def test_integration_encode_with_padding_cache(self): """ We test here the possibility to run Mimi in a streaming manner, i.e. chunk by chunk. 1. we encode a first time the entire audio 2. we encode the audio chunk by chunk, each chunk being the smallest size possible for the model (i.e. the frame size) This test must be run on CPU since GPU floating point operations accumulate rounding errors that cause test failures. """ librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") model_id = "kyutai/mimi" model = MimiModel.from_pretrained(model_id, use_cache=True).to("cpu") processor = AutoFeatureExtractor.from_pretrained(model_id) librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) audio_sample = librispeech_dummy[-1]["audio"]["array"] inputs = processor( raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt", ).to("cpu") frame_size = model.config.frame_size audio_codes = model.encode(inputs["input_values"]).audio_codes # streaming chunk by chunk encoder_past_key_values = None padding_cache = None encoded_frames_list = [] for start in range(0, inputs["input_values"].shape[-1], frame_size): input_values_chunk = inputs["input_values"][:, :, start : start + frame_size] encoder_outputs = model.encode( input_values_chunk, padding_cache=padding_cache, encoder_past_key_values=encoder_past_key_values, use_streaming=True, ) encoder_past_key_values = encoder_outputs.encoder_past_key_values padding_cache = encoder_outputs.padding_cache encoded_frames_list.append(encoder_outputs.audio_codes) streamed_audio_codes = torch.cat(encoded_frames_list, dim=-1) torch.testing.assert_close(streamed_audio_codes, audio_codes) def test_integration(self): expected_rmses = { "8": 0.0018785292, "32": 0.0012330565, } expected_codesums = { "8": 426176, "32": 1795819, } librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") model_id = "kyutai/mimi" processor = AutoFeatureExtractor.from_pretrained(model_id) librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate)) audio_sample = librispeech_dummy[-1]["audio"]["array"] inputs = processor( raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt", ).to(torch_device) for use_cache in [False, True]: model = MimiModel.from_pretrained(model_id, use_cache=use_cache).to(torch_device) for num_codebooks, expected_rmse in expected_rmses.items(): with torch.no_grad(): # use max bandwidth for best possible 
reconstruction encoder_outputs = model.encode(inputs["input_values"], num_quantizers=int(num_codebooks)) audio_code_sums = encoder_outputs[0].sum().item() # make sure audio encoded codes are correct # assert relative difference less than a threshold, because `audio_code_sums` varies a bit # depending on torch version self.assertTrue( np.abs(audio_code_sums - expected_codesums[num_codebooks]) <= (3e-3 * audio_code_sums) ) input_values_dec = model.decode(encoder_outputs[0], padding_mask=inputs["padding_mask"])[0] input_values_enc_dec = model( inputs["input_values"], inputs["padding_mask"], num_quantizers=int(num_codebooks) )[1] # make sure forward and decode gives same result torch.testing.assert_close(input_values_dec, input_values_enc_dec) # make sure shape matches self.assertTrue(inputs["input_values"].shape == input_values_enc_dec.shape) arr = inputs["input_values"][0].cpu().numpy() arr_enc_dec = input_values_enc_dec[0].cpu().numpy() # make sure audios are more or less equal # the RMSE of two random gaussian noise vectors with ~N(0, 1) is around 1.0 rmse = compute_rmse(arr, arr_enc_dec) self.assertTrue(np.abs(rmse - expected_rmse) < 1e-5)
transformers/tests/models/mimi/test_modeling_mimi.py/0
{ "file_path": "transformers/tests/models/mimi/test_modeling_mimi.py", "repo_id": "transformers", "token_count": 11725 }
581
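# A minimal encode/decode round trip distilled from MimiIntegrationTest.test_integration above;
# the checkpoint id, the num_quantizers argument, and the decode(..., padding_mask=...) call are
# taken from that test, while a one-second sine wave stands in for the LibriSpeech sample.
import numpy as np
import torch
from transformers import AutoFeatureExtractor, MimiModel

model = MimiModel.from_pretrained("kyutai/mimi")
processor = AutoFeatureExtractor.from_pretrained("kyutai/mimi")

# Synthetic 440 Hz tone at the feature extractor's sampling rate (stand-in for real speech).
audio = np.sin(2 * np.pi * 440 * np.arange(processor.sampling_rate) / processor.sampling_rate)
inputs = processor(raw_audio=audio, sampling_rate=processor.sampling_rate, return_tensors="pt")

with torch.no_grad():
    encoder_outputs = model.encode(inputs["input_values"], num_quantizers=8)
    audio_values = model.decode(encoder_outputs.audio_codes, padding_mask=inputs["padding_mask"])[0]
print(audio_values.shape)  # (batch_size, channels, num_samples)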
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch OLMoE model.""" import unittest from parameterized import parameterized from transformers import OlmoeConfig, is_torch_available, set_seed from transformers.models.auto.tokenization_auto import AutoTokenizer from transformers.models.gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast from transformers.testing_utils import ( require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OlmoeForCausalLM, OlmoeModel, ) class OlmoeModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, hidden_act="silu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, pad_token_id=0, scope=None, num_experts_per_tok=2, num_experts=8, norm_topk_prob=False, output_router_logits=False, router_aux_loss_coef=0.001, intermediate_size=12, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.pad_token_id = pad_token_id self.scope = scope self.num_experts_per_tok = num_experts_per_tok self.num_experts = num_experts self.norm_topk_prob = norm_topk_prob self.output_router_logits = output_router_logits self.router_aux_loss_coef = router_aux_loss_coef def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = torch.tril(torch.ones_like(input_ids).to(torch_device)) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], 
self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): return OlmoeConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, num_experts_per_tok=self.num_experts_per_tok, num_experts=self.num_experts, norm_topk_prob=self.norm_topk_prob, output_router_logits=self.output_router_logits, router_aux_loss_coef=self.router_aux_loss_coef, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = OlmoeModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class OlmoeModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (OlmoeModel, OlmoeForCausalLM) if is_torch_available() else () pipeline_model_mapping = ( { "feature-extraction": OlmoeModel, "text-generation": OlmoeForCausalLM, } if is_torch_available() else {} ) test_pruning = False fx_compatible = False # Need to use `0.8` instead of `0.9` for `test_cpu_offload` # This is because we are hitting edge cases with the causal_mask buffer model_split_percents = [0.5, 0.7, 0.8] def setUp(self): self.model_tester = OlmoeModelTester(self) self.config_tester = ConfigTester(self, config_class=OlmoeConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="OLMoE does not support head pruning.") def test_headmasking(self): pass def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) @parameterized.expand([("linear",), ("dynamic",)]) def test_model_rope_scaling(self, scaling_type): config, _ = self.model_tester.prepare_config_and_inputs_for_common() short_input = ids_tensor([1, 10], config.vocab_size) long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size) set_seed(42) # Fixed seed at init time so the two models get the same random weights original_model = OlmoeModel(config) original_model.to(torch_device) original_model.eval() 
original_short_output = original_model(short_input).last_hidden_state original_long_output = original_model(long_input).last_hidden_state set_seed(42) # Fixed seed at init time so the two models get the same random weights config.rope_scaling = {"type": scaling_type, "factor": 10.0} scaled_model = OlmoeModel(config) scaled_model.to(torch_device) scaled_model.eval() scaled_short_output = scaled_model(short_input).last_hidden_state scaled_long_output = scaled_model(long_input).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": torch.testing.assert_close(original_short_output, scaled_short_output, rtol=1e-5, atol=1e-5) else: self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5)) # The output should be different for long inputs self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5)) @require_torch class OlmoeIntegrationTest(unittest.TestCase): @slow def test_model_7b_logits(self): input_ids = [[1, 306, 4658, 278, 6593, 310, 2834, 338]] model = OlmoeForCausalLM.from_pretrained("allenai/OLMoE-1B-7B-0924", device_map="auto") out = model(torch.tensor(input_ids)).logits.float() # Expected mean on dim = -1 EXPECTED_MEAN = torch.tensor([[-1.3814, -3.4450, -2.2990, -1.9542, -2.4387, -2.7941, -2.9312, -2.8309]]) torch.testing.assert_close(out.mean(-1), EXPECTED_MEAN, rtol=1e-2, atol=1e-2) # slicing logits[0, 0, 0:30] EXPECTED_SLICE = torch.tensor([-2.3874, -2.4076, -2.4995, 4.2278, 1.4004, -0.0252, 0.4189, -2.7560, 0.3531, 1.6678, -0.7941, -1.1818, -0.2920, 0.7131, -1.4173, 1.6723, 0.5406, 0.1345, -0.1800, 0.2304, 1.2791, 0.7489, 0.6341, -0.0151, -1.3693, -1.2532, -2.3921, 0.7376, 1.6876, 0.5483]) # fmt: skip torch.testing.assert_close(out[0, 0, :30], EXPECTED_SLICE, rtol=1e-2, atol=1e-2) @slow def test_model_7b_greedy_generation(self): EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that \nthe speed of light is the same for all observers, no matter \nhow fast they are moving. This is a very counter-intuitive \nconcept, and it took Einstein a long time to come up with \nthe theory. 
The theory of relativity is based on two \npostulates""" prompt = "Simply put, the theory of relativity states that " tokenizer = AutoTokenizer.from_pretrained("allenai/OLMoE-1B-7B-0924", device_map="auto") input_ids = tokenizer.encode(prompt, return_tensors="pt") model = OlmoeForCausalLM.from_pretrained("allenai/OLMoE-1B-7B-0924", device_map="auto") # greedy generation outputs generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False) text = tokenizer.decode(generated_ids[0], skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, text) @require_tokenizers def test_fast_special_tokens(self): fast_tokenizer = GPTNeoXTokenizerFast.from_pretrained("allenai/OLMoE-1B-7B-0924") original_add_eos_token = fast_tokenizer.add_eos_token fast_tokenizer.add_eos_token = False fast = fast_tokenizer.encode("A sample test") self.assertEqual(fast, [34, 3410, 1071]) fast_tokenizer.add_eos_token = True fast = fast_tokenizer.encode("A sample test") self.assertEqual(fast, [34, 3410, 1071, 50279]) fast_tokenizer.add_eos_token = original_add_eos_token @require_tokenizers def test_simple_encode_decode(self): rust_tokenizer = GPTNeoXTokenizerFast.from_pretrained("allenai/OLMoE-1B-7B-0924") self.assertEqual(rust_tokenizer.encode("This is a test"), [1552, 310, 247, 1071]) self.assertEqual(rust_tokenizer.decode([1552, 310, 247, 1071], skip_special_tokens=True), "This is a test") # bytefallback showcase self.assertEqual(rust_tokenizer.encode("生活的真谛是"), [20025, 46549, 5225, 48561, 33656, 238, 12105]) # fmt: skip self.assertEqual( rust_tokenizer.decode([20025, 46549, 5225, 48561, 33656, 238, 12105], skip_special_tokens=True), "生活的真谛是", ) # Inner spaces showcase self.assertEqual(rust_tokenizer.encode("Hi Hello"), [12764, 50276, 12092]) self.assertEqual(rust_tokenizer.decode([12764, 50276, 12092], skip_special_tokens=True), "Hi Hello") self.assertEqual(rust_tokenizer.encode("Hi Hello"), [12764, 50275, 12092]) self.assertEqual(rust_tokenizer.decode([12764, 50275, 12092], skip_special_tokens=True), "Hi Hello") self.assertEqual(rust_tokenizer.encode(""), []) self.assertEqual(rust_tokenizer.encode(" "), [209]) self.assertEqual(rust_tokenizer.encode(" "), [50276]) self.assertEqual(rust_tokenizer.encode(" Hello"), [24387])
transformers/tests/models/olmoe/test_modeling_olmoe.py/0
{ "file_path": "transformers/tests/models/olmoe/test_modeling_olmoe.py", "repo_id": "transformers", "token_count": 6000 }
582
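Illustrative note (not part of the dataset record above or below): the OLMoE scaling test relies on rope_scaling being set on the config before the model is instantiated, and on "dynamic" scaling leaving short-input outputs untouched while "linear" scaling changes them. A minimal sketch of that pattern, assuming a deliberately tiny OlmoeConfig whose size values are made up for illustration:

from transformers import OlmoeConfig, OlmoeModel

# Tiny config purely for illustration; real checkpoints use much larger values.
config = OlmoeConfig(
    vocab_size=99, hidden_size=32, intermediate_size=64,
    num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=2,
    num_experts=4, num_experts_per_tok=2, max_position_embeddings=64,
)
# "dynamic" scaling only kicks in for inputs longer than max_position_embeddings,
# so short inputs should match the unscaled model (what the test above asserts).
config.rope_scaling = {"type": "dynamic", "factor": 10.0}
model = OlmoeModel(config)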
# coding=utf-8 # Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import shutil import tempfile import unittest from transformers.testing_utils import require_av, require_vision from transformers.utils import is_vision_available from ...test_processing_common import ProcessorTesterMixin if is_vision_available(): from transformers import ( AutoProcessor, Ovis2ImageProcessor, Ovis2Processor, Qwen2TokenizerFast, ) @require_vision class Ovis2ProcessorTest(ProcessorTesterMixin, unittest.TestCase): processor_class = Ovis2Processor def setUp(self): self.tmpdirname = tempfile.mkdtemp() image_processor = Ovis2ImageProcessor() tokenizer = Qwen2TokenizerFast.from_pretrained("thisisiron/Ovis2-1B-hf") processor_kwargs = self.prepare_processor_dict() processor = Ovis2Processor(image_processor=image_processor, tokenizer=tokenizer, **processor_kwargs) processor.save_pretrained(self.tmpdirname) def get_tokenizer(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer def get_image_processor(self, **kwargs): return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor def prepare_processor_dict(self): return { "chat_template": "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n'}}{% if message['content'] is string %}{{ message['content'] }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' %}{{ '<image>\n' }}{% elif content['type'] == 'text' %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{'<|im_end|>\n'}}{% endfor %}{% if add_generation_prompt %}{{'<|im_start|>assistant\n' }}{% endif %}", } # fmt: skip def test_processor_to_json_string(self): processor = self.get_processor() obj = json.loads(processor.to_json_string()) for key, value in self.prepare_processor_dict().items(): # chat_tempalate are tested as a separate test because they are saved in separate files if key != "chat_template": self.assertEqual(obj[key], value) self.assertEqual(getattr(processor, key, None), value) def test_chat_template_is_saved(self): processor_loaded = self.processor_class.from_pretrained(self.tmpdirname) processor_dict_loaded = json.loads(processor_loaded.to_json_string()) # chat templates aren't serialized to json in processors self.assertFalse("chat_template" in processor_dict_loaded) # they have to be saved as separate file and loaded back from that file # so we check if the same template is loaded processor_dict = self.prepare_processor_dict() self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None)) def tearDown(self): shutil.rmtree(self.tmpdirname) def test_chat_template(self): processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-1B-hf") expected_prompt = "<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|>\n<|im_start|>assistant\n" messages = [ { "role": "user", "content": [ 
{"type": "image"}, {"type": "text", "text": "What is shown in this image?"}, ], }, ] formatted_prompt = processor.apply_chat_template(messages, add_generation_prompt=True) self.assertEqual(expected_prompt, formatted_prompt) @require_av def test_chat_template_dict(self): processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-1B-hf") messages = [ { "role": "user", "content": [ {"type": "image"}, {"type": "text", "text": "What is shown in this image?"}, ], }, ] formatted_prompt_tokenized = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True) expected_output = [[151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 27, 1805, 397, 3838, 374, 6839, 304, 419, 2168, 30, 151645, 198, 151644, 77091, 198]] # fmt: skip self.assertListEqual(expected_output, formatted_prompt_tokenized) out_dict = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True) self.assertListEqual(list(out_dict.keys()), ["input_ids", "attention_mask"])
transformers/tests/models/ovis2/test_processor_ovis2.py/0
{ "file_path": "transformers/tests/models/ovis2/test_processor_ovis2.py", "repo_id": "transformers", "token_count": 2107 }
583
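Illustrative note (not part of the dataset records; the temporary-directory handling is an assumption of this sketch): the Ovis2 processor test above checks that the chat template survives a save/load round trip while staying out of the processor's JSON config. The same check, stated directly:

import json
import tempfile

from transformers import AutoProcessor

with tempfile.TemporaryDirectory() as tmp:
    processor = AutoProcessor.from_pretrained("thisisiron/Ovis2-1B-hf")
    template = processor.chat_template
    processor.save_pretrained(tmp)  # the template is written to its own file, not into processor_config.json
    reloaded = AutoProcessor.from_pretrained(tmp)
    assert "chat_template" not in json.loads(reloaded.to_json_string())
    assert reloaded.chat_template == template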
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Pix2Struct model.""" import copy import inspect import os import tempfile import unittest import numpy as np import requests from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( Pix2StructForConditionalGeneration, Pix2StructProcessor, Pix2StructTextModel, Pix2StructVisionModel, ) if is_vision_available(): from PIL import Image class Pix2StructVisionModelTester: def __init__( self, parent, batch_size=12, image_size=30, patch_size=2, num_channels=3, is_training=True, hidden_size=12, patch_embed_hidden_size=12, projection_dim=32, max_patches=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=1e-10, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_embed_hidden_size = patch_embed_hidden_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.max_patches = max_patches self.seq_length = self.max_patches self.patch_proj_dim = ((patch_size**2) * num_channels) + 2 self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): flattened_patches = floats_tensor([self.batch_size, self.max_patches, self.patch_proj_dim]) config = self.get_config() return config, flattened_patches def get_config(self): return Pix2StructVisionConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, patch_embed_hidden_size=self.patch_embed_hidden_size, ) def create_and_check_model(self, config, flattened_patches): model = Pix2StructVisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(flattened_patches) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 
self.seq_length, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, flattened_patches = config_and_inputs inputs_dict = { "flattened_patches": flattened_patches, "attention_mask": torch.randint(0, 2, (self.batch_size, self.max_patches)), } return config, inputs_dict @require_torch class Pix2StructVisionModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as Pix2Struct does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (Pix2StructVisionModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = Pix2StructVisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Pix2StructVisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="Pix2StructVision does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["flattened_patches"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`") def test_training(self): pass @unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`") def test_retain_grad_hidden_states_attentions(self): pass @slow def test_model_from_pretrained(self): model_name = "google/pix2struct-textcaps-base" model = Pix2StructVisionModel.from_pretrained(model_name) self.assertIsNotNone(model) class Pix2StructTextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=12, projection_dim=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, bos_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length 
self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.d_kv = hidden_size // num_attention_heads self.vocab_size = vocab_size self.hidden_size = hidden_size self.projection_dim = projection_dim self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return Pix2StructTextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, bos_token_id=self.bos_token_id, d_kv=self.d_kv, ) def create_and_check_model(self, config, input_ids, input_mask): model = Pix2StructTextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class Pix2StructTextModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (Pix2StructTextModel,) if is_torch_available() else () fx_compatible = False test_pruning = False test_head_masking = False def setUp(self): self.model_tester = Pix2StructTextModelTester(self) self.config_tester = ConfigTester(self, config_class=Pix2StructTextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`") def test_training(self): pass @unittest.skip(reason="Training is tested directly on `Pix2StructTextImageModelTest`") def test_training_gradient_checkpointing(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip( reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124" ) def test_training_gradient_checkpointing_use_reentrant_false(self): 
pass @unittest.skip(reason="Pix2Struct does not use inputs_embeds") def test_inputs_embeds(self): pass @slow def test_model_from_pretrained(self): model_name = "google/pix2struct-textcaps-base" model = Pix2StructTextModel.from_pretrained(model_name) self.assertIsNotNone(model) class Pix2StructModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = Pix2StructTextModelTester(parent, **text_kwargs) self.vision_model_tester = Pix2StructVisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.seq_length = self.text_model_tester.seq_length # need seq_length for common tests self.is_training = is_training self.max_patches = self.vision_model_tester.max_patches def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, flattened_patches = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config(text_config, vision_config) return config, input_ids, attention_mask, flattened_patches def get_config(self, text_config, vision_config): return Pix2StructConfig( text_config=self.text_model_tester.get_config().to_dict(), vision_config=self.vision_model_tester.get_config().to_dict(), projection_dim=64, ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, decoder_attention_mask, flattened_patches = config_and_inputs attention_mask = (flattened_patches.sum(dim=-1) != 0).float() inputs_dict = { "decoder_input_ids": input_ids, "labels": input_ids, "decoder_attention_mask": decoder_attention_mask, "flattened_patches": flattened_patches, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class Pix2StructModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Pix2StructForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( {"image-to-text": Pix2StructForConditionalGeneration, "image-text-to-text": Pix2StructForConditionalGeneration} if is_torch_available() else {} ) fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = True test_attention_outputs = False test_torchscript = False def setUp(self): self.model_tester = Pix2StructModelTester(self) def test_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config).to(torch_device) output = model(**input_dict) self.assertEqual( output[1].shape, ( self.model_tester.vision_model_tester.batch_size, self.model_tester.text_model_tester.seq_length, self.model_tester.text_model_tester.vocab_size, ), ) def test_generative_model(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_generative_model_classes: model = model_class(config).eval().to(torch_device) output = model.generate(**input_dict, use_cache=False, min_new_tokens=10, max_new_tokens=10) output_use_cache = model.generate(**input_dict, use_cache=True, min_new_tokens=10, max_new_tokens=10) torch.testing.assert_close(output, output_use_cache) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass 
@unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Pix2StructModel does not have input/output embeddings") def test_model_get_set_embeddings(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "flattened_patches", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs", "past_key_values", "labels", "decoder_inputs_embeds", "use_cache", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_training(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() def test_training_gradient_checkpointing(self): if not self.model_tester.is_training: self.skipTest(reason="model_tester.is_training is set to False") for model_class in self.all_model_classes[:-1]: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_cache = False config.return_dict = True model = model_class(config) model.to(torch_device) model.gradient_checkpointing_enable() model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) # hardcode labels to be the same as input_ids inputs["labels"] = inputs["input_ids"] loss = model(**inputs).loss loss.backward() # override as the `logit_scale` parameter initialization is different for Pix2Struct def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: # check if `logit_scale` is initialized as per the original implementation if name == "logit_scale": self.assertAlmostEqual( param.data.item(), np.log(1 / 0.07), delta=1e-3, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: # See PR #38607 (to avoid flakiness) data = torch.flatten(param.data) n_elements = torch.numel(data) # skip 2.5% of elements on each side to avoid issues caused by `nn.init.trunc_normal_` described in # https://github.com/huggingface/transformers/pull/27906#issuecomment-1846951332 n_elements_to_skip_on_each_side = int(n_elements * 0.025) data_to_check = torch.sort(data).values if n_elements_to_skip_on_each_side > 0: data_to_check = data_to_check[ n_elements_to_skip_on_each_side:-n_elements_to_skip_on_each_side ] self.assertIn( ((data_to_check.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly 
initialized", ) # overwrite because `vocab_size` is not an attribute of `Pix2StructConfig` but rather `Pix2StructTextConfig` def test_resize_tokens_embeddings(self): original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is set to False") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.text_config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Decoder input ids should be clamped to the maximum size of the vocabulary if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # overwrite because `vocab_size` is not an attribute of `Pix2StructConfig` but rather `Pix2StructTextConfig` def test_resize_embeddings_untied(self): original_config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: self.skipTest(reason="test_resize_embeddings is set to False") original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: self.skipTest(reason="Model cannot untie embeddings") for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.text_config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.text_config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Decoder input ids should be clamped to the maximum size of the vocabulary if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: self.skipTest(reason="test_torchscript is set to False") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() try: input_ids = inputs_dict["input_ids"] flattened_patches = inputs_dict["flattened_patches"] # Pix2Struct needs flattened_patches traced_model = torch.jit.trace(model, (input_ids, flattened_patches)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load 
module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() non_persistent_buffers = {} for key in loaded_model_state_dict: if key not in model_state_dict: non_persistent_buffers[key] = loaded_model_state_dict[key] loaded_model_state_dict = { key: value for key, value in loaded_model_state_dict.items() if key not in non_persistent_buffers } self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) model_buffers = list(model.buffers()) for non_persistent_buffer in non_persistent_buffers.values(): found_buffer = False for i, model_buffer in enumerate(model_buffers): if torch.equal(non_persistent_buffer, model_buffer): found_buffer = True break self.assertTrue(found_buffer) model_buffers.pop(i) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_load_vision_text_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Save Pix2StructConfig and check if we can load Pix2StructVisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = Pix2StructVisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save Pix2StructConfig and check if we can load Pix2StructTextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = Pix2StructTextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, prompt_length): # overwrite because # pix2struct seq length depends on image inputs prompt_length = self.model_tester.max_patches encoder_expected_shape = (batch_size, config.num_attention_heads, prompt_length, prompt_length) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, prompt_length): # overwrite because # pix2struct seq length depends on image inputs prompt_length = self.model_tester.max_patches encoder_expected_shape = (batch_size, prompt_length, config.hidden_size) self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in hidden_states], [encoder_expected_shape] * len(hidden_states), ) # We will verify our results on an image of a stop sign def prepare_img(): url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @require_vision @require_torch @slow class Pix2StructIntegrationTest(unittest.TestCase): def test_inference_image_captioning(self): model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base").to(torch_device) processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base") image = prepare_img() # image only inputs = processor(images=image, return_tensors="pt").to(torch_device) predictions = model.generate(**inputs) self.assertEqual( processor.decode(predictions[0], skip_special_tokens=True), "A stop 
sign is on a street corner." ) def test_batched_inference_image_captioning(self): model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base").to(torch_device) processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base") image_1 = prepare_img() second_url = ( "https://www.connollycove.com/wp-content/uploads/2019/06/temple-bar-dublin-world-famous-irish-pub.jpg" ) image_2 = Image.open(requests.get(second_url, stream=True).raw) # image only inputs = processor(images=[image_1, image_2], return_tensors="pt").to(torch_device) predictions = model.generate(**inputs) self.assertEqual( processor.decode(predictions[0], skip_special_tokens=True), "A stop sign is on a street corner." ) self.assertEqual( processor.decode(predictions[1], skip_special_tokens=True), "A row of books including The Temple Bar and Guiness.", ) def test_batched_inference_image_captioning_conditioned(self): model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base").to(torch_device) processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base") image_1 = prepare_img() second_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/temple-bar-dublin-world-famous-irish-pub.jpg" image_2 = Image.open(requests.get(second_url, stream=True).raw) texts = ["A picture of", "An photography of"] # image only inputs = processor(images=[image_1, image_2], text=texts, return_tensors="pt", add_special_tokens=False).to( torch_device ) predictions = model.generate(**inputs) self.assertEqual( processor.decode(predictions[0], skip_special_tokens=True), "A picture of a stop sign with a red stop sign", ) self.assertEqual( processor.decode(predictions[1], skip_special_tokens=True), "An photography of the Temple Bar and other places in the city.", ) def test_vqa_model(self): model_id = "google/pix2struct-ai2d-base" image_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(image_url, stream=True).raw) model = Pix2StructForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device) processor = Pix2StructProcessor.from_pretrained(model_id) # image only text = "What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud" inputs = processor(images=image, return_tensors="pt", text=text).to(torch_device, torch.bfloat16) predictions = model.generate(**inputs) self.assertEqual(processor.decode(predictions[0], skip_special_tokens=True), "ash cloud") def test_vqa_model_batched(self): model_id = "google/pix2struct-ai2d-base" image_urls = [ "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg", "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo-2.png", ] images = [Image.open(requests.get(image_url, stream=True).raw) for image_url in image_urls] texts = [ "What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud", "What is the producer in the diagram? 
(1) Phytoplankton (2) Zooplankton (3) Large fish (4) Small fish", ] model = Pix2StructForConditionalGeneration.from_pretrained(model_id, dtype=torch.bfloat16).to(torch_device) processor = Pix2StructProcessor.from_pretrained(model_id) inputs = processor(images=images, return_tensors="pt", text=texts).to(torch_device, torch.bfloat16) predictions = model.generate(**inputs) self.assertEqual(processor.decode(predictions[0], skip_special_tokens=True), "ash cloud") self.assertEqual(processor.decode(predictions[1], skip_special_tokens=True), "Phytoplankton")
transformers/tests/models/pix2struct/test_modeling_pix2struct.py/0
{ "file_path": "transformers/tests/models/pix2struct/test_modeling_pix2struct.py", "repo_id": "transformers", "token_count": 16393 }
584
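Illustrative note (not part of the dataset records; the tiny sub-config values below are assumptions of this sketch): the Pix2Struct tests above override the embedding-resize tests because the vocabulary size lives on config.text_config rather than on the top-level config. Taken in isolation, the pattern they exercise looks like this:

from transformers import Pix2StructConfig, Pix2StructForConditionalGeneration

# Small sub-configs to keep the example light; any valid Pix2Struct config works the same way.
config = Pix2StructConfig(
    text_config={"vocab_size": 99, "hidden_size": 32, "num_layers": 2, "num_heads": 4, "d_kv": 8, "d_ff": 37},
    vision_config={"hidden_size": 32, "patch_embed_hidden_size": 32, "num_hidden_layers": 2,
                   "num_attention_heads": 4, "d_kv": 8, "d_ff": 37},
)
model = Pix2StructForConditionalGeneration(config)
old_size = model.config.text_config.vocab_size
model.resize_token_embeddings(old_size + 10)
# the update is reflected on the nested text config, not on a top-level vocab_size attribute
assert model.config.text_config.vocab_size == old_size + 10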
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Please note that Pop2PianoTokenizer is too far from our usual tokenizers and thus cannot use the TokenizerTesterMixin class. """ import os import pickle import shutil import tempfile import unittest from transformers.feature_extraction_utils import BatchFeature from transformers.testing_utils import ( is_pretty_midi_available, is_torch_available, require_pretty_midi, require_torch, ) from transformers.tokenization_utils import BatchEncoding if is_torch_available(): import torch requirements_available = is_torch_available() and is_pretty_midi_available() if requirements_available: import pretty_midi from transformers import Pop2PianoTokenizer @require_torch @require_pretty_midi class Pop2PianoTokenizerTest(unittest.TestCase): def setUp(self): super().setUp() self.tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano") def get_input_notes(self): notes = [ [ pretty_midi.Note(start=0.441179, end=2.159456, pitch=70, velocity=77), pretty_midi.Note(start=0.673379, end=0.905578, pitch=73, velocity=77), pretty_midi.Note(start=0.905578, end=2.159456, pitch=73, velocity=77), pretty_midi.Note(start=1.114558, end=2.159456, pitch=78, velocity=77), pretty_midi.Note(start=1.323537, end=1.532517, pitch=80, velocity=77), ], [ pretty_midi.Note(start=0.441179, end=2.159456, pitch=70, velocity=77), ], ] return notes def test_call(self): notes = self.get_input_notes() output = self.tokenizer( notes, return_tensors="pt", padding="max_length", truncation=True, max_length=10, return_attention_mask=True, ) # check the output type self.assertTrue(isinstance(output, BatchEncoding)) # check the values expected_output_token_ids = torch.tensor( [[134, 133, 74, 135, 77, 132, 77, 133, 77, 82], [134, 133, 74, 136, 132, 74, 134, 134, 134, 134]] ) expected_output_attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]]) torch.testing.assert_close(output["token_ids"], expected_output_token_ids, rtol=1e-4, atol=1e-4) torch.testing.assert_close(output["attention_mask"], expected_output_attention_mask, rtol=1e-4, atol=1e-4) def test_batch_decode(self): # test batch decode with model, feature-extractor outputs(beatsteps, extrapolated_beatstep) # Please note that this test does not test the accuracy of the outputs, instead it is designed to make sure that # the tokenizer's batch_decode can deal with attention_mask in feature-extractor outputs. For the accuracy check # please see the `test_batch_decode_outputs` test. 
model_output = torch.concatenate( [ torch.randint(size=[120, 96], low=0, high=70, dtype=torch.long), torch.zeros(size=[1, 96], dtype=torch.long), torch.randint(size=[50, 96], low=0, high=40, dtype=torch.long), torch.zeros(size=[1, 96], dtype=torch.long), ], axis=0, ) input_features = BatchFeature( { "beatsteps": torch.ones([2, 955]), "extrapolated_beatstep": torch.ones([2, 1000]), "attention_mask": torch.concatenate( [ torch.ones([120, 96], dtype=torch.long), torch.zeros([1, 96], dtype=torch.long), torch.ones([50, 96], dtype=torch.long), torch.zeros([1, 96], dtype=torch.long), ], axis=0, ), "attention_mask_beatsteps": torch.ones([2, 955]), "attention_mask_extrapolated_beatstep": torch.ones([2, 1000]), } ) output = self.tokenizer.batch_decode(token_ids=model_output, feature_extractor_output=input_features)[ "pretty_midi_objects" ] # check length self.assertTrue(len(output) == 2) # check object type self.assertTrue(isinstance(output[0], pretty_midi.pretty_midi.PrettyMIDI)) self.assertTrue(isinstance(output[1], pretty_midi.pretty_midi.PrettyMIDI)) def test_batch_decode_outputs(self): # test batch decode with model, feature-extractor outputs(beatsteps, extrapolated_beatstep) # Please note that this test tests the accuracy of the outputs of the tokenizer's `batch_decode` method. model_output = torch.tensor( [ [134, 133, 74, 135, 77, 82, 84, 136, 132, 74, 77, 82, 84], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], ] ) input_features = BatchEncoding( { "beatsteps": torch.tensor([[0.0697, 0.1103, 0.1509, 0.1916]]), "extrapolated_beatstep": torch.tensor([[0.0000, 0.0406, 0.0813, 0.1219]]), } ) output = self.tokenizer.batch_decode(token_ids=model_output, feature_extractor_output=input_features) # check outputs self.assertEqual(len(output["notes"]), 4) predicted_start_timings, predicted_end_timings = [], [] for i in output["notes"]: predicted_start_timings.append(i.start) predicted_end_timings.append(i.end) # Checking note start timings expected_start_timings = torch.tensor( [ 0.069700, 0.110300, 0.110300, 0.110300, ] ) predicted_start_timings = torch.tensor(predicted_start_timings) torch.testing.assert_close(expected_start_timings, predicted_start_timings, rtol=1e-4, atol=1e-4) # Checking note end timings expected_end_timings = torch.tensor( [ 0.191600, 0.191600, 0.191600, 0.191600, ] ) predicted_end_timings = torch.tensor(predicted_end_timings) torch.testing.assert_close(expected_end_timings, predicted_end_timings, rtol=1e-4, atol=1e-4) def test_get_vocab(self): vocab_dict = self.tokenizer.get_vocab() self.assertIsInstance(vocab_dict, dict) self.assertGreaterEqual(len(self.tokenizer), len(vocab_dict)) vocab = [self.tokenizer.convert_ids_to_tokens(i) for i in range(len(self.tokenizer))] self.assertEqual(len(vocab), len(self.tokenizer)) self.tokenizer.add_tokens(["asdfasdfasdfasdf"]) vocab = [self.tokenizer.convert_ids_to_tokens(i) for i in range(len(self.tokenizer))] self.assertEqual(len(vocab), len(self.tokenizer)) def test_save_and_load_tokenizer(self): tmpdirname = tempfile.mkdtemp() sample_notes = self.get_input_notes() self.tokenizer.add_tokens(["bim", "bambam"]) additional_special_tokens = self.tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") self.tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens}) before_token_ids = self.tokenizer(sample_notes)["token_ids"] before_vocab = self.tokenizer.get_vocab() self.tokenizer.save_pretrained(tmpdirname) after_tokenizer = 
self.tokenizer.__class__.from_pretrained(tmpdirname) after_token_ids = after_tokenizer(sample_notes)["token_ids"] after_vocab = after_tokenizer.get_vocab() self.assertDictEqual(before_vocab, after_vocab) self.assertListEqual(before_token_ids, after_token_ids) self.assertIn("bim", after_vocab) self.assertIn("bambam", after_vocab) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) shutil.rmtree(tmpdirname) def test_pickle_tokenizer(self): tmpdirname = tempfile.mkdtemp() notes = self.get_input_notes() subwords = self.tokenizer(notes)["token_ids"] filename = os.path.join(tmpdirname, "tokenizer.bin") with open(filename, "wb") as handle: pickle.dump(self.tokenizer, handle) with open(filename, "rb") as handle: tokenizer_new = pickle.load(handle) subwords_loaded = tokenizer_new(notes)["token_ids"] self.assertListEqual(subwords, subwords_loaded) def test_padding_side_in_kwargs(self): tokenizer_p = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano", padding_side="left") self.assertEqual(tokenizer_p.padding_side, "left") tokenizer_p = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano", padding_side="right") self.assertEqual(tokenizer_p.padding_side, "right") self.assertRaises( ValueError, Pop2PianoTokenizer.from_pretrained, "sweetcocoa/pop2piano", padding_side="unauthorized", ) def test_truncation_side_in_kwargs(self): tokenizer_p = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano", truncation_side="left") self.assertEqual(tokenizer_p.truncation_side, "left") tokenizer_p = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano", truncation_side="right") self.assertEqual(tokenizer_p.truncation_side, "right") self.assertRaises( ValueError, Pop2PianoTokenizer.from_pretrained, "sweetcocoa/pop2piano", truncation_side="unauthorized", ) def test_right_and_left_padding(self): tokenizer = self.tokenizer notes = self.get_input_notes() notes = notes[0] max_length = 20 padding_idx = tokenizer.pad_token_id # RIGHT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "right" padded_notes = tokenizer(notes, padding="max_length", max_length=max_length)["token_ids"] padded_notes_length = len(padded_notes) notes_without_padding = tokenizer(notes, padding="do_not_pad")["token_ids"] padding_size = max_length - len(notes_without_padding) self.assertEqual(padded_notes_length, max_length) self.assertEqual(notes_without_padding + [padding_idx] * padding_size, padded_notes) # LEFT PADDING - Check that it correctly pads when a maximum length is specified along with the padding flag set to True tokenizer.padding_side = "left" padded_notes = tokenizer(notes, padding="max_length", max_length=max_length)["token_ids"] padded_notes_length = len(padded_notes) notes_without_padding = tokenizer(notes, padding="do_not_pad")["token_ids"] padding_size = max_length - len(notes_without_padding) self.assertEqual(padded_notes_length, max_length) self.assertEqual([padding_idx] * padding_size + notes_without_padding, padded_notes) # RIGHT & LEFT PADDING - Check that nothing is done for 'longest' and 'no_padding' notes_without_padding = tokenizer(notes)["token_ids"] tokenizer.padding_side = "right" padded_notes_right = tokenizer(notes, padding=False)["token_ids"] self.assertEqual(len(padded_notes_right), len(notes_without_padding)) self.assertEqual(padded_notes_right, notes_without_padding) tokenizer.padding_side = "left" padded_notes_left = tokenizer(notes, padding="longest")["token_ids"] 
self.assertEqual(len(padded_notes_left), len(notes_without_padding)) self.assertEqual(padded_notes_left, notes_without_padding) tokenizer.padding_side = "right" padded_notes_right = tokenizer(notes, padding="longest")["token_ids"] self.assertEqual(len(padded_notes_right), len(notes_without_padding)) self.assertEqual(padded_notes_right, notes_without_padding) tokenizer.padding_side = "left" padded_notes_left = tokenizer(notes, padding=False)["token_ids"] self.assertEqual(len(padded_notes_left), len(notes_without_padding)) self.assertEqual(padded_notes_left, notes_without_padding) def test_right_and_left_truncation(self): tokenizer = self.tokenizer notes = self.get_input_notes() notes = notes[0] truncation_size = 3 # RIGHT TRUNCATION - Check that it correctly truncates when a maximum length is specified along with the truncation flag set to True tokenizer.truncation_side = "right" full_encoded_notes = tokenizer(notes)["token_ids"] full_encoded_notes_length = len(full_encoded_notes) truncated_notes = tokenizer(notes, max_length=full_encoded_notes_length - truncation_size, truncation=True)[ "token_ids" ] self.assertEqual(full_encoded_notes_length, len(truncated_notes) + truncation_size) self.assertEqual(full_encoded_notes[:-truncation_size], truncated_notes) # LEFT TRUNCATION - Check that it correctly truncates when a maximum length is specified along with the truncation flag set to True tokenizer.truncation_side = "left" full_encoded_notes = tokenizer(notes)["token_ids"] full_encoded_notes_length = len(full_encoded_notes) truncated_notes = tokenizer(notes, max_length=full_encoded_notes_length - truncation_size, truncation=True)[ "token_ids" ] self.assertEqual(full_encoded_notes_length, len(truncated_notes) + truncation_size) self.assertEqual(full_encoded_notes[truncation_size:], truncated_notes) # RIGHT & LEFT TRUNCATION - Check that nothing is done for 'longest' and 'no_truncation' tokenizer.truncation_side = "right" truncated_notes_right = tokenizer(notes, truncation=True)["token_ids"] self.assertEqual(full_encoded_notes_length, len(truncated_notes_right)) self.assertEqual(full_encoded_notes, truncated_notes_right) tokenizer.truncation_side = "left" truncated_notes_left = tokenizer(notes, truncation="longest_first")["token_ids"] self.assertEqual(len(truncated_notes_left), full_encoded_notes_length) self.assertEqual(truncated_notes_left, full_encoded_notes) tokenizer.truncation_side = "right" truncated_notes_right = tokenizer(notes, truncation="longest_first")["token_ids"] self.assertEqual(len(truncated_notes_right), full_encoded_notes_length) self.assertEqual(truncated_notes_right, full_encoded_notes) tokenizer.truncation_side = "left" truncated_notes_left = tokenizer(notes, truncation=True)["token_ids"] self.assertEqual(len(truncated_notes_left), full_encoded_notes_length) self.assertEqual(truncated_notes_left, full_encoded_notes) def test_padding_to_multiple_of(self): notes = self.get_input_notes() if self.tokenizer.pad_token is None: self.skipTest(reason="No padding token.") else: normal_tokens = self.tokenizer(notes[0], padding=True, pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") normal_tokens = self.tokenizer(notes[0], pad_to_multiple_of=8) for key, value in normal_tokens.items(): self.assertNotEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # Should also work with truncation normal_tokens = self.tokenizer(notes[0], padding=True, truncation=True, pad_to_multiple_of=8) 
for key, value in normal_tokens.items(): self.assertEqual(len(value) % 8, 0, f"BatchEncoding.{key} is not multiple of 8") # truncation to something which is not a multiple of pad_to_multiple_of raises an error self.assertRaises( ValueError, self.tokenizer.__call__, notes[0], padding=True, truncation=True, max_length=12, pad_to_multiple_of=8, ) def test_padding_with_attention_mask(self): if self.tokenizer.pad_token is None: self.skipTest(reason="No padding token.") if "attention_mask" not in self.tokenizer.model_input_names: self.skipTest(reason="This model does not use attention mask.") features = [ {"token_ids": [1, 2, 3, 4, 5, 6], "attention_mask": [1, 1, 1, 1, 1, 0]}, {"token_ids": [1, 2, 3], "attention_mask": [1, 1, 0]}, ] padded_features = self.tokenizer.pad(features) if self.tokenizer.padding_side == "right": self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [1, 1, 0, 0, 0, 0]]) else: self.assertListEqual(padded_features["attention_mask"], [[1, 1, 1, 1, 1, 0], [0, 0, 0, 1, 1, 0]])
transformers/tests/models/pop2piano/test_tokenization_pop2piano.py/0
{ "file_path": "transformers/tests/models/pop2piano/test_tokenization_pop2piano.py", "repo_id": "transformers", "token_count": 7928 }
585
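Illustrative note (not part of the dataset records; the gpt2 checkpoint is an arbitrary stand-in, since Pop2Piano itself needs MIDI note inputs): padding_side decides which end of a sequence receives the pad tokens, which is what the left/right padding tests above verify. The same switch on an ordinary text tokenizer:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2", padding_side="left")
tok.pad_token = tok.eos_token  # gpt2 has no pad token by default
batch = tok(["short", "a noticeably longer input sequence"], padding="longest")
# With padding_side="left", the zeros in the attention mask sit at the start of the shorter row.
print(batch["attention_mask"])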
# coding=utf-8 # Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Qwen2.5-Omni model.""" import tempfile import unittest from io import BytesIO from urllib.request import urlopen import librosa import pytest import requests from transformers import ( AutoProcessor, Qwen2_5OmniForConditionalGeneration, Qwen2_5OmniThinkerConfig, Qwen2_5OmniThinkerForConditionalGeneration, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( Expectations, cleanup, require_flash_attn, require_torch, require_torch_gpu, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, floats_tensor, ids_tensor, ) if is_torch_available(): import torch if is_vision_available(): from PIL import Image class Qwen2_5OmniThinkerForConditionalGenerationTester: def __init__( self, parent, batch_size=3, feat_seq_length=30, num_channels=3, image_size=14, seq_length=39, vision_config={ "depth": 2, "embed_dim": 32, "hidden_act": "quick_gelu", "hidden_size": 32, "out_hidden_size": 32, "intermediate_size": 24, "mlp_ratio": 4, "num_heads": 4, "patch_size": 14, "spatial_merge_size": 1, "temporal_patch_size": 2, "fullatt_block_indexes": [0], "initializer_range": 0.02, }, audio_config={ "model_type": "qwen_omni_thinker_audio_encoder", "d_model": 32, "encoder_attention_heads": 4, "encoder_ffn_dim": 32, "encoder_layers": 2, "num_mel_bins": 20, "max_source_positions": 1500, "initializer_range": 0.02, "n_window": 100, "output_dim": 32, }, text_config={ "rope_scaling": {"mrope_section": [1, 1, 2], "rope_type": "default", "type": "default"}, "vocab_size": 99, "hidden_size": 32, "intermediate_size": 37, "num_hidden_layers": 4, "num_attention_heads": 4, "num_key_value_heads": 2, "hidden_act": "silu", "max_position_embeddings": 1024, "rms_norm_eps": 1e-06, "use_cache": True, "tie_word_embeddings": False, "rope_theta": 1000000.0, "use_sliding_window": False, "sliding_window": 50, "max_window_layers": 3, "attention_dropout": 0.0, "pad_token_id": 0, "initializer_range": 0.02, }, audio_token_index=1, image_token_index=2, video_token_index=3, position_id_per_seconds=25, seconds_per_chunk=2, audio_start_token_id=4, audio_end_token_id=5, user_token_id=6, vision_start_token_id=7, vision_end_token_id=8, initializer_range=0.02, ): self.parent = parent self.audio_config = audio_config self.vision_config = vision_config self.text_config = text_config self.audio_token_index = audio_token_index self.image_token_index = image_token_index self.video_token_index = video_token_index self.position_id_per_seconds = position_id_per_seconds self.seconds_per_chunk = seconds_per_chunk self.audio_start_token_id = audio_start_token_id self.audio_end_token_id = audio_end_token_id self.vision_start_token_id = vision_start_token_id self.vision_end_token_id = vision_end_token_id self.user_token_id = 
user_token_id self.initializer_range = initializer_range self.batch_size = batch_size self.feat_seq_length = feat_seq_length self.num_channels = num_channels self.image_size = image_size self.seq_length = seq_length self.is_training = False # Used from `self.model_tester` by common model tests self.num_hidden_layers = self.text_config["num_hidden_layers"] self.hidden_size = self.text_config["hidden_size"] self.num_attention_heads = self.text_config["num_attention_heads"] self.vocab_size = self.text_config["vocab_size"] def get_config(self): return Qwen2_5OmniThinkerConfig( audio_config=self.audio_config, vision_config=self.vision_config, text_config=self.text_config, audio_token_index=self.audio_token_index, image_token_index=self.image_token_index, video_token_index=self.video_token_index, position_id_per_seconds=self.position_id_per_seconds, seconds_per_chunk=self.seconds_per_chunk, audio_start_token_id=self.audio_start_token_id, audio_end_token_id=self.audio_end_token_id, vision_start_token_id=self.vision_start_token_id, vision_end_token_id=self.vision_end_token_id, user_token_id=self.user_token_id, initializer_range=self.initializer_range, ) def prepare_config_and_inputs(self): config = self.get_config() patch_size = config.vision_config.patch_size temporal_patch_size = config.vision_config.temporal_patch_size pixel_values = floats_tensor( [ self.batch_size * (self.image_size**2) // (patch_size**2), self.num_channels * (patch_size**2) * temporal_patch_size, ] ) pixel_grid_thw = torch.LongTensor( [[1, self.image_size / patch_size, self.image_size / patch_size]] * self.batch_size ).to(pixel_values.device) input_features_values = floats_tensor( [self.batch_size, self.audio_config["num_mel_bins"], self.feat_seq_length] ) feature_attention_mask = torch.ones([self.batch_size, self.feat_seq_length], dtype=torch.long).to(torch_device) return config, pixel_values, pixel_grid_thw, input_features_values, feature_attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, pixel_grid_thw, input_features_values, feature_attention_mask = config_and_inputs input_ids = ids_tensor([self.batch_size, self.seq_length], config.get_text_config().vocab_size - 3) + 3 attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device) # Make sure no other tokens are set to special, to prevetn flakiness tokens_to_replace = torch.tensor( [ config.image_token_index, config.audio_token_index, config.audio_start_token_id, config.audio_end_token_id, config.vision_start_token_id, config.vision_end_token_id, ], device=input_ids.device, ) input_ids[torch.isin(input_ids, tokens_to_replace)] = config.text_config.pad_token_id attention_mask[:, :1] = 0 # Audio token placeholders should be wrapped in start and end token ids audio_feat_length = ((self.feat_seq_length - 1) // 2 + 1 - 2) // 2 + 1 input_ids[:, 1] = config.audio_start_token_id input_ids[:, 2 : (2 + audio_feat_length)] = config.audio_token_index input_ids[:, 2 + audio_feat_length] = config.audio_end_token_id # Image token placeholders should be wrapped in start and end token ids input_ids[:, -4:-1] = torch.tensor( [config.vision_start_token_id, config.image_token_index, config.vision_end_token_id] ) inputs_dict = { "input_features": input_features_values, "feature_attention_mask": feature_attention_mask, "input_ids": input_ids, "attention_mask": attention_mask, "image_grid_thw": pixel_grid_thw, "pixel_values": pixel_values, } return config, inputs_dict def 
create_and_check_qwenomnithinker_model_fp16_forward(self, config, input_ids, pixel_values, attention_mask): model = Qwen2_5OmniThinkerForConditionalGeneration(config=config) model.to(torch_device) model.eval() with torch.autocast(device_type=torch_device, dtype=torch.float16): logits = model( input_ids=input_ids, attention_mask=attention_mask, pixel_values=pixel_values.to(torch.bfloat16), return_dict=True, )["logits"] self.parent.assertFalse(torch.isnan(logits).any().item()) @require_torch class Qwen2_5OmniThinkerForConditionalGenerationModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): """ Model tester for `Qwen2_5OmniThinkerForConditionalGeneration`. """ all_model_classes = (Qwen2_5OmniThinkerForConditionalGeneration,) if is_torch_available() else () all_generative_model_classes = (Qwen2_5OmniThinkerForConditionalGeneration,) if is_torch_available() else () test_pruning = False test_head_masking = False _is_composite = True model_split_percents = [0.5, 0.9] def setUp(self): self.model_tester = Qwen2_5OmniThinkerForConditionalGenerationTester(self) self.config_tester = ConfigTester(self, config_class=Qwen2_5OmniThinkerConfig, has_text_modality=False) @unittest.skip(reason="Cpu not yet supported because in QwenOmniThinker models") def test_disk_offload_bin(self): pass @unittest.skip(reason="Disk offload bin not yet supported because in QwenOmniThinker models") def test_cpu_offload(self): pass @unittest.skip(reason="Disk offload safetensors not yet supported because in QwenOmniThinker models") def test_disk_offload_safetensors(self): pass @unittest.skip(reason="Correct missing keys not yet supported because in QwenOmniThinker models") def test_correct_missing_keys(self): pass @unittest.skip(reason="Compile not yet supported because in QwenOmniThinker models") @pytest.mark.torch_compile_test def test_sdpa_can_compile_dynamic(self): pass @unittest.skip(reason="Sdpa dispatch not yet supported because in QwenOmniThinker models") def test_sdpa_can_dispatch_on_flash(self): pass @unittest.skip(reason="QwenOmniThinker does not support output_hidden_states test") def test_model_outputs_equivalence(self): pass def test_sdpa_can_dispatch_composite_models(self): # overwrite because Qwen2 is audio+text model (not vision+text) if not self.has_attentions: self.skipTest(reason="Model architecture does not support attentions") if not self._is_composite: self.skipTest(f"{self.all_model_classes[0].__name__} does not support SDPA") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_sdpa = model_class.from_pretrained(tmpdirname) model_sdpa = model_sdpa.eval().to(torch_device) text_attn = "sdpa" if model.model._supports_sdpa else "eager" audio_attn = "sdpa" if model.audio_tower._supports_sdpa else "eager" vision_attn = "sdpa" if model.visual._supports_sdpa else "eager" # `None` as it is the requested one which will be assigned to each sub-config # Sub-model will dispatch to SDPA if it can (checked below that `SDPA` layers are present) self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model.model.config._attn_implementation == text_attn) self.assertTrue(model.audio_tower.config._attn_implementation == audio_attn) self.assertTrue(model.visual.config._attn_implementation == vision_attn) model_eager = model_class.from_pretrained(tmpdirname, attn_implementation="eager") 
model_eager = model_eager.eval().to(torch_device) self.assertTrue(model_eager.config._attn_implementation == "eager") self.assertTrue(model_eager.model.config._attn_implementation == "eager") self.assertTrue(model_eager.audio_tower.config._attn_implementation == "eager") self.assertTrue(model_eager.visual.config._attn_implementation == "eager") for name, submodule in model_eager.named_modules(): class_name = submodule.__class__.__name__ if "SdpaAttention" in class_name or "SdpaSelfAttention" in class_name: raise ValueError("The eager model should not have SDPA attention layers") def attention_mask_padding_matches_padding_free_with_position_ids( self, attn_implementation: str, fa_kwargs: bool = False ): max_new_tokens = 30 for model_class in self.all_generative_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() dummy_input = inputs_dict[model_class.main_input_name] if dummy_input.dtype in [torch.float32, torch.float16]: dummy_input = dummy_input.to(torch.bfloat16) # make sure that all models have enough positions for generation if hasattr(config, "max_position_embeddings"): config.max_position_embeddings = max_new_tokens + dummy_input.shape[1] + 1 model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) if 0 in inputs_dict["attention_mask"][:, -1]: inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1) dummy_attention_mask = inputs_dict["attention_mask"] inputs_dict["input_ids"][~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id model = ( model_class.from_pretrained( tmpdirname, dtype=torch.bfloat16, attn_implementation=attn_implementation, ) .to(torch_device) .eval() ) # flatten padfree_inputs_dict = { "input_features": inputs_dict["input_features"], "feature_attention_mask": inputs_dict["feature_attention_mask"], "pixel_values": inputs_dict["pixel_values"], "image_grid_thw": inputs_dict["image_grid_thw"], "input_ids": inputs_dict["input_ids"][dummy_attention_mask.bool()].unsqueeze(0), } # add position_ids vision_position_ids, deltas = model.get_rope_index( input_ids=inputs_dict["input_ids"], image_grid_thw=inputs_dict["image_grid_thw"], attention_mask=inputs_dict["attention_mask"], audio_seqlens=torch.sum(inputs_dict["feature_attention_mask"], dim=1), ) # [3, bs, padded-seq-len] vision_padfree_positions = vision_position_ids[:, dummy_attention_mask.bool()].view( 3, -1 ) # [3, bs*padfree-len] text_padfree_positions = torch.cat( [torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()] ) # [1, bs*padfree-len] text_padfree_positions = text_padfree_positions.long().unsqueeze(0).to(torch_device) padfree_inputs_dict["position_ids"] = torch.cat([text_padfree_positions, vision_padfree_positions])[ :, None, : ] if fa_kwargs: cu_seq_lens = [0] + dummy_attention_mask.sum(1).tolist() cu_seq_lens = torch.tensor(cu_seq_lens, device=torch_device) max_length = cu_seq_lens.diff().max().item() padfree_inputs_dict.update( { "cu_seq_lens_q": cu_seq_lens.cumsum(-1).to(dtype=torch.int32), "cu_seq_lens_k": cu_seq_lens.cumsum(-1).to(dtype=torch.int32), "max_length_q": max_length, "max_length_k": max_length, } ) res_padded = model(**inputs_dict, use_cache=False) res_padfree = model(**padfree_inputs_dict, use_cache=False) logits_padded = res_padded.logits[inputs_dict["attention_mask"].bool()] logits_padfree = res_padfree.logits[0] # acceptable numerical instability tol = torch.finfo(torch.bfloat16).eps torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, 
atol=tol) @unittest.skip("Cannot do contrastive generation, has custom `generate()`") def test_contrastive_generate(self): pass @unittest.skip("Cannot do contrastive generation, has custom `generate()`") def test_contrastive_generate_dict_outputs_use_cache(self): pass @unittest.skip("Cannot do contrastive generation, has custom `generate()`") def test_contrastive_generate_low_memory(self): pass @unittest.skip("Cannot do constraint generation, has custom `generate()`") def test_constrained_beam_search_generate_dict_output(self): pass @unittest.skip("Cannot do dola generation, has custom `generate()`") def test_dola_decoding_sample(self): pass @unittest.skip("Cannot generate from inputs embeds") def test_generate_from_inputs_embeds_with_static_cache(self): pass # TODO (joao, raushan): there are multiple standardization issues in this model that prevent this test from # passing, fix me @unittest.skip("Cannot handle 4D attention mask") @pytest.mark.torch_compile_test def test_generate_compile_model_forward_fullgraph(self): pass @unittest.skip("Cannot handle 4D attention mask") def test_generate_compilation_all_outputs(self): pass @unittest.skip("Cannot handle 4D attention mask") def test_generate_with_static_cache(self): pass @unittest.skip("Cannot handle 4D attention mask") def test_custom_4d_attention_mask(self): pass def test_get_rope_index_video_with_audio(self): image_grid_thw = torch.empty((0, 3), dtype=torch.long) # 3 * 2 * 2 = 12 video tokens video_grid_thw = torch.tensor([[3, 2, 2]], dtype=torch.long) # num_audio_tokens = ((audio_seqlen - 1) // 2 + 1 - 2) // 2 + 1 # i.e.: 300 audio_seqlen -> 75 audio tokens audio_seqlens = torch.tensor([300], dtype=torch.long) second_per_grids = torch.tensor([1.0], dtype=torch.float) use_audio_in_video = True # fmt: off expected_position_ids = torch.tensor([ [[ 0, 1, # text 2, 2, # vision_bos + audio_bos # video chunk 3, 3, 3, 3, 28, 28, 28, 28, # audio chunk 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, # video chunk 53, 53, 53, 53, # audio chunk 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 78, # audio_eos + vision_eos 79, 80, # text ]], [[ 0, 1, # text 2, 2, # vision_bos + audio_bos # video chunk 3, 3, 4, 4, 3, 3, 4, 4, # audio chunk 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, # video chunk 3, 3, 4, 4, # audio chunk 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 78, # audio_eos + vision_eos 79, 80, # text ]], [[ 0, 1, # text 2, 2, # vision_bos + audio_bos # video chunk 3, 4, 3, 4, 3, 4, 3, 4, # audio chunk 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, # video chunk 3, 4, 3, 4, # audio chunk 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 78, # audio_eos + vision_eos 79, 80, # text ]], ], dtype=torch.long) # fmt: on for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = torch.tensor( [ [ 100, 101, ] + [ config.vision_start_token_id, config.audio_start_token_id, ] # 1st 
chunk: 8 video tokens, 50 audio tokens + [config.video_token_id] * 2 * 2 * 2 + [config.audio_token_id] * 50 + # 2nd chunk: 4 video tokens, 25 audio tokens [config.video_token_id] * 1 * 2 * 2 + [config.audio_token_id] * 25 + [ config.audio_end_token_id, config.vision_end_token_id, ] + [ 102, 103, ] ], dtype=torch.long, ) model = model_class(config) position_ids, mrope_position_deltas = model.get_rope_index( input_ids=input_ids, image_grid_thw=image_grid_thw, video_grid_thw=video_grid_thw, attention_mask=None, use_audio_in_video=use_audio_in_video, audio_seqlens=audio_seqlens, second_per_grids=second_per_grids, ) self.assertTrue(torch.equal(position_ids, expected_position_ids)) @require_torch class Qwen2_5OmniModelIntegrationTest(unittest.TestCase): def setUp(self): self.processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-Omni-7B") self.audio_url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3" self.audio_url_additional = ( "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/f2641_0_throatclearing.wav" ) self.image_url = "https://qianwen-res.oss-accelerate-overseas.aliyuncs.com/Qwen2-VL/demo_small.jpg" self.messages = [ { "role": "user", "content": [ {"type": "audio", "audio_url": self.audio_url}, {"type": "image", "image_url": self.image_url}, {"type": "text", "text": "What's that sound and what kind of dog is this?"}, ], } ] self.raw_audio, _ = librosa.load( BytesIO(urlopen(self.audio_url).read()), sr=self.processor.feature_extractor.sampling_rate ) self.raw_audio_additional, _ = librosa.load( BytesIO(urlopen(self.audio_url_additional).read()), sr=self.processor.feature_extractor.sampling_rate ) self.raw_image = Image.open(requests.get(self.image_url, stream=True).raw) def tearDown(self): cleanup(torch_device, gc_collect=True) @slow def test_small_model_integration_test(self): model = Qwen2_5OmniForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-Omni-7B", dtype=torch.bfloat16, device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor( text=text, audio=[self.raw_audio], images=[self.raw_image], return_tensors="pt", padding=True ).to(torch.bfloat16) expected_input_ids = torch.tensor( [ 151644, 8948, 198, 2610, 525, 264, 10950, 17847, 13, 151645, 198, 151644, 872, 198, 151647, 151646, 151646, ] ) assert torch.allclose(expected_input_ids, inputs.input_ids[0][:17], atol=3e-3) expected_pixel_slice = torch.tensor( [ [0.8792, 0.8792, 0.9084], [1.1858, 1.1858, 1.2296], [1.2004, 1.2004, 1.2150], [1.4340, 1.4340, 1.4194], [1.3902, 1.4048, 1.4194], [1.5216, 1.5362, 1.5362], ], dtype=torch.bfloat16, device="cpu", ) assert torch.allclose(expected_pixel_slice, inputs.pixel_values[:6, :3], atol=3e-3) # verify generation inputs = inputs.to(torch_device) output = model.generate( **inputs, thinker_temperature=0, thinker_do_sample=False, return_audio=False, thinker_max_new_tokens=20 ) EXPECTED_DECODED_TEXT = "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever." 
self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_batch(self): model = Qwen2_5OmniForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-Omni-7B", dtype=torch.bfloat16, device_map="auto" ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor( text=[text] * 2, audio=[self.raw_audio, self.raw_audio], images=[self.raw_image, self.raw_image], return_tensors="pt", padding=True, ).to(torch_device, dtype=torch.bfloat16) output = model.generate( **inputs, thinker_temperature=0, thinker_do_sample=False, return_audio=False, thinker_max_new_tokens=20 ) EXPECTED_DECODED_TEXTS = Expectations( { ("cuda", 7) : [ "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is of glass shattering, and the dog in the picture is a Labrador Retriever", "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is of glass shattering, and the dog in the picture is a Labrador Retriever", ], ("cuda", 8): [ "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.", "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.", ], ("rocm", None): [ "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.", "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog is a Labrador Retriever.", ], } ) # fmt: skip EXPECTED_DECODED_TEXT = EXPECTED_DECODED_TEXTS.get_expectation() self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_multiturn(self): model = Qwen2_5OmniForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-Omni-7B", dtype=torch.bfloat16, device_map="auto" ) messages = [ self.messages[0], { "role": "assistant", "content": [ { "type": "text", "text": "The sound is glass shattering, and the dog appears to be a Labrador Retriever.", } ], }, { "role": "user", "content": [ {"type": "audio", "audio_url": self.audio_url_additional}, {"type": "text", "text": "How about this one?"}, ], }, ] text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) inputs = self.processor( text=text, audio=[self.raw_audio, self.raw_audio_additional], images=[self.raw_image], return_tensors="pt", padding=True, ).to(torch_device, dtype=torch.bfloat16) output = model.generate( **inputs, thinker_temperature=0, thinker_do_sample=False, return_audio=False, thinker_max_new_tokens=20 ) EXPECTED_DECODED_TEXT = "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog appears to be a Labrador Retriever.\nuser\nHow about this one?\nassistant\nThe sound is a cough." 
self.assertEqual( self.processor.decode(output[0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) @slow def test_small_model_integration_test_w_audio(self): model = Qwen2_5OmniForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-Omni-7B", dtype=torch.bfloat16, device_map="auto" ) audio_url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/guess_age_gender.wav" messages = [ { "role": "system", "content": [ { "type": "text", "text": "You are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.", } ], }, { "role": "user", "content": [{"type": "audio", "audio": audio_url}], }, ] audio, _ = librosa.load(BytesIO(urlopen(audio_url).read()), sr=self.processor.feature_extractor.sampling_rate) text = self.processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) inputs = self.processor(text=text, audio=[audio], return_tensors="pt", padding=True).to( torch_device, dtype=torch.bfloat16 ) output = model.generate( **inputs, thinker_temperature=0, thinker_do_sample=False, thinker_max_new_tokens=20, talker_max_new_tokens=10, ) EXPECTED_DECODED_TEXTS = Expectations( { ("cuda", 7): "system\nYou are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.\nuser\n\nassistant\nWell, I can try. But it's not always that accurate. I might be able to make", ("cuda", 8): "system\nYou are Qwen, a virtual human developed by the Qwen Team, Alibaba Group, capable of perceiving auditory and visual inputs, as well as generating text and speech.\nuser\n\nassistant\nWell, I can't really guess your age and gender just from your voice. There are so many", } ) # fmt: skip EXPECTED_DECODED_TEXT = EXPECTED_DECODED_TEXTS.get_expectation() self.assertEqual( self.processor.decode(output[0][0], skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) self.assertFalse(torch.isnan(output[1]).any().item()) @slow @require_flash_attn @require_torch_gpu def test_small_model_integration_test_batch_flashatt2(self): model = Qwen2_5OmniForConditionalGeneration.from_pretrained( "Qwen/Qwen2.5-Omni-7B", dtype=torch.bfloat16, attn_implementation="flash_attention_2", device_map="auto", ) text = self.processor.apply_chat_template(self.messages, tokenize=False, add_generation_prompt=True) inputs = self.processor( text=[text, text], audio=[self.raw_audio, self.raw_audio], images=[self.raw_image, self.raw_image], return_tensors="pt", padding=True, ).to(torch_device) output = model.generate(**inputs, thinker_temperature=0, thinker_do_sample=False, return_audio=False) EXPECTED_DECODED_TEXT = [ "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog appears to be a Labrador Retriever.", "system\nYou are a helpful assistant.\nuser\nWhat's that sound and what kind of dog is this?\nassistant\nThe sound is glass shattering, and the dog appears to be a Labrador Retriever.", ] self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True), EXPECTED_DECODED_TEXT, ) self.assertEqual( self.processor.batch_decode(output, skip_special_tokens=True)[0], self.processor.batch_decode(output, skip_special_tokens=True)[1], )
transformers/tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py/0
{ "file_path": "transformers/tests/models/qwen2_5_omni/test_modeling_qwen2_5_omni.py", "repo_id": "transformers", "token_count": 17974 }
586
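A quick standalone check of the audio token-count arithmetic the Qwen2.5-Omni tests above rely on (a minimal sketch; the helper name is illustrative, only the integer formula comes from the tests):

def audio_token_count(feat_seq_length: int) -> int:
    # Same integer arithmetic as `audio_feat_length` in the tester and the
    # `num_audio_tokens` comment in the rope-index test above.
    return ((feat_seq_length - 1) // 2 + 1 - 2) // 2 + 1

# feat_seq_length=30 (the tester default) yields 7 audio placeholder tokens,
# and 300 mel frames map to the 75 audio tokens quoted in the rope-index test.
assert audio_token_count(30) == 7
assert audio_token_count(300) == 75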
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the RemBert tokenizer.""" import tempfile import unittest from tests.test_tokenization_common import AddedToken, TokenizerTesterMixin from transformers import RemBertTokenizer, RemBertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers SENTENCEPIECE_UNDERLINE = "▁" SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class RemBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): from_pretrained_id = "google/rembert" tokenizer_class = RemBertTokenizer rust_tokenizer_class = RemBertTokenizerFast space_between_special_tokens = True test_rust_tokenizer = True test_sentencepiece_ignore_case = True pre_trained_model_path = "google/rembert" @classmethod def setUpClass(cls): super().setUpClass() tokenizer = RemBertTokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(cls.tmpdirname) # Copied from ReformerTokenizationTest.get_input_output_texts def get_input_output_texts(self, tokenizer): input_text = "this is a test" output_text = "this is a test" return input_text, output_text def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "<unk>") self.assertEqual(vocab_keys[1], "<s>") self.assertEqual(vocab_keys[5], "▁the") self.assertEqual(vocab_keys[2], "</s>") def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_000) def test_full_tokenizer(self): tokenizer = RemBertTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [SPIECE_UNDERLINE + "I",SPIECE_UNDERLINE + "was",SPIECE_UNDERLINE + "b","or","n",SPIECE_UNDERLINE + "in",SPIECE_UNDERLINE + "","9","2","0","0","0",",",SPIECE_UNDERLINE + "and",SPIECE_UNDERLINE + "this",SPIECE_UNDERLINE + "is",SPIECE_UNDERLINE + "f","al","s","é",".",],) # fmt: skip ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]) def test_encode_decode_round_trip(self): tokenizer = RemBertTokenizer(SAMPLE_VOCAB, keep_accents=True) text = "清水寺は京都にある。" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["▁", "清水寺は京都にある。"]) encoded_string = tokenizer.encode(text) self.assertListEqual(encoded_string, [1000, 7, 0, 1001]) decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(decode_text, text) text = "That's awesome! 🤩 #HuggingFace, 🌟 Have a great day! 
🌈" tokens = tokenizer.tokenize(text) self.assertListEqual( tokens, ['▁That', "'", 's', '▁a', 'w', 'es', 'ome', '!', '▁', '🤩', '▁', '#', 'H', 'u', 'g', 'g', 'ing', 'F', 'a', 'ce', ',', '▁', '🌟', '▁H', 'a', 've', '▁a', '▁great', '▁day', '!', '▁', '🌈']) # fmt: skip decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(decode_text, "That's awesome! 🤩 #HuggingFace, 🌟 Have a great day! 🌈") text = "In the sky up above" tokens = tokenizer._tokenize(text) self.assertListEqual(tokens, ["▁In", "▁the", "▁s", "k", "y", "▁up", "▁a", "b", "o", "ve"]) # fmt: skip encoded_string = tokenizer.encode(text) self.assertListEqual(encoded_string, [1000, 388, 5, 47, 45, 30, 118, 10, 65, 20, 123, 1001]) decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(text, decode_text) text = "The cat. . Sat <s>.In a room" tokens = tokenizer.tokenize(text) self.assertListEqual( tokens, ["▁The", "▁c", "at", ".", "▁", ".", "▁S", "at", "▁", "<", "s", ">", ".", "I", "n", "▁a", "▁room"] ) encoded_string = tokenizer.encode(text) self.assertListEqual( encoded_string, [1000, 68, 69, 76, 4, 7, 4, 166, 76, 7, 0, 6, 0, 4, 100, 24, 10, 136, 1001] ) decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(text, decode_text) text = "Invoice #12345, dated 2023-12-01, is due on 2024-01-15." tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ['▁In', 'v', 'o', 'ic', 'e', '▁', '#', '1', '2', '34', '5', ',', '▁da', 'ted', '▁', '2', '0', '2', '3', '-', '1', '2', '-', '0', '1', ',', '▁is', '▁d', 'u', 'e', '▁on', '▁', '2', '0', '2', '4', '-', '0', '1', '-', '1', '5', '.']) # fmt: skip encoded_string = tokenizer.encode(text) self.assertListEqual(encoded_string, [1000, 388, 83, 20, 113, 15, 7, 0, 356, 602, 0, 555, 3, 417, 273, 7, 602, 347, 602, 0, 33, 356, 602, 33, 347, 356, 3, 46, 229, 51, 15, 59, 7, 602, 347, 602, 0, 33, 347, 356, 33, 356, 555, 4, 1001]) # fmt: skip decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(text, decode_text) text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit..." tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ['▁', 'L', 'or', 'em', '▁', 'i', 'p', 's', 'um', '▁do', 'l', 'or', '▁sit', '▁am', 'e', 't', ',', '▁con', 'se', 'c', 'te', 't', 'ur', '▁a', 'd', 'i', 'p', 'is', 'c', 'ing', '▁', 'el', 'it', '.', '.', '.']) # fmt: skip encoded_string = tokenizer.encode(text) self.assertListEqual( encoded_string, [1000, 7, 279, 55, 300, 7, 23, 29, 6, 155, 92, 27, 55, 615, 219, 15, 14, 3, 247, 114, 28, 181, 14, 108, 10, 16, 23, 29, 125, 28, 17, 7, 168, 137, 4, 4, 4, 1001] ) # fmt: skip decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(text, decode_text) # for multiple language in one sentence text = "Bonjour! Hello! こんにちは!" tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ["▁B", "on", "j", "o", "ur", "!", "▁He", "ll", "o", "!", "▁", "こんにちは", "!"]) encoded_string = tokenizer.encode(text) self.assertListEqual(encoded_string, [1000, 295, 109, 999, 20, 108, 146, 156, 86, 20, 146, 7, 0, 146, 1001]) decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual(text, decode_text) text = "Extra spaces\tand\nline breaks\r\nshould be handled." 
tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, ['▁E', 'x', 't', 'r', 'a', '▁sp', 'a', 'ce', 's', '▁and', '▁line', '▁b', 're', 'a', 'k', 's', '▁should', '▁be', '▁hand', 'led', '.']) # fmt: skip encoded_string = tokenizer.encode(text) self.assertListEqual( encoded_string, [1000, 454, 297, 14, 35, 18, 277, 18, 133, 6, 12, 485, 84, 56, 18, 45, 6, 173, 36, 363, 338, 4, 1001], ) decode_text = tokenizer.convert_tokens_to_string(tokens) self.assertEqual("Extra spaces and line breaks should be handled.", decode_text) def test_sequence_builders(self): tokenizer = RemBertTokenizer(SAMPLE_VOCAB) text = tokenizer.encode("sequence builders") text_2 = tokenizer.encode("multi-sequence build") encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [ tokenizer.sep_token_id ] def test_added_tokens_serialization(self): # Utility to test the added vocab def _test_added_vocab_and_eos(expected, tokenizer_class, expected_eos, temp_dir): tokenizer = tokenizer_class.from_pretrained(temp_dir) self.assertTrue(str(expected_eos) not in tokenizer.additional_special_tokens) self.assertIn(new_eos, tokenizer.added_tokens_decoder.values()) self.assertEqual(tokenizer.added_tokens_decoder[tokenizer.eos_token_id], new_eos) self.assertTrue(all(item in tokenizer.added_tokens_decoder.items() for item in expected.items())) return tokenizer new_eos = AddedToken("[NEW_EOS]", rstrip=False, lstrip=True, normalized=False, special=True) new_masked_token = AddedToken("[MASK]", lstrip=True, rstrip=False, normalized=False) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): # Load a slow tokenizer from the hub, init with the new token for fast to also include it tokenizer = self.tokenizer_class.from_pretrained( pretrained_name, eos_token=new_eos, mask_token=new_masked_token ) EXPECTED_ADDED_TOKENS_DECODER = tokenizer.added_tokens_decoder with self.subTest("Hub -> Slow: Test loading a slow tokenizer from the hub)"): self.assertEqual(tokenizer._special_tokens_map["eos_token"], new_eos) self.assertIn(new_eos, list(tokenizer.added_tokens_decoder.values())) with tempfile.TemporaryDirectory() as tmp_dir_2: tokenizer.save_pretrained(tmp_dir_2) with self.subTest( "Hub -> Slow -> Slow: Test saving this slow tokenizer and reloading it in the fast class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_2 ) if self.rust_tokenizer_class is not None: with self.subTest( "Hub -> Slow -> Fast: Test saving this slow tokenizer and reloading it in the fast class" ): tokenizer_fast = _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_2 ) with tempfile.TemporaryDirectory() as tmp_dir_3: tokenizer_fast.save_pretrained(tmp_dir_3) with self.subTest( "Hub -> Slow -> Fast -> Fast: Test saving this fast tokenizer and reloading it in the fast class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3 ) with self.subTest( "Hub -> Slow -> Fast -> Slow: Test saving this slow tokenizer and reloading it in the slow class" ): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_3 ) with self.subTest("Hub -> Fast: 
Test loading a fast tokenizer from the hub)"): if self.rust_tokenizer_class is not None: tokenizer_fast = self.get_rust_tokenizer(pretrained_name, eos_token=new_eos) self.assertEqual(tokenizer_fast._special_tokens_map["eos_token"], new_eos) self.assertIn(new_eos, list(tokenizer_fast.added_tokens_decoder.values())) # We can't test the following because, for backward compatibility, the default rstrip/lstrip were kept in the slow tokenizer but not in the fast one. Re-enable once normalization is aligned. with self.subTest("Hub -> Fast == Hub -> Slow: make sure slow and fast tokenizer match"): self.assertTrue( all( item in tokenizer.added_tokens_decoder.items() for item in EXPECTED_ADDED_TOKENS_DECODER.items() ) ) EXPECTED_ADDED_TOKENS_DECODER = tokenizer_fast.added_tokens_decoder with tempfile.TemporaryDirectory() as tmp_dir_4: tokenizer_fast.save_pretrained(tmp_dir_4) with self.subTest("Hub -> Fast -> Fast: saving Fast1 locally and loading"): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.rust_tokenizer_class, new_eos, tmp_dir_4 ) with self.subTest("Hub -> Fast -> Slow: saving Fast1 locally and loading"): _test_added_vocab_and_eos( EXPECTED_ADDED_TOKENS_DECODER, self.tokenizer_class, new_eos, tmp_dir_4 )
transformers/tests/models/rembert/test_tokenization_rembert.py/0
{ "file_path": "transformers/tests/models/rembert/test_tokenization_rembert.py", "repo_id": "transformers", "token_count": 6955 }
587
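The RemBert round-trip assertions above lean on the SentencePiece word-boundary marker "▁"; a minimal sketch of that convention, using a hypothetical helper rather than the tokenizer's own convert_tokens_to_string:

SPIECE_UNDERLINE = "▁"  # U+2581, marks the start of a new word in SentencePiece pieces

def pieces_to_text(pieces):
    # Hypothetical helper: concatenate the pieces and turn the boundary marker
    # back into ordinary spaces, which is what the round-trip tests above expect.
    return "".join(pieces).replace(SPIECE_UNDERLINE, " ").strip()

assert pieces_to_text(["▁This", "▁is", "▁a", "▁t", "est"]) == "This is a test"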
# coding = utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch RT_DETR model.""" import inspect import math import tempfile import unittest from parameterized import parameterized from transformers import ( RTDetrConfig, RTDetrImageProcessor, RTDetrResNetConfig, is_torch_available, is_vision_available, ) from transformers.testing_utils import ( Expectations, require_torch, require_torch_accelerator, require_vision, slow, torch_device, ) from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import RTDetrForObjectDetection, RTDetrModel if is_vision_available(): from PIL import Image CHECKPOINT = "PekingU/rtdetr_r50vd" # TODO: replace class RTDetrModelTester: def __init__( self, parent, batch_size=3, is_training=True, use_labels=True, n_targets=3, num_labels=10, initializer_range=0.02, layer_norm_eps=1e-5, batch_norm_eps=1e-5, # backbone backbone_config=None, # encoder HybridEncoder encoder_hidden_dim=32, encoder_in_channels=[128, 256, 512], feat_strides=[8, 16, 32], encoder_layers=1, encoder_ffn_dim=64, encoder_attention_heads=2, dropout=0.0, activation_dropout=0.0, encode_proj_layers=[2], positional_encoding_temperature=10000, encoder_activation_function="gelu", activation_function="silu", eval_size=None, normalize_before=False, # decoder RTDetrTransformer d_model=32, num_queries=30, decoder_in_channels=[32, 32, 32], decoder_ffn_dim=64, num_feature_levels=3, decoder_n_points=4, decoder_layers=2, decoder_attention_heads=2, decoder_activation_function="relu", attention_dropout=0.0, num_denoising=0, label_noise_ratio=0.5, box_noise_scale=1.0, learn_initial_query=False, anchor_image_size=None, image_size=64, disable_custom_kernels=True, with_box_refine=True, ): self.parent = parent self.batch_size = batch_size self.num_channels = 3 self.is_training = is_training self.use_labels = use_labels self.n_targets = n_targets self.num_labels = num_labels self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.batch_norm_eps = batch_norm_eps self.backbone_config = backbone_config self.encoder_hidden_dim = encoder_hidden_dim self.encoder_in_channels = encoder_in_channels self.feat_strides = feat_strides self.encoder_layers = encoder_layers self.encoder_ffn_dim = encoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.dropout = dropout self.activation_dropout = activation_dropout self.encode_proj_layers = encode_proj_layers self.positional_encoding_temperature = positional_encoding_temperature self.encoder_activation_function = encoder_activation_function self.activation_function = activation_function self.eval_size = eval_size self.normalize_before = normalize_before self.d_model = d_model self.num_queries = num_queries 
self.decoder_in_channels = decoder_in_channels self.decoder_ffn_dim = decoder_ffn_dim self.num_feature_levels = num_feature_levels self.decoder_n_points = decoder_n_points self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.decoder_activation_function = decoder_activation_function self.attention_dropout = attention_dropout self.num_denoising = num_denoising self.label_noise_ratio = label_noise_ratio self.box_noise_scale = box_noise_scale self.learn_initial_query = learn_initial_query self.anchor_image_size = anchor_image_size self.image_size = image_size self.disable_custom_kernels = disable_custom_kernels self.with_box_refine = with_box_refine self.encoder_seq_length = math.ceil(self.image_size / 32) * math.ceil(self.image_size / 32) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) pixel_mask = torch.ones([self.batch_size, self.image_size, self.image_size], device=torch_device) labels = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) labels = [] for i in range(self.batch_size): target = {} target["class_labels"] = torch.randint( high=self.num_labels, size=(self.n_targets,), device=torch_device ) target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device) labels.append(target) config = self.get_config() config.num_labels = self.num_labels return config, pixel_values, pixel_mask, labels def get_config(self): hidden_sizes = [10, 20, 30, 40] backbone_config = RTDetrResNetConfig( embeddings_size=10, hidden_sizes=hidden_sizes, depths=[1, 1, 2, 1], out_features=["stage2", "stage3", "stage4"], out_indices=[2, 3, 4], ) return RTDetrConfig.from_backbone_configs( backbone_config=backbone_config, encoder_hidden_dim=self.encoder_hidden_dim, encoder_in_channels=hidden_sizes[1:], feat_strides=self.feat_strides, encoder_layers=self.encoder_layers, encoder_ffn_dim=self.encoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, dropout=self.dropout, activation_dropout=self.activation_dropout, encode_proj_layers=self.encode_proj_layers, positional_encoding_temperature=self.positional_encoding_temperature, encoder_activation_function=self.encoder_activation_function, activation_function=self.activation_function, eval_size=self.eval_size, normalize_before=self.normalize_before, d_model=self.d_model, num_queries=self.num_queries, decoder_in_channels=self.decoder_in_channels, decoder_ffn_dim=self.decoder_ffn_dim, num_feature_levels=self.num_feature_levels, decoder_n_points=self.decoder_n_points, decoder_layers=self.decoder_layers, decoder_attention_heads=self.decoder_attention_heads, decoder_activation_function=self.decoder_activation_function, attention_dropout=self.attention_dropout, num_denoising=self.num_denoising, label_noise_ratio=self.label_noise_ratio, box_noise_scale=self.box_noise_scale, learn_initial_query=self.learn_initial_query, anchor_image_size=self.anchor_image_size, image_size=self.image_size, disable_custom_kernels=self.disable_custom_kernels, with_box_refine=self.with_box_refine, ) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_mask, labels = self.prepare_config_and_inputs() inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict def create_and_check_rt_detr_model(self, config, pixel_values, pixel_mask, labels): model = RTDetrModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, 
pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.num_queries, self.d_model)) def create_and_check_rt_detr_object_detection_head_model(self, config, pixel_values, pixel_mask, labels): model = RTDetrForObjectDetection(config=config) model.to(torch_device) model.eval() result = model(pixel_values=pixel_values, pixel_mask=pixel_mask) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) result = model(pixel_values=pixel_values, pixel_mask=pixel_mask, labels=labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_queries, self.num_labels)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_queries, 4)) @require_torch class RTDetrModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (RTDetrModel, RTDetrForObjectDetection) if is_torch_available() else () pipeline_model_mapping = ( {"image-feature-extraction": RTDetrModel, "object-detection": RTDetrForObjectDetection} if is_torch_available() else {} ) is_encoder_decoder = True test_torchscript = False test_pruning = False test_head_masking = False test_missing_keys = False test_torch_exportable = True # special case for head models def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "RTDetrForObjectDetection": labels = [] for i in range(self.model_tester.batch_size): target = {} target["class_labels"] = torch.ones( size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long ) target["boxes"] = torch.ones( self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float ) labels.append(target) inputs_dict["labels"] = labels return inputs_dict def setUp(self): self.model_tester = RTDetrModelTester(self) self.config_tester = ConfigTester( self, config_class=RTDetrConfig, has_text_modality=False, common_properties=["hidden_size", "num_attention_heads"], ) def test_config(self): self.config_tester.run_common_tests() def test_rt_detr_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_rt_detr_model(*config_and_inputs) def test_rt_detr_object_detection_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_rt_detr_object_detection_head_model(*config_and_inputs) @unittest.skip(reason="RTDetr does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="RTDetr does not use test_inputs_embeds_matches_input_ids") def test_inputs_embeds_matches_input_ids(self): pass @unittest.skip(reason="RTDetr does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="RTDetr does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="RTDetr does not use token embeddings") def test_resize_tokens_embeddings(self): pass @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for 
model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class._from_config(config, attn_implementation="eager") config = model.config model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.encoder_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions self.assertEqual(len(attentions), self.model_tester.encoder_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [ self.model_tester.encoder_attention_heads, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length, ], ) out_len = len(outputs) correct_outlen = 13 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning # Object Detection model returns pred_logits and pred_boxes if model_class.__name__ == "RTDetrForObjectDetection": correct_outlen += 2 self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.decoder_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [ self.model_tester.decoder_attention_heads, self.model_tester.num_queries, self.model_tester.num_queries, ], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.decoder_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.decoder_attention_heads, self.model_tester.num_feature_levels, self.model_tester.decoder_n_points, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types else: # RTDetr should maintin encoder_hidden_states output added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions self.assertEqual(len(self_attentions), self.model_tester.encoder_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [ self.model_tester.encoder_attention_heads, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length, ], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.encoder_in_channels) - 1 ) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( 
list(hidden_states[1].shape[-2:]), [ self.model_tester.image_size // self.model_tester.feat_strides[-1], self.model_tester.image_size // self.model_tester.feat_strides[-1], ], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.decoder_layers + 1 ) self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.num_queries, self.model_tester.d_model], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) inputs = self._prepare_for_class(inputs_dict, model_class) outputs = model(**inputs) # we take the first output since last_hidden_state is the first item output = outputs[0] encoder_hidden_states = outputs.encoder_hidden_states[0] encoder_attentions = outputs.encoder_attentions[0] encoder_hidden_states.retain_grad() encoder_attentions.retain_grad() decoder_attentions = outputs.decoder_attentions[0] decoder_attentions.retain_grad() cross_attentions = outputs.cross_attentions[0] cross_attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(encoder_hidden_states.grad) self.assertIsNotNone(encoder_attentions.grad) self.assertIsNotNone(decoder_attentions.grad) self.assertIsNotNone(cross_attentions.grad) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_different_timm_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # let's pick a random timm backbone config.backbone = "tf_mobilenetv3_small_075" config.backbone_config = None config.use_timm_backbone = True config.backbone_kwargs = {"out_indices": [2, 3, 4]} for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "RTDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) # Confirm out_indices was propagated to backbone self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) else: # Confirm out_indices was propagated to backbone self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) self.assertTrue(outputs) def test_hf_backbone(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() # Load a pretrained HF 
checkpoint as backbone config.backbone = "microsoft/resnet-18" config.backbone_config = None config.use_timm_backbone = False config.use_pretrained_backbone = True config.backbone_kwargs = {"out_indices": [2, 3, 4]} for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if model_class.__name__ == "RTDetrForObjectDetection": expected_shape = ( self.model_tester.batch_size, self.model_tester.num_queries, self.model_tester.num_labels, ) self.assertEqual(outputs.logits.shape, expected_shape) # Confirm out_indices was propagated to backbone self.assertEqual(len(model.model.backbone.intermediate_channel_sizes), 3) else: # Confirm out_indices was propagated to backbone self.assertEqual(len(model.backbone.intermediate_channel_sizes), 3) self.assertTrue(outputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) configs_no_init.initializer_bias_prior_prob = 0.2 bias_value = -1.3863 # log_e ((1 - 0.2) / 0.2) failed_cases = [] for model_class in self.all_model_classes: model = model_class(config=configs_no_init) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "RTDetrConvEncoder": backbone_params = [f"{name}.{key}" for key in module.state_dict()] break for name, param in model.named_parameters(): if param.requires_grad: if ("class_embed" in name and "bias" in name) or "enc_score_head.bias" in name: bias_tensor = torch.full_like(param.data, bias_value) if not torch.allclose(param.data, bias_tensor, atol=1e-4): failed_cases.append( f"Parameter {name} of model {model_class} seems not properly initialized. " f"Biases should be initialized to {bias_value}, got {param.data}" ) elif ( "level_embed" in name or "sampling_offsets.bias" in name or "value_proj" in name or "output_proj" in name or "reference_points" in name or "enc_score_head.weight" in name or ("class_embed" in name and "weight" in name) or name in backbone_params ): continue else: mean = param.data.mean() round_mean = (mean * 1e9).round() / 1e9 round_mean = round_mean.item() if round_mean not in [0.0, 1.0]: failed_cases.append( f"Parameter {name} of model {model_class} seems not properly initialized. 
" f"Mean is {round_mean}, but should be in [0, 1]" ) message = "\n" + "\n".join(failed_cases) self.assertTrue(not failed_cases, message) @parameterized.expand(["float32", "float16", "bfloat16"]) @require_torch_accelerator @slow def test_inference_with_different_dtypes(self, dtype_str): dtype = { "float32": torch.float32, "float16": torch.float16, "bfloat16": torch.bfloat16, }[dtype_str] config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device).to(dtype) model.eval() for key, tensor in inputs_dict.items(): if tensor.dtype == torch.float32: inputs_dict[key] = tensor.to(dtype) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)) @parameterized.expand(["float32", "float16", "bfloat16"]) @require_torch_accelerator @slow def test_inference_equivalence_for_static_and_dynamic_anchors(self, dtype_str): dtype = { "float32": torch.float32, "float16": torch.float16, "bfloat16": torch.bfloat16, }[dtype_str] config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() h, w = inputs_dict["pixel_values"].shape[-2:] # convert inputs to the desired dtype for key, tensor in inputs_dict.items(): if tensor.dtype == torch.float32: inputs_dict[key] = tensor.to(dtype) for model_class in self.all_model_classes: with tempfile.TemporaryDirectory() as tmpdirname: model_class(config).save_pretrained(tmpdirname) model_static = model_class.from_pretrained( tmpdirname, anchor_image_size=[h, w], device_map=torch_device, dtype=dtype ).eval() model_dynamic = model_class.from_pretrained( tmpdirname, anchor_image_size=None, device_map=torch_device, dtype=dtype ).eval() self.assertIsNotNone(model_static.config.anchor_image_size) self.assertIsNone(model_dynamic.config.anchor_image_size) with torch.no_grad(): outputs_static = model_static(**self._prepare_for_class(inputs_dict, model_class)) outputs_dynamic = model_dynamic(**self._prepare_for_class(inputs_dict, model_class)) self.assertTrue( torch.allclose( outputs_static.last_hidden_state, outputs_dynamic.last_hidden_state, rtol=1e-4, atol=1e-4 ), f"Max diff: {(outputs_static.last_hidden_state - outputs_dynamic.last_hidden_state).abs().max()}", ) TOLERANCE = 1e-4 # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class RTDetrModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return RTDetrImageProcessor.from_pretrained(CHECKPOINT) if is_vision_available() else None def test_inference_object_detection_head(self): model = RTDetrForObjectDetection.from_pretrained(CHECKPOINT).to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs) expected_shape_logits = torch.Size((1, 300, model.config.num_labels)) self.assertEqual(outputs.logits.shape, expected_shape_logits) expectations = Expectations( { (None, None): [ [-4.64763879776001, -5.001153945922852, -4.978509902954102], [-4.159348487854004, -4.703853607177734, -5.946484565734863], [-4.437461853027344, -4.65836238861084, -6.235235691070557], ], ("cuda", 8): [[-4.6471, -5.0008, -4.9786], [-4.1599, -4.7041, -5.9458], [-4.4374, -4.6582, -6.2340]], } ) expected_logits = torch.tensor(expectations.get_expectation()).to(torch_device) expectations = 
Expectations( { (None, None): [ [0.1688060760498047, 0.19992263615131378, 0.21225441992282867], [0.768376350402832, 0.41226309537887573, 0.4636859893798828], [0.25953856110572815, 0.5483334064483643, 0.4777486026287079], ], ("cuda", 8): [[0.1688, 0.1999, 0.2123], [0.7684, 0.4123, 0.4637], [0.2596, 0.5483, 0.4777]], } ) expected_boxes = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3, :3], expected_logits, rtol=2e-4, atol=2e-4) expected_shape_boxes = torch.Size((1, 300, 4)) self.assertEqual(outputs.pred_boxes.shape, expected_shape_boxes) torch.testing.assert_close(outputs.pred_boxes[0, :3, :3], expected_boxes, rtol=2e-4, atol=2e-4) # verify postprocessing results = image_processor.post_process_object_detection( outputs, threshold=0.0, target_sizes=[image.size[::-1]] )[0] expectations = Expectations( { (None, None): [0.9703017473220825, 0.9599503874778748, 0.9575679302215576, 0.9506784677505493], ("cuda", 8): [0.9704, 0.9599, 0.9576, 0.9507], } ) expected_scores = torch.tensor(expectations.get_expectation()).to(torch_device) expected_labels = [57, 15, 15, 65] expectations = Expectations( { (None, None): [ [0.13774872, 0.37821293, 640.13074, 476.21088], [343.38132, 24.276838, 640.1404, 371.49573], [13.225126, 54.179348, 318.98422, 472.2207], [40.114475, 73.44104, 175.9573, 118.48469], ], ("cuda", 8): [ [1.3775e-01, 3.7821e-01, 6.4013e02, 4.7621e02], [3.4338e02, 2.4277e01, 6.4014e02, 3.7150e02], [1.3225e01, 5.4179e01, 3.1898e02, 4.7222e02], [4.0114e01, 7.3441e01, 1.7596e02, 1.1848e02], ], } ) expected_slice_boxes = torch.tensor(expectations.get_expectation()).to(torch_device) torch.testing.assert_close(results["scores"][:4], expected_scores, rtol=2e-4, atol=2e-4) self.assertSequenceEqual(results["labels"][:4].tolist(), expected_labels) torch.testing.assert_close(results["boxes"][:4], expected_slice_boxes, rtol=2e-4, atol=2e-4)
transformers/tests/models/rt_detr/test_modeling_rt_detr.py/0
{ "file_path": "transformers/tests/models/rt_detr/test_modeling_rt_detr.py", "repo_id": "transformers", "token_count": 15985 }
588
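As an aside to the RT-DETR integration test in the record above: the `Expectations(...).get_expectation()` helper resolves expected tensors per device (a default keyed by `(None, None)`, plus hardware-specific entries such as `("cuda", 8)`). The sketch below illustrates that lookup pattern only; `pick_expectation` is a hypothetical stand-in, not the `transformers.testing_utils` implementation, and the real helper's resolution rules may differ.

# Illustrative sketch: choose expected values keyed by (device_type, cuda major capability),
# falling back to the (None, None) default when no device-specific entry exists.
import torch


def pick_expectation(expectations: dict, device: str) -> list:
    """Return the expectation matching the current device, else the default."""
    key = (None, None)
    if device.startswith("cuda") and torch.cuda.is_available():
        major, _minor = torch.cuda.get_device_capability()
        if ("cuda", major) in expectations:
            key = ("cuda", major)
    return expectations[key]


expectations = {
    (None, None): [[-4.6476, -5.0012, -4.9785]],
    ("cuda", 8): [[-4.6471, -5.0008, -4.9786]],
}
expected = torch.tensor(pick_expectation(expectations, "cpu"))  # falls back to the default entry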
# Copyright 2025 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch Siglip2 model.""" import inspect import tempfile import unittest import numpy as np from parameterized import parameterized from pytest import mark from transformers import Siglip2Config, Siglip2TextConfig, Siglip2VisionConfig from transformers.testing_utils import ( Expectations, is_flaky, require_flash_attn, require_torch, require_torch_gpu, require_vision, slow, torch_device, ) from transformers.utils import ( is_torch_available, is_vision_available, ) from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION, ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import Siglip2ForImageClassification, Siglip2Model, Siglip2TextModel, Siglip2VisionModel if is_vision_available(): from PIL import Image, ImageDraw from transformers import Siglip2Processor class Siglip2ModelTesterMixin(ModelTesterMixin): def test_sdpa_can_dispatch_composite_models(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # Load the model with SDPA model_sdpa = model_class.from_pretrained(tmpdirname) # Load model with eager attention model_eager = model_class.from_pretrained( tmpdirname, attn_implementation="eager", ) if hasattr(model_sdpa, "vision_model"): self.assertTrue(model_sdpa.vision_model.config._attn_implementation == "sdpa") self.assertTrue(model_eager.vision_model.config._attn_implementation == "eager") if hasattr(model_sdpa, "text_model"): self.assertTrue(model_sdpa.text_model.config._attn_implementation == "sdpa") self.assertTrue(model_eager.text_model.config._attn_implementation == "eager") self.assertTrue(model_sdpa.config._attn_implementation == "sdpa") self.assertTrue(model_eager.config._attn_implementation == "eager") @require_flash_attn @require_torch_gpu @mark.flash_attn_test @slow def test_flash_attn_2_inference_equivalence(self): dtype = torch.float16 for model_class in self.all_model_classes: if not model_class._supports_flash_attn: self.skipTest(f"{model_class.__name__} does not support Flash Attention 2") # Prepare inputs config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() if "pixel_values" in inputs_dict: inputs_dict["pixel_values"] = inputs_dict["pixel_values"].to(dtype) # Separate masks attention_masks = {} if "attention_mask" in inputs_dict: # attention_masks["attention_mask"] = inputs_dict.pop("attention_mask") inputs_dict["attention_mask"] = None if "pixel_attention_mask" in inputs_dict: attention_masks["pixel_attention_mask"] = inputs_dict.pop("pixel_attention_mask") inputs_dict["pixel_attention_mask"] = None # Save 
and load model with flash attention 2 and eager attentions with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) model = model_class.from_pretrained(tmp_dir, dtype=dtype) model_fa = model_class.from_pretrained(tmp_dir, dtype=dtype, attn_implementation="flash_attention_2") model_fa.to(torch_device) model.to(torch_device) # Run forward pass without attention masks with torch.no_grad(): outputs = model(**inputs_dict, output_hidden_states=True) outputs_fa = model_fa(**inputs_dict, output_hidden_states=True) # Choose which key to compare key = [k for k in ["logits", "logits_per_image", "last_hidden_state"] if k in outputs][0] torch.testing.assert_close(outputs[key], outputs_fa[key], atol=4e-2, rtol=4e-2) # Run forward pass with attention masks inputs_dict.update(attention_masks) with torch.no_grad(): outputs = model(**inputs_dict, output_hidden_states=True) outputs_fa = model_fa(**inputs_dict, output_hidden_states=True) output_tensor = outputs[key] output_tensor_fa = outputs_fa[key] # Mask out padded tokens, they are different for SDPA and Flash Attention 2 if key == "last_hidden_state" and "pixel_attention_mask" in inputs_dict: output_tensor = output_tensor * inputs_dict["pixel_attention_mask"][..., None] output_tensor_fa = output_tensor_fa * inputs_dict["pixel_attention_mask"][..., None] elif key == "last_hidden_state" and inputs_dict.get("attention_mask", None) is not None: output_tensor = output_tensor * inputs_dict["attention_mask"][..., None] output_tensor_fa = output_tensor_fa * inputs_dict["attention_mask"][..., None] torch.testing.assert_close(output_tensor, output_tensor_fa, atol=4e-2, rtol=4e-2) # Check with inference + dropout model.train() _ = model_fa(**inputs_dict, output_hidden_states=True) @unittest.skip(reason="Siglip2 has default right padding (tested in test_flash_attn_2_inference_equivalence)") def test_flash_attn_2_inference_equivalence_right_padding(self): pass @unittest.skip(reason="SDPA can't dispatch on flash with not None `attention_mask`") def test_sdpa_can_dispatch_on_flash(self): pass class Siglip2VisionModelTester: def __init__( self, parent, batch_size=12, num_patches=16, image_num_patches=24, patch_size=2, num_channels=3, is_training=True, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.num_patches = num_patches self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.initializer_range = initializer_range self.scope = scope self.seq_length = image_num_patches def prepare_config_and_inputs(self): pixel_values = floats_tensor( [self.batch_size, self.seq_length, self.num_channels * self.patch_size * self.patch_size] ) pixel_attention_mask = torch.zeros(self.batch_size, self.seq_length, device=torch_device, dtype=torch.long) spatial_shapes = [ (height, width) for height in range(1, self.seq_length) for width in range(1, self.seq_length) if height * width <= self.seq_length ] * self.batch_size spatial_shapes = spatial_shapes[: self.batch_size] spatial_shapes = torch.tensor(spatial_shapes, device=torch_device, dtype=torch.long) for i, (height, width) in enumerate(spatial_shapes): 
pixel_attention_mask[i, : height * width] = 1 config = self.get_config() return config, pixel_values, pixel_attention_mask, spatial_shapes def get_config(self): return Siglip2VisionConfig( num_patches=self.num_patches, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, pixel_attention_mask, spatial_shapes): model = Siglip2VisionModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(pixel_values, pixel_attention_mask, spatial_shapes) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config, pixel_values, pixel_attention_mask, spatial_shapes = self.prepare_config_and_inputs() inputs_dict = { "pixel_values": pixel_values, "pixel_attention_mask": pixel_attention_mask, "spatial_shapes": spatial_shapes, } return config, inputs_dict @require_torch class Siglip2VisionModelTest(Siglip2ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as SIGLIP2 does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (Siglip2VisionModel,) if is_torch_available() else () additional_model_inputs = ["pixel_attention_mask", "spatial_shapes"] fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["Siglip2MultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False def setUp(self): self.model_tester = Siglip2VisionModelTester(self) self.config_tester = ConfigTester( self, config_class=Siglip2VisionConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="SIGLIP2 does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Siglip2VisionModel does not support standalone training") def test_training(self): pass @unittest.skip(reason="Siglip2VisionModel does not support 
standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="Siglip2VisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="Siglip2VisionModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip2 uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @slow def test_model_from_pretrained(self): model_name = "google/siglip2-base-patch16-naflex" model = Siglip2VisionModel.from_pretrained(model_name) self.assertIsNotNone(model) @parameterized.expand(TEST_EAGER_MATCHES_SDPA_INFERENCE_PARAMETERIZATION) @is_flaky() def test_eager_matches_sdpa_inference(self, *args): # adding only flaky decorator here and call the parent test method return getattr(ModelTesterMixin, self._testMethodName)(self) class Siglip2TextModelTester: def __init__( self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, dropout=0.1, attention_dropout=0.1, max_position_embeddings=512, initializer_range=0.02, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.dropout = dropout self.attention_dropout = attention_dropout self.max_position_embeddings = max_position_embeddings self.initializer_range = initializer_range self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) if input_mask is not None: batch_size, seq_length = input_mask.shape rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,)) for batch_idx, start_index in enumerate(rnd_start_indices): input_mask[batch_idx, :start_index] = 1 input_mask[batch_idx, start_index:] = 0 config = self.get_config() return config, input_ids, input_mask def get_config(self): return Siglip2TextConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, dropout=self.dropout, attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, input_ids, input_mask): model = Siglip2TextModel(config=config) model.to(torch_device) model.eval() with torch.no_grad(): result = model(input_ids, attention_mask=input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, input_mask = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class 
Siglip2TextModelTest(Siglip2ModelTesterMixin, unittest.TestCase): all_model_classes = (Siglip2TextModel,) if is_torch_available() else () fx_compatible = False test_resize_embeddings = False test_pruning = False test_head_masking = False model_split_percents = [0.5, 0.8, 0.9] def setUp(self): self.model_tester = Siglip2TextModelTester(self) self.config_tester = ConfigTester(self, config_class=Siglip2TextConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Siglip2TextModel does not support standalone training") def test_training(self): pass @unittest.skip(reason="Siglip2TextModel does not support standalone training") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="Siglip2TextModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="Siglip2TextModel does not support standalone training") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip2 does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Siglip2 uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass @slow def test_model_from_pretrained(self): model_name = "google/siglip2-base-patch16-naflex" model = Siglip2TextModel.from_pretrained(model_name) self.assertIsNotNone(model) class Siglip2ModelTester: def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True): if text_kwargs is None: text_kwargs = {} if vision_kwargs is None: vision_kwargs = {} self.parent = parent self.text_model_tester = Siglip2TextModelTester(parent, **text_kwargs) self.vision_model_tester = Siglip2VisionModelTester(parent, **vision_kwargs) self.batch_size = self.text_model_tester.batch_size # need bs for batching_equivalence test self.is_training = is_training def prepare_config_and_inputs(self): text_config, input_ids, attention_mask = self.text_model_tester.prepare_config_and_inputs() vision_config, pixel_values, pixel_attention_mask, spatial_shapes = ( self.vision_model_tester.prepare_config_and_inputs() ) config = self.get_config() return config, input_ids, attention_mask, pixel_values, pixel_attention_mask, spatial_shapes def get_config(self): return Siglip2Config( text_config=self.text_model_tester.get_config().to_dict(), vision_config=self.vision_model_tester.get_config().to_dict(), ) def create_and_check_model( self, config, input_ids, attention_mask, pixel_values, pixel_attention_mask, spatial_shapes ): model = Siglip2Model(config).to(torch_device).eval() with torch.no_grad(): result = model(input_ids, pixel_values, pixel_attention_mask, spatial_shapes, attention_mask) self.parent.assertEqual( result.logits_per_image.shape, (self.vision_model_tester.batch_size, self.text_model_tester.batch_size) ) self.parent.assertEqual( result.logits_per_text.shape, (self.text_model_tester.batch_size, self.vision_model_tester.batch_size) ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, attention_mask, pixel_values, pixel_attention_mask, spatial_shapes = config_and_inputs inputs_dict = { "input_ids": input_ids, "pixel_values": pixel_values, "pixel_attention_mask": pixel_attention_mask, "spatial_shapes": spatial_shapes, "attention_mask": 
attention_mask, "position_ids": None, "return_loss": False, } return config, inputs_dict @require_torch class Siglip2ModelTest(Siglip2ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Siglip2Model,) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": Siglip2Model} if is_torch_available() else {} additional_model_inputs = [ "pixel_values", "pixel_attention_mask", "spatial_shapes", ] fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["Siglip2MultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False _is_composite = True def setUp(self): self.model_tester = Siglip2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Siglip2Config, has_text_modality=False) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip(reason="Hidden_states is tested in individual model tests") def test_hidden_states_output(self): pass @unittest.skip(reason="Inputs_embeds is tested in individual model tests") def test_inputs_embeds(self): pass @unittest.skip(reason="Retain_grad is tested in individual model tests") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip(reason="Siglip2Model does not have input/output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Siglip2 uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass def test_load_vision_text_config(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() # Save Siglip2Config and check if we can load Siglip2VisionConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) vision_config = Siglip2VisionConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.vision_config.to_dict(), vision_config.to_dict()) # Save Siglip2Config and check if we can load Siglip2TextConfig from it with tempfile.TemporaryDirectory() as tmp_dir_name: config.save_pretrained(tmp_dir_name) text_config = Siglip2TextConfig.from_pretrained(tmp_dir_name) self.assertDictEqual(config.text_config.to_dict(), text_config.to_dict()) @slow def test_model_from_pretrained(self): model_name = "google/siglip2-base-patch16-naflex" model = Siglip2Model.from_pretrained(model_name) self.assertIsNotNone(model) @require_flash_attn @require_torch_gpu @mark.flash_attn_test def test_flash_attn_2_inference_equivalence_right_padding(self): self.skipTest("Siglip2 does not support right padding") class Siglip2ForImageClassificationModelTester(Siglip2ModelTester): def __init__(self, parent): super().__init__(parent) self.batch_size = self.vision_model_tester.batch_size self.num_hidden_layers = self.vision_model_tester.num_hidden_layers self.hidden_size = self.vision_model_tester.hidden_size self.seq_length = self.vision_model_tester.seq_length def prepare_config_and_inputs(self): _, pixel_values, pixel_attention_mask, spatial_shapes = self.vision_model_tester.prepare_config_and_inputs() config = self.get_config() return config, pixel_values, pixel_attention_mask, 
spatial_shapes def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, pixel_attention_mask, spatial_shapes = config_and_inputs inputs_dict = { "pixel_values": pixel_values, "pixel_attention_mask": pixel_attention_mask, "spatial_shapes": spatial_shapes, } return config, inputs_dict @require_torch class Siglip2ForImageClassificationModelTest(Siglip2ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (Siglip2ForImageClassification,) if is_torch_available() else () pipeline_model_mapping = {"image-classification": Siglip2ForImageClassification} if is_torch_available() else {} additional_model_inputs = ["pixel_values", "pixel_attention_mask", "spatial_shapes"] fx_compatible = False test_head_masking = False test_pruning = False test_resize_embeddings = False test_attention_outputs = False # MP works but offload doesn't work when the MultiheadAttention is offloaded # TODO: One potential solution would be to add to set preload_module_classes = ["Siglip2MultiheadAttentionPoolingHead"] # in the dispatch_model function test_cpu_offload = False test_disk_offload_safetensors = False test_disk_offload_bin = False _is_composite = True def setUp(self): self.model_tester = Siglip2ForImageClassificationModelTester(self) @unittest.skip(reason="Siglip2ForImageClassification does not support inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="Siglip2ForImageClassification does not support inputs_embeds") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="Siglip2ForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="Siglip2ForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="Siglip2ForImageClassification does not support gradient checkpointing yet") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="Siglip2 uses the same initialization scheme as the Flax original implementation") def test_initialization(self): pass # Draw a circle on an images with different aspect ratios def prepare_images(): shapes = [(224, 224), (1024, 1024), (224, 1024)] images = [] for height, width in shapes: image = Image.new("RGB", (width, height), color="red") draw = ImageDraw.Draw(image) center_x = image.width // 2 center_y = image.height // 2 radius = min(center_x, center_y) // 8 * 7 draw.ellipse( (center_x - radius, center_y - radius, center_x + radius, center_y + radius), fill="blue", outline="green", width=image.width // 20, ) images.append(image) return images @require_vision @require_torch class Siglip2ModelIntegrationTest(unittest.TestCase): @slow def test_inference(self): model_name = "google/siglip2-base-patch16-naflex" model = Siglip2Model.from_pretrained(model_name).to(torch_device) processor = Siglip2Processor.from_pretrained(model_name) images = prepare_images() text = [ "circle", "ellipsoid", "blue circle on red background", "blue circle with green border on red background", "green circle on red background", "a dog", "a blue dog with a green border on a red background", ] inputs = processor(text=text, images=images, return_tensors="pt") inputs = inputs.to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) logits_per_image = outputs.logits_per_image logits_per_text = outputs.logits_per_text # verify the logits shape 
self.assertEqual( logits_per_image.shape, torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])), ) self.assertEqual( logits_per_text.shape, torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])), ) # verify the logits values # fmt: off expected_logits_per_texts = Expectations({ ("cuda", None): [ [ 1.0195, -0.0280, -1.4468], [ -4.5395, -6.2269, -1.5667], [ 4.1757, 5.0358, 3.5159], [ 9.4264, 10.1879, 6.3353], [ 2.4409, 3.1058, 4.5491], [-12.3230, -13.7355, -13.4632], [ 1.1520, 1.1687, -1.9647], ], ("rocm", (9, 5)): [ [ 1.0236, -0.0376, -1.4464], [ -4.5358, -6.2235, -1.5628], [ 4.1708, 5.0334, 3.5187], [ 9.4241, 10.1828, 6.3366], [ 2.4371, 3.1062, 4.5530], [-12.3173, -13.7240, -13.4580], [ 1.1502, 1.1716, -1.9623] ], }) EXPECTED_LOGITS_PER_TEXT = torch.tensor(expected_logits_per_texts.get_expectation()).to(torch_device) # fmt: on torch.testing.assert_close(outputs.logits_per_text, EXPECTED_LOGITS_PER_TEXT, rtol=1e-3, atol=1e-3)
transformers/tests/models/siglip2/test_modeling_siglip2.py/0
{ "file_path": "transformers/tests/models/siglip2/test_modeling_siglip2.py", "repo_id": "transformers", "token_count": 13151 }
589
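A side note on the Flash Attention 2 equivalence check in the Siglip2 tests above: padded positions may legitimately hold different values under different attention implementations, so the test zeroes them out with the attention mask before comparing hidden states. The sketch below reproduces that masking step on synthetic tensors; the shapes, values, and tolerances are made up for illustration and are not tied to any real model output.

# Illustrative sketch: mask padded positions before comparing two implementations' outputs.
import torch

batch, seq_len, hidden = 2, 5, 4
attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0]])  # 1 = real token, 0 = padding

out_eager = torch.randn(batch, seq_len, hidden)
out_flash = out_eager.clone()
out_flash[attention_mask == 0] = 123.0  # padded positions are allowed to differ arbitrarily

# Zero out padded positions on both sides, then check closeness on real tokens only.
masked_eager = out_eager * attention_mask[..., None]
masked_flash = out_flash * attention_mask[..., None]
torch.testing.assert_close(masked_eager, masked_flash, atol=4e-2, rtol=4e-2)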
# Copyright 2021-2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for the SpeechT5 feature extractors.""" import itertools import random import unittest import numpy as np from transformers import BatchFeature, SpeechT5FeatureExtractor from transformers.testing_utils import require_torch from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch global_rng = random.Random() # Copied from tests.models.whisper.test_feature_extraction_whisper.floats_list def floats_list(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = global_rng values = [] for batch_idx in range(shape[0]): values.append([]) for _ in range(shape[1]): values[-1].append(rng.random() * scale) return values @require_torch class SpeechT5FeatureExtractionTester: def __init__( self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=1, padding_value=0.0, sampling_rate=16000, do_normalize=True, num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window", fmin=80, fmax=7600, mel_floor=1e-10, return_attention_mask=True, ): self.parent = parent self.batch_size = batch_size self.min_seq_length = min_seq_length self.max_seq_length = max_seq_length self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) self.feature_size = feature_size self.padding_value = padding_value self.sampling_rate = sampling_rate self.do_normalize = do_normalize self.num_mel_bins = num_mel_bins self.hop_length = hop_length self.win_length = win_length self.win_function = win_function self.fmin = fmin self.fmax = fmax self.mel_floor = mel_floor self.return_attention_mask = return_attention_mask def prepare_feat_extract_dict(self): return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "do_normalize": self.do_normalize, "num_mel_bins": self.num_mel_bins, "hop_length": self.hop_length, "win_length": self.win_length, "win_function": self.win_function, "fmin": self.fmin, "fmax": self.fmax, "mel_floor": self.mel_floor, "return_attention_mask": self.return_attention_mask, } def prepare_inputs_for_common(self, equal_length=False, numpify=False): def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) if equal_length: speech_inputs = floats_list((self.batch_size, self.max_seq_length)) else: # make sure that inputs increase in size speech_inputs = [ _flatten(floats_list((x, self.feature_size))) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs def prepare_inputs_for_target(self, equal_length=False, numpify=False): if equal_length: speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size speech_inputs = [ floats_list((x, 
self.num_mel_bins)) for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff) ] if numpify: speech_inputs = [np.asarray(x) for x in speech_inputs] return speech_inputs @require_torch class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase): feature_extraction_class = SpeechT5FeatureExtractor def setUp(self): self.feat_extract_tester = SpeechT5FeatureExtractionTester(self) def _check_zero_mean_unit_variance(self, input_vector): self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3)) def test_call(self): # Tests that all call wrap to encode_plus and batch_encode_plus feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test not batched input encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_zero_mean_unit_variance_normalization_np(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 1600, None] for max_length, padding in zip(max_lengths, paddings): processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np") input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800]) self.assertTrue(input_values[0][800:].sum() < 1e-6) self._check_zero_mean_unit_variance(input_values[1][:1000]) self.assertTrue(input_values[0][1000:].sum() < 1e-6) self._check_zero_mean_unit_variance(input_values[2][:1200]) def test_zero_mean_unit_variance_normalization(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) lengths = range(800, 1400, 200) speech_inputs = [floats_list((1, x))[0] for x in lengths] paddings = ["longest", "max_length", "do_not_pad"] max_lengths = [None, 1600, None] for max_length, padding in zip(max_lengths, paddings): processed = feat_extract(speech_inputs, max_length=max_length, padding=padding) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0][:800]) self._check_zero_mean_unit_variance(input_values[1][:1000]) self._check_zero_mean_unit_variance(input_values[2][:1200]) def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) 
self._check_zero_mean_unit_variance(input_values[1]) self._check_zero_mean_unit_variance(input_values[2]) def test_zero_mean_unit_variance_normalization_trunc_np_longest(self): feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1, :1000]) self._check_zero_mean_unit_variance(input_values[2]) # make sure that if max_length < longest -> then pad to max_length self.assertTrue(input_values.shape == (3, 1000)) speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] processed = feat_extract( speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np" ) input_values = processed.input_values self._check_zero_mean_unit_variance(input_values[0, :800]) self._check_zero_mean_unit_variance(input_values[1, :1000]) self._check_zero_mean_unit_variance(input_values[2]) # make sure that if max_length > longest -> then pad to longest self.assertTrue(input_values.shape == (3, 1200)) def test_double_precision_pad(self): feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) np_speech_inputs = np.random.rand(100).astype(np.float64) py_speech_inputs = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np") self.assertTrue(np_processed.input_values.dtype == np.float32) pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt") self.assertTrue(pt_processed.input_values.dtype == torch.float32) def test_call_target(self): # Tests that all call wrap to encode_plus and batch_encode_plus feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)] np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs] # Test feature size input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values self.assertTrue(input_values.ndim == 3) self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins) # Test not batched input encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3)) # Test batched encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) # Test 2-D numpy arrays are batched. 
speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)] np_speech_inputs = np.asarray(speech_inputs) encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2): self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3)) def test_batch_feature_target(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_target() feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name]))) speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True) processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins) ) @require_torch def test_batch_feature_target_pt(self): speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True) feat_extract = self.feature_extraction_class(**self.feat_extract_dict) input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt") batch_features_input = processed_features[input_name] if len(batch_features_input.shape) < 3: batch_features_input = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins) ) @require_torch def test_padding_accepts_tensors_target_pt(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_target() input_name = feat_extract.model_input_names[0] processed_features = BatchFeature({input_name: speech_inputs}) feat_extract.feature_size = feat_extract.num_mel_bins # hack! input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name] input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name] self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2) def test_attention_mask_target(self): feat_dict = self.feat_extract_dict feat_dict["return_attention_mask"] = True feat_extract = self.feature_extraction_class(**feat_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_target() input_lengths = [len(x) for x in speech_inputs] input_name = feat_extract.model_input_names[0] processed = BatchFeature({input_name: speech_inputs}) feat_extract.feature_size = feat_extract.num_mel_bins # hack! 
processed = feat_extract.pad(processed, padding="longest", return_tensors="np") self.assertIn("attention_mask", processed) self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2])) self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths) def test_attention_mask_with_truncation_target(self): feat_dict = self.feat_extract_dict feat_dict["return_attention_mask"] = True feat_extract = self.feature_extraction_class(**feat_dict) speech_inputs = self.feat_extract_tester.prepare_inputs_for_target() input_lengths = [len(x) for x in speech_inputs] input_name = feat_extract.model_input_names[0] processed = BatchFeature({input_name: speech_inputs}) max_length = min(input_lengths) feat_extract.feature_size = feat_extract.num_mel_bins # hack! processed_pad = feat_extract.pad( processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np" ) self.assertIn("attention_mask", processed_pad) self.assertListEqual( list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs] ) def _load_datasamples(self, num_samples): from datasets import load_dataset ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id")[:num_samples]["audio"] return [x["array"] for x in speech_samples] def test_integration(self): # fmt: off EXPECTED_INPUT_VALUES = torch.tensor( [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03, 3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03, 2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04, 4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03, 7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04, 4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03] ) # fmt: on input_speech = self._load_datasamples(1) feature_extractor = SpeechT5FeatureExtractor() input_values = feature_extractor(input_speech, return_tensors="pt").input_values self.assertEqual(input_values.shape, (1, 93680)) torch.testing.assert_close(input_values[0, :30], EXPECTED_INPUT_VALUES, rtol=1e-6, atol=1e-6) def test_integration_target(self): # fmt: off EXPECTED_INPUT_VALUES = torch.tensor( [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777, -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386, -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571, -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] ) # fmt: on input_speech = self._load_datasamples(1) feature_extractor = SpeechT5FeatureExtractor() input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values self.assertEqual(input_values.shape, (1, 366, 80)) torch.testing.assert_close(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, rtol=1e-4, atol=1e-4)
transformers/tests/models/speecht5/test_feature_extraction_speecht5.py/0
{ "file_path": "transformers/tests/models/speecht5/test_feature_extraction_speecht5.py", "repo_id": "transformers", "token_count": 8412 }
590
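A brief illustration of the `_check_zero_mean_unit_variance` assertion used in the SpeechT5 feature-extraction tests above: with `do_normalize=True`, each unpadded sequence is expected to come out with roughly zero mean and unit variance, which the test checks against a 1e-3 tolerance. The snippet below sketches that normalization and check on synthetic data; the 1e-7 epsilon is an assumption for numerical stability, not necessarily the extractor's exact constant.

# Illustrative sketch: normalize a synthetic waveform and verify mean ~ 0, variance ~ 1.
import numpy as np

rng = np.random.default_rng(0)
raw = rng.uniform(-1.0, 1.0, size=800).astype(np.float32)

normalized = (raw - raw.mean()) / np.sqrt(raw.var() + 1e-7)

assert np.all(np.abs(normalized.mean(axis=0)) < 1e-3)
assert np.all(np.abs(normalized.var(axis=0) - 1) < 1e-3)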
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest from datasets import load_dataset from transformers.models.superglue.configuration_superglue import SuperGlueConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import SuperGlueForKeypointMatching if is_vision_available(): from transformers import AutoImageProcessor class SuperGlueModelTester: def __init__( self, parent, batch_size=2, image_width=80, image_height=60, keypoint_detector_config=None, hidden_size: int = 64, keypoint_encoder_sizes: list[int] = [32, 64], gnn_layers_types: list[str] = ["self", "cross"] * 2, num_attention_heads: int = 4, sinkhorn_iterations: int = 100, matching_threshold: float = 0.2, ): if keypoint_detector_config is None: keypoint_detector_config = { "encoder_hidden_sizes": [32, 64], "decoder_hidden_size": 64, "keypoint_decoder_dim": 65, "descriptor_decoder_dim": 64, "keypoint_threshold": 0.005, "max_keypoints": 256, "nms_radius": 4, "border_removal_distance": 4, } self.parent = parent self.batch_size = batch_size self.image_width = image_width self.image_height = image_height self.keypoint_detector_config = keypoint_detector_config self.hidden_size = hidden_size self.keypoint_encoder_sizes = keypoint_encoder_sizes self.gnn_layers_types = gnn_layers_types self.num_attention_heads = num_attention_heads self.sinkhorn_iterations = sinkhorn_iterations self.matching_threshold = matching_threshold def prepare_config_and_inputs(self): # SuperGlue expects a grayscale image as input pixel_values = floats_tensor([self.batch_size, 2, 3, self.image_height, self.image_width]) config = self.get_config() return config, pixel_values def get_config(self): return SuperGlueConfig( keypoint_detector_config=self.keypoint_detector_config, hidden_size=self.hidden_size, keypoint_encoder_sizes=self.keypoint_encoder_sizes, gnn_layers_types=self.gnn_layers_types, num_attention_heads=self.num_attention_heads, sinkhorn_iterations=self.sinkhorn_iterations, matching_threshold=self.matching_threshold, ) def create_and_check_model(self, config, pixel_values): model = SuperGlueForKeypointMatching(config=config) model.to(torch_device) model.eval() result = model(pixel_values) maximum_num_matches = result.mask.shape[-1] self.parent.assertEqual( result.keypoints.shape, (self.batch_size, 2, maximum_num_matches, 2), ) self.parent.assertEqual( result.matches.shape, (self.batch_size, 2, maximum_num_matches), ) self.parent.assertEqual( result.matching_scores.shape, (self.batch_size, 2, maximum_num_matches), ) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values = config_and_inputs inputs_dict = {"pixel_values": 
pixel_values} return config, inputs_dict @require_torch class SuperGlueModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (SuperGlueForKeypointMatching,) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = True def setUp(self): self.model_tester = SuperGlueModelTester(self) self.config_tester = ConfigTester(self, config_class=SuperGlueConfig, has_text_modality=False, hidden_size=64) def test_config(self): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="SuperGlueForKeypointMatching does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="SuperGlueForKeypointMatching does not support input and output embeddings") def test_model_get_set_embeddings(self): pass @unittest.skip(reason="SuperGlueForKeypointMatching does not use feedforward chunking") def test_feed_forward_chunking(self): pass @unittest.skip(reason="SuperGlueForKeypointMatching is not trainable") def test_training(self): pass @unittest.skip(reason="SuperGlueForKeypointMatching is not trainable") def test_training_gradient_checkpointing(self): pass @unittest.skip(reason="SuperGlueForKeypointMatching is not trainable") def test_training_gradient_checkpointing_use_reentrant(self): pass @unittest.skip(reason="SuperGlueForKeypointMatching is not trainable") def test_training_gradient_checkpointing_use_reentrant_false(self): pass @unittest.skip(reason="SuperGlue does not output any loss term in the forward pass") def test_retain_grad_hidden_states_attentions(self): pass def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states maximum_num_matches = outputs.mask.shape[-1] hidden_states_sizes = ( self.model_tester.keypoint_encoder_sizes + [self.model_tester.hidden_size] + [self.model_tester.hidden_size, self.model_tester.hidden_size * 2] * len(self.model_tester.gnn_layers_types) + [self.model_tester.hidden_size] * 2 ) for i, hidden_states_size in enumerate(hidden_states_sizes): self.assertListEqual( list(hidden_states[i].shape[-2:]), [hidden_states_size, maximum_num_matches], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del 
inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): def check_attention_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions maximum_num_matches = outputs.mask.shape[-1] expected_attention_shape = [ self.model_tester.num_attention_heads, maximum_num_matches, maximum_num_matches, ] for i, attention in enumerate(attentions): self.assertListEqual( list(attention.shape[-3:]), expected_attention_shape, ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True check_attention_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_attentions"] config.output_attentions = True check_attention_output(inputs_dict, config, model_class) @slow def test_model_from_pretrained(self): from_pretrained_ids = ["magic-leap-community/superglue_indoor", "magic-leap-community/superglue_outdoor"] for model_name in from_pretrained_ids: model = SuperGlueForKeypointMatching.from_pretrained(model_name) self.assertIsNotNone(model) def test_forward_labels_should_be_none(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): model_inputs = self._prepare_for_class(inputs_dict, model_class) # Provide an arbitrary sized Tensor as labels to model inputs model_inputs["labels"] = torch.rand((128, 128)) with self.assertRaises(ValueError) as cm: model(**model_inputs) self.assertEqual(ValueError, cm.exception.__class__) def test_batching_equivalence(self): """ Overwriting ModelTesterMixin.test_batching_equivalence since SuperGlue returns `matching_scores` tensors full of zeros which causes the test to fail, because cosine_similarity of two zero tensors is 0. Discussed here : https://github.com/huggingface/transformers/pull/29886#issuecomment-2481539481 """ def recursive_check(batched_object, single_row_object, model_name, key): if isinstance(batched_object, (list, tuple)): for batched_object_value, single_row_object_value in zip(batched_object, single_row_object): recursive_check(batched_object_value, single_row_object_value, model_name, key) elif isinstance(batched_object, dict): for batched_object_value, single_row_object_value in zip( batched_object.values(), single_row_object.values() ): recursive_check(batched_object_value, single_row_object_value, model_name, key) # do not compare returned loss (0-dim tensor) / codebook ids (int) / caching objects elif batched_object is None or not isinstance(batched_object, torch.Tensor): return elif batched_object.dim() == 0: return else: # indexing the first element does not always work # e.g. 
models that output similarity scores of size (N, M) would need to index [0, 0] slice_ids = [slice(0, index) for index in single_row_object.shape] batched_row = batched_object[slice_ids] self.assertFalse( torch.isnan(batched_row).any(), f"Batched output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(batched_row).any(), f"Batched output has `inf` in {model_name} for key={key}" ) self.assertFalse( torch.isnan(single_row_object).any(), f"Single row output has `nan` in {model_name} for key={key}" ) self.assertFalse( torch.isinf(single_row_object).any(), f"Single row output has `inf` in {model_name} for key={key}" ) self.assertTrue( (equivalence(batched_row, single_row_object)) <= 1e-03, msg=( f"Batched and Single row outputs are not equal in {model_name} for key={key}. " f"Difference={equivalence(batched_row, single_row_object)}." ), ) def equivalence(tensor1, tensor2): return torch.max(torch.abs(tensor1 - tensor2)) config, batched_input = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: config.output_hidden_states = True model_name = model_class.__name__ batched_input_prepared = self._prepare_for_class(batched_input, model_class) model = model_class(config).to(torch_device).eval() batch_size = self.model_tester.batch_size single_row_input = {} for key, value in batched_input_prepared.items(): if isinstance(value, torch.Tensor) and value.shape[0] % batch_size == 0: # e.g. musicgen has inputs of size (bs*codebooks). in most cases value.shape[0] == batch_size single_batch_shape = value.shape[0] // batch_size single_row_input[key] = value[:single_batch_shape] else: single_row_input[key] = value with torch.no_grad(): model_batched_output = model(**batched_input_prepared) model_row_output = model(**single_row_input) if isinstance(model_batched_output, torch.Tensor): model_batched_output = {"model_output": model_batched_output} model_row_output = {"model_output": model_row_output} for key in model_batched_output: recursive_check(model_batched_output[key], model_row_output[key], model_name, key) def prepare_imgs(): dataset = load_dataset("hf-internal-testing/image-matching-test-dataset", split="train") image1 = dataset[0]["image"] image2 = dataset[1]["image"] image3 = dataset[2]["image"] return [[image1, image2], [image3, image2]] @require_torch @require_vision class SuperGlueModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ( AutoImageProcessor.from_pretrained("magic-leap-community/superglue_outdoor") if is_vision_available() else None ) @slow def test_inference(self): model = SuperGlueForKeypointMatching.from_pretrained("magic-leap-community/superglue_outdoor").to(torch_device) preprocessor = self.default_image_processor images = prepare_imgs() inputs = preprocessor(images=images, return_tensors="pt").to(torch_device) with torch.no_grad(): outputs = model(**inputs, output_hidden_states=True, output_attentions=True) predicted_number_of_matches = torch.sum(outputs.matches[0][0] != -1).item() predicted_matches_values = outputs.matches[0, 0, :30] predicted_matching_scores_values = outputs.matching_scores[0, 0, :20] expected_number_of_matches = 282 expected_matches_values = torch.tensor([125,630,137,138,136,143,135,-1,-1,153, 154,156,117,160,-1,149,147,152,168,-1, 165,182,-1,190,187,188,189,112,-1,193], device=predicted_matches_values.device) # fmt:skip expected_matching_scores_values = torch.tensor([0.9899,0.0033,0.9897,0.9889,0.9879,0.7464,0.7109,0.0,0.0,0.9841, 
0.9889,0.9639,0.0114,0.9559,0.0,0.9735,0.8018,0.5190,0.9157,0.0], device=predicted_matches_values.device) # fmt:skip """ Because of inconsistencies introduced between CUDA versions, the checks here are less strict. SuperGlue relies on SuperPoint, which may, depending on CUDA version, return different number of keypoints (866 or 867 in this specific test example). The consequence of having different number of keypoints is that the number of matches will also be different. In the 20 first matches being checked, having one keypoint less will result in 1 less match. The matching scores will also be different, as the keypoints are different. The checks here are less strict to account for these inconsistencies. Therefore, the test checks that the predicted number of matches, matches and matching scores are close to the expected values, individually. Here, the tolerance of the number of values changing is set to 2. This was discussed [here](https://github.com/huggingface/transformers/pull/29886#issuecomment-2482752787) Such CUDA inconsistencies can be found [here](https://github.com/huggingface/transformers/pull/33200/files#r1785980300) """ self.assertTrue(abs(predicted_number_of_matches - expected_number_of_matches) < 4) self.assertTrue( torch.sum(~torch.isclose(predicted_matching_scores_values, expected_matching_scores_values, atol=1e-2)) < 4 ) self.assertTrue(torch.sum(predicted_matches_values != expected_matches_values) < 4) self.assertTrue(torch.all(outputs.matches[0, 1] < torch.sum(outputs.mask[0, 0]))) self.assertTrue(torch.all(outputs.matches[0, 0] < torch.sum(outputs.mask[0, 1])))
transformers/tests/models/superglue/test_modeling_superglue.py/0
{ "file_path": "transformers/tests/models/superglue/test_modeling_superglue.py", "repo_id": "transformers", "token_count": 8457 }
591
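# --- Illustrative sketch (added for clarity; not part of the original test suite) ---
# The SuperGlue integration test above tolerates a small number of value-level mismatches so that
# CUDA-version-dependent keypoint counts (866 vs 867) do not make the test flaky. The counting
# pattern is simply "how many entries differ by more than atol". A minimal example with made-up
# matching scores (hypothetical values, not taken from the model):
import torch

_predicted_scores = torch.tensor([0.985, 0.50, 0.00, 0.97])
_expected_scores = torch.tensor([0.980, 0.50, 0.00, 0.10])

# Count the entries that are NOT within the absolute tolerance of 1e-2.
_num_mismatched = torch.sum(~torch.isclose(_predicted_scores, _expected_scores, atol=1e-2)).item()
assert _num_mismatched < 4  # only the last entry differs, so the check passes (1 mismatch)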
# Copyright 2018 Google T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import pickle import tempfile import unittest import pytest from transformers import T5Config, is_torch_available from transformers.models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_4 from transformers.testing_utils import ( Expectations, cleanup, require_accelerate, require_sentencepiece, require_tokenizers, require_torch, require_torch_accelerator, slow, torch_device, ) from transformers.utils import cached_property from transformers.utils.fx import symbolic_trace from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch import torch.nn.functional as F from transformers import ( AutoTokenizer, ByT5Tokenizer, T5EncoderModel, T5ForConditionalGeneration, T5ForQuestionAnswering, T5ForSequenceClassification, T5ForTokenClassification, T5Model, T5Tokenizer, ) class T5ModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=7, # For common tests is_training=True, use_attention_mask=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.scope = None self.decoder_layers = decoder_layers def get_large_model_config(self): return T5Config.from_pretrained("google-t5/t5-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size).clamp(2) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None decoder_attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) decoder_attention_mask = ids_tensor([self.batch_size, 
self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = self.get_config() return ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def get_pipeline_config(self): return T5Config( vocab_size=166, # t5 forces 100 extra tokens d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def get_config(self): return T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, ) def check_prepare_lm_labels_via_shift_left( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config) model.to(torch_device) model.eval() # make sure that lm_labels are correctly padded from the right lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id) # add casaul pad token mask triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not() lm_labels.masked_fill_(triangular_mask, self.pad_token_id) decoder_input_ids = model._shift_right(lm_labels) for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)): # first item self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id) if i < decoder_input_ids_slice.shape[-1]: if i < decoder_input_ids.shape[-1] - 1: # items before diagonal self.parent.assertListEqual( decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist() ) # pad items after diagonal if i < decoder_input_ids.shape[-1] - 2: self.parent.assertListEqual( decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist() ) else: # all items after square self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist()) def create_and_check_model( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) decoder_output = result.last_hidden_state decoder_past = result.past_key_values encoder_output = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size)) # There should be `num_layers` key value 
embeddings stored in decoder_past self.parent.assertEqual(len(decoder_past), config.num_layers) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0]), 4) def create_and_check_with_lm_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5ForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_with_sequence_classification_head( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): labels = torch.tensor([1] * self.batch_size, dtype=torch.long, device=torch_device) model = T5ForSequenceClassification(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=input_ids, labels=labels, ) # self.parent.assertEqual(len(outputs), 4) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def create_and_check_decoder_model_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) output, past_key_values = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config).get_decoder() model.to(torch_device) model.eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass output, past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True).to_tuple() # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 
1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config).get_decoder().to(torch_device).eval() # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_generate_with_past_key_values( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5ForConditionalGeneration(config=config).to(torch_device).eval() torch.manual_seed(0) output_without_past_cache = model.generate( input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False ) torch.manual_seed(0) output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True) self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache)) def create_and_check_model_fp16_forward( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): model = T5Model(config=config).to(torch_device).half().eval() output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_encoder_decoder_shared_weights( self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ): for model_class in [T5Model, T5ForConditionalGeneration]: torch.manual_seed(0) model = 
model_class(config=config).to(torch_device).eval() # load state dict copies weights but does not tie them model.encoder.load_state_dict(model.decoder.state_dict(), strict=False) torch.manual_seed(0) tied_config = copy.deepcopy(config) tied_config.tie_encoder_decoder = True tied_model = model_class(config=tied_config).to(torch_device).eval() model_result = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4 ) ) # check that outputs after saving and loading are equal with tempfile.TemporaryDirectory() as tmpdirname: tied_model.save_pretrained(tmpdirname) tied_model = model_class.from_pretrained(tmpdirname) tied_model.to(torch_device) tied_model.eval() # check that models has less parameters self.parent.assertLess( sum(p.numel() for p in tied_model.parameters()), sum(p.numel() for p in model.parameters()) ) random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item() tied_model_result = tied_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, ) # check that outputs are equal self.parent.assertTrue( torch.allclose( model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4, ) ) def check_resize_embeddings_t5_v1_1( self, config, ): prev_vocab_size = config.vocab_size config.tie_word_embeddings = False model = T5ForConditionalGeneration(config=config).to(torch_device).eval() model.resize_token_embeddings(prev_vocab_size - 10) self.parent.assertEqual(model.get_input_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.get_output_embeddings().weight.shape[0], prev_vocab_size - 10) self.parent.assertEqual(model.config.vocab_size, prev_vocab_size - 10) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, } return config, inputs_dict @require_torch class T5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (T5Model, T5ForConditionalGeneration, T5ForSequenceClassification, T5ForQuestionAnswering) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": T5Model, "question-answering": T5ForQuestionAnswering, "summarization": T5ForConditionalGeneration, "text-classification": T5ForSequenceClassification, "text2text-generation": T5ForConditionalGeneration, "translation": T5ForConditionalGeneration, "zero-shot": T5ForSequenceClassification, } if is_torch_available() else {} ) all_parallelizable_model_classes = (T5Model, T5ForConditionalGeneration) if is_torch_available() else () 
fx_compatible = True test_pruning = False test_resize_embeddings = True test_model_parallel = True is_encoder_decoder = True # The small T5 model needs higher percentages for CPU/MP tests model_split_percents = [0.5, 0.8, 0.9] def setUp(self): self.model_tester = T5ModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) # `QAPipelineTests` is not working well with slow tokenizers (for some models) and we don't want to touch the file # `src/transformers/data/processors/squad.py` (where this test fails for this model) def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if tokenizer_name is None: return True if pipeline_test_case_name == "QAPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not self.fx_compatible: self.skipTest(reason="torch.fx is not compatible with this model") configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: if model_class.__name__ == "T5ForSequenceClassification": continue model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: if model.config.is_encoder_decoder: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward labels = inputs.get("labels", None) input_names = [ "attention_mask", "decoder_attention_mask", "decoder_input_ids", "input_features", "input_ids", "input_values", ] if labels is not None: input_names.append("labels") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) else: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) if model.__class__.__name__ in set(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES.values()) and ( not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) model_output = model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], 
traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Test that the model can be serialized and restored properly with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. # (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() # overwrite because T5 doesn't accept position ids as input and expects `decoder_input_ids` def test_custom_4d_attention_mask(self): for model_class in self.all_generative_model_classes: config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() model = model_class(config).to(device=torch_device, dtype=torch.float32) ( input_ids, _, input_ids_shared_prefix, mask_shared_prefix, _, ) = self._get_custom_4d_mask_test_data() logits = model.forward( decoder_input_ids=input_ids, input_ids=input_dict["input_ids"][:3], ).logits # logits.shape == torch.Size([3, 4, ...]) logits_shared_prefix = model( input_ids=input_dict["input_ids"][:1], decoder_input_ids=input_ids_shared_prefix, decoder_attention_mask=mask_shared_prefix, )[0] # logits_shared_prefix.shape == torch.Size([1, 6, ...]) out_last_tokens = logits[:, -1, :] # last tokens in each batch line out_shared_prefix_last_tokens = logits_shared_prefix[0, -3:, :] # last three tokens # comparing softmax-normalized logits: normalized_0 = F.softmax(out_last_tokens) normalized_1 = F.softmax(out_shared_prefix_last_tokens) torch.testing.assert_close(normalized_0, normalized_1, rtol=1e-3, atol=1e-4) def test_config(self): self.config_tester.run_common_tests() def test_shift_right(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_prepare_lm_labels_via_shift_left(*config_and_inputs) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_v1_1(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() # check that gated gelu feed forward and different word embeddings work config = config_and_inputs[0] config.tie_word_embeddings = False config.feed_forward_proj = "gated-gelu" self.model_tester.create_and_check_model(config, *config_and_inputs[1:]) # T5ForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (T5Model, T5ForConditionalGeneration, T5ForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: 
inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_config_and_model_silu_gated(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() config = config_and_inputs[0] config.feed_forward_proj = "gated-silu" self.model_tester.create_and_check_model(*config_and_inputs) def test_with_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_lm_head(*config_and_inputs) def test_with_sequence_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_sequence_classification_head(*config_and_inputs) def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_past_with_attn_mask(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_decoder_model_past_with_3d_attn_mask(self): ( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = self.model_tester.prepare_config_and_inputs() attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.encoder_seq_length, self.model_tester.encoder_seq_length], vocab_size=2, ) decoder_attention_mask = ids_tensor( [self.model_tester.batch_size, self.model_tester.decoder_seq_length, self.model_tester.decoder_seq_length], vocab_size=2, ) self.model_tester.create_and_check_decoder_model_attention_mask_past( config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_generate_with_past_key_values(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_generate_with_past_key_values(*config_and_inputs) def test_encoder_decoder_shared_weights(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_encoder_decoder_shared_weights(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_v1_1_resize_embeddings(self): config = self.model_tester.prepare_config_and_inputs()[0] self.model_tester.check_resize_embeddings_t5_v1_1(config) @slow def test_model_from_pretrained(self): model_name = "google-t5/t5-small" model = T5Model.from_pretrained(model_name) self.assertIsNotNone(model) class T5EncoderOnlyModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, # For common tests use_attention_mask=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, is_training=False, dropout_rate=0.1, initializer_factor=0.002, is_encoder_decoder=False, eos_token_id=1, pad_token_id=0, scope=None, ): self.parent = parent self.batch_size = batch_size self.encoder_seq_length = encoder_seq_length # For common tests self.seq_length = self.encoder_seq_length 
self.use_attention_mask = use_attention_mask self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.d_ff = d_ff self.relative_attention_num_buckets = relative_attention_num_buckets self.dropout_rate = dropout_rate self.initializer_factor = initializer_factor self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.is_encoder_decoder = is_encoder_decoder self.scope = None self.is_training = is_training def get_large_model_config(self): return T5Config.from_pretrained("google-t5/t5-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2) config = T5Config( vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, input_ids, attention_mask, ) def create_and_check_model( self, config, input_ids, attention_mask, ): model = T5EncoderModel(config=config) model.to(torch_device) model.eval() result = model( input_ids=input_ids, attention_mask=attention_mask, ) result = model(input_ids=input_ids) encoder_output = result.last_hidden_state self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size)) def create_and_check_model_fp16_forward( self, config, input_ids, attention_mask, ): model = T5EncoderModel(config=config).to(torch_device).half().eval() output = model(input_ids, attention_mask=attention_mask)["last_hidden_state"] self.parent.assertFalse(torch.isnan(output).any().item()) def create_and_check_with_token_classification_head( self, config, input_ids, attention_mask, ): labels = torch.tensor([1] * self.seq_length * self.batch_size, dtype=torch.long, device=torch_device) model = T5ForTokenClassification(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, labels=labels, attention_mask=attention_mask, ) self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.seq_length, config.num_labels)) self.parent.assertEqual(outputs["loss"].size(), ()) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict class T5EncoderOnlyModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (T5EncoderModel, T5ForTokenClassification) if is_torch_available() else () test_pruning = False test_resize_embeddings = False test_model_parallel = True pipeline_model_mapping = ( { "token-classification": T5ForTokenClassification, } if is_torch_available() else {} ) all_parallelizable_model_classes = (T5EncoderModel,) if is_torch_available() else () def setUp(self): self.model_tester = T5EncoderOnlyModelTester(self) self.config_tester = ConfigTester(self, config_class=T5Config, d_model=37) def test_config(self): self.config_tester.run_common_tests() def 
test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skipIf(torch_device == "cpu", "Can't do half precision") def test_model_fp16_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs) def test_with_token_classification_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_with_token_classification_head(*config_and_inputs) def is_pipeline_test_to_skip( self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, image_processor_name, feature_extractor_name, processor_name, ): if tokenizer_name is None: return True # `T5EncoderOnlyModelTest` is not working well with slow tokenizers (for some models) and we don't want to touch the file # `src/transformers/data/processors/squad.py` (where this test fails for this model) if pipeline_test_case_name == "TokenClassificationPipelineTests" and not tokenizer_name.endswith("Fast"): return True return False def use_task_specific_params(model, task): model.config.update(model.config.task_specific_params[task]) @require_torch @require_accelerate @require_tokenizers @slow class T5ModelFp16Tests(unittest.TestCase): def test_fp16_fp32_conversion(self): r""" A test to check whether the argument `keep_in_fp32_modules` correctly does its job """ orig_import = __import__ accelerate_mock = unittest.mock.Mock() # mock import of accelerate def import_accelerate_mock(name, *args, **kwargs): if name == "accelerate": if accelerate_available: return accelerate_mock else: raise ImportError return orig_import(name, *args, **kwargs) # Load without using `accelerate` with unittest.mock.patch("builtins.__import__", side_effect=import_accelerate_mock): accelerate_available = False model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small", dtype=torch.float16) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16) # Load without in bf16 model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small", dtype=torch.bfloat16) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16) # Load using `accelerate` in bf16 model = T5ForConditionalGeneration.from_pretrained( "google-t5/t5-small", dtype=torch.bfloat16, device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16) # Load using `accelerate` in bf16 model = T5ForConditionalGeneration.from_pretrained( "google-t5/t5-small", dtype=torch.bfloat16, ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.bfloat16) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.bfloat16) # Load without using `accelerate` model = T5ForConditionalGeneration.from_pretrained( "google-t5/t5-small", dtype=torch.float16, ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16) # Load using `accelerate` model = 
T5ForConditionalGeneration.from_pretrained( "google-t5/t5-small", dtype=torch.float16, device_map="auto" ) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32) self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wi.weight.dtype == torch.float16) @require_torch @require_sentencepiece @require_tokenizers class T5ModelIntegrationTests(unittest.TestCase): def tearDown(self): # See LlamaIntegrationTest.tearDown(). Can be removed once LlamaIntegrationTest.tearDown() is removed. cleanup(torch_device, gc_collect=False) @cached_property def model(self): return T5ForConditionalGeneration.from_pretrained("google-t5/t5-base").to(torch_device) @cached_property def tokenizer(self): return T5Tokenizer.from_pretrained("google-t5/t5-base") @slow def test_torch_quant(self): r""" Test that a simple `torch.quantization.quantize_dynamic` call works on a T5 model. """ model_name = "google/flan-t5-small" tokenizer = T5Tokenizer.from_pretrained(model_name) model = T5ForConditionalGeneration.from_pretrained(model_name) model = torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8) input_text = "Answer the following yes/no question by reasoning step-by-step. Can you write a whole Haiku in a single tweet?" input_ids = tokenizer(input_text, return_tensors="pt").input_ids _ = model.generate(input_ids) @slow def test_small_generation(self): model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small").to(torch_device) model.config.max_length = 8 model.config.num_beams = 1 model.config.do_sample = False tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") input_ids = tokenizer("summarize: Hello there", return_tensors="pt").input_ids.to(torch_device) sequences = model.generate(input_ids) output_str = tokenizer.batch_decode(sequences, skip_special_tokens=True)[0] self.assertTrue(output_str == "Hello there!") @slow def test_small_integration_test(self): """ For comparison run: >>> import t5 # pip install t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = Expectations( { (None, None): -19.0845, ("rocm", (9, 4)): -19.0846, } ).get_expectation() self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_v1_1_integration_test(self): """ For comparison run: >>> import t5 # pip install t5==0.7.1 >>> from t5.data.sentencepiece_vocabulary import SentencePieceVocabulary >>> path_to_mtf_small_t5_v1_1_checkpoint = '<fill_in>' >>> path_to_mtf_small_spm_model_path = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_mtf_small_t5_v1_1_checkpoint, batch_size=1, tpu=None) >>> vocab = SentencePieceVocabulary(path_to_mtf_small_spm_model_path, extra_ids=100) >>> score = 
t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = T5ForConditionalGeneration.from_pretrained("google/t5-v1_1-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("google/t5-v1_1-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -59.0293 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_small_byt5_integration_test(self): """ For comparison run: >>> import t5 # pip install t5==0.9.1 >>> path_to_byt5_small_checkpoint = '<fill_in>' >>> t5_model = t5.models.MtfModel(model_dir=path_to_tf_checkpoint, batch_size=1, tpu=None) >>> vocab = t5.data.ByteVocabulary() >>> score = t5_model.score(inputs=["Hello there"], targets=["Hi I am"], vocabulary=vocab) """ model = T5ForConditionalGeneration.from_pretrained("google/byt5-small").to(torch_device) tokenizer = ByT5Tokenizer.from_pretrained("google/byt5-small") input_ids = tokenizer("Hello there", return_tensors="pt").input_ids labels = tokenizer("Hi I am", return_tensors="pt").input_ids loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss mtf_score = -(labels.shape[-1] * loss.item()) EXPECTED_SCORE = -60.7397 self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4) @slow def test_summarization(self): model = self.model tok = self.tokenizer FRANCE_ARTICLE = ( # @noqa "Marseille, France (CNN)The French prosecutor leading an investigation into the crash of Germanwings" " Flight 9525 insisted Wednesday that he was not aware of any video footage from on board the plane." ' Marseille prosecutor Brice Robin told CNN that "so far no videos were used in the crash investigation."' ' He added, "A person who has such a video needs to immediately give it to the investigators." Robin\'s' " comments follow claims by two magazines, German daily Bild and French Paris Match, of a cell phone video" " showing the harrowing final seconds from on board Germanwings Flight 9525 as it crashed into the French" " Alps. All 150 on board were killed. Paris Match and Bild reported that the video was recovered from a" " phone at the wreckage site. The two publications described the supposed video, but did not post it on" " their websites. The publications said that they watched the video, which was found by a source close to" " the investigation. \"One can hear cries of 'My God' in several languages,\" Paris Match reported." ' "Metallic banging can also be heard more than three times, perhaps of the pilot trying to open the' " cockpit door with a heavy object. Towards the end, after a heavy shake, stronger than the others, the" ' screaming intensifies. Then nothing." "It is a very disturbing scene," said Julian Reichelt,' " editor-in-chief of Bild online. An official with France's accident investigation agency, the BEA, said" " the agency is not aware of any such video. Lt. Col. Jean-Marc Menichini, a French Gendarmerie spokesman" " in charge of communications on rescue efforts around the Germanwings crash site, told CNN that the" ' reports were "completely wrong" and "unwarranted." Cell phones have been collected at the site, he said,' ' but that they "hadn\'t been exploited yet." 
Menichini said he believed the cell phones would need to be' " sent to the Criminal Research Institute in Rosny sous-Bois, near Paris, in order to be analyzed by" " specialized technicians working hand-in-hand with investigators. But none of the cell phones found so" " far have been sent to the institute, Menichini said. Asked whether staff involved in the search could" ' have leaked a memory card to the media, Menichini answered with a categorical "no." Reichelt told "Erin' ' Burnett: Outfront" that he had watched the video and stood by the report, saying Bild and Paris Match' ' are "very confident" that the clip is real. He noted that investigators only revealed they\'d recovered' ' cell phones from the crash site after Bild and Paris Match published their reports. "That is something' " we did not know before. ... Overall we can say many things of the investigation weren't revealed by the" ' investigation at the beginning," he said. What was mental state of Germanwings co-pilot? German airline' " Lufthansa confirmed Tuesday that co-pilot Andreas Lubitz had battled depression years before he took the" " controls of Germanwings Flight 9525, which he's accused of deliberately crashing last week in the" ' French Alps. Lubitz told his Lufthansa flight training school in 2009 that he had a "previous episode of' ' severe depression," the airline said Tuesday. Email correspondence between Lubitz and the school' " discovered in an internal investigation, Lufthansa said, included medical documents he submitted in" " connection with resuming his flight training. The announcement indicates that Lufthansa, the parent" " company of Germanwings, knew of Lubitz's battle with depression, allowed him to continue training and" " ultimately put him in the cockpit. Lufthansa, whose CEO Carsten Spohr previously said Lubitz was 100%" ' fit to fly, described its statement Tuesday as a "swift and seamless clarification" and said it was' " sharing the information and documents -- including training and medical records -- with public" " prosecutors. Spohr traveled to the crash site Wednesday, where recovery teams have been working for the" " past week to recover human remains and plane debris scattered across a steep mountainside. He saw the" " crisis center set up in Seyne-les-Alpes, laid a wreath in the village of Le Vernet, closer to the crash" " site, where grieving families have left flowers at a simple stone memorial. Menichini told CNN late" " Tuesday that no visible human remains were left at the site but recovery teams would keep searching." " French President Francois Hollande, speaking Tuesday, said that it should be possible to identify all" " the victims using DNA analysis by the end of the week, sooner than authorities had previously suggested." " In the meantime, the recovery of the victims' personal belongings will start Wednesday, Menichini said." " Among those personal belongings could be more cell phones belonging to the 144 passengers and six crew" " on board. Check out the latest from our correspondents . The details about Lubitz's correspondence with" " the flight school during his training were among several developments as investigators continued to" " delve into what caused the crash and Lubitz's possible motive for downing the jet. A Lufthansa" " spokesperson told CNN on Tuesday that Lubitz had a valid medical certificate, had passed all his" ' examinations and "held all the licenses required." 
Earlier, a spokesman for the prosecutor\'s office in' " Dusseldorf, Christoph Kumpa, said medical records reveal Lubitz suffered from suicidal tendencies at" " some point before his aviation career and underwent psychotherapy before he got his pilot's license." " Kumpa emphasized there's no evidence suggesting Lubitz was suicidal or acting aggressively before the" " crash. Investigators are looking into whether Lubitz feared his medical condition would cause him to" " lose his pilot's license, a European government official briefed on the investigation told CNN on" ' Tuesday. While flying was "a big part of his life," the source said, it\'s only one theory being' " considered. Another source, a law enforcement official briefed on the investigation, also told CNN that" " authorities believe the primary motive for Lubitz to bring down the plane was that he feared he would" " not be allowed to fly because of his medical problems. Lubitz's girlfriend told investigators he had" " seen an eye doctor and a neuropsychologist, both of whom deemed him unfit to work recently and concluded" " he had psychological issues, the European government official said. But no matter what details emerge" " about his previous mental health struggles, there's more to the story, said Brian Russell, a forensic" ' psychologist. "Psychology can explain why somebody would turn rage inward on themselves about the fact' " that maybe they weren't going to keep doing their job and they're upset about that and so they're" ' suicidal," he said. "But there is no mental illness that explains why somebody then feels entitled to' " also take that rage and turn it outward on 149 other people who had nothing to do with the person's" ' problems." Germanwings crash compensation: What we know . Who was the captain of Germanwings Flight' " 9525? CNN's Margot Haddad reported from Marseille and Pamela Brown from Dusseldorf, while Laura" " Smith-Spark wrote from London. CNN's Frederik Pleitgen, Pamela Boykoff, Antonia Mortensen, Sandrine" " Amiel and Anna-Maja Rappard contributed to this report." ) SHORTER_ARTICLE = ( "(CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." 
Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) IRAN_ARTICLE = ( "(CNN)The United States and its negotiating partners reached a very strong framework agreement with Iran" " in Lausanne, Switzerland, on Thursday that limits Iran's nuclear program in such a way as to effectively" " block it from building a nuclear weapon. Expect pushback anyway, if the recent past is any harbinger." " Just last month, in an attempt to head off such an agreement, House Speaker John Boehner invited Israeli" " Prime Minister Benjamin Netanyahu to preemptively blast it before Congress, and 47 senators sent a" " letter to the Iranian leadership warning them away from a deal. The debate that has already begun since" " the announcement of the new framework will likely result in more heat than light. It will not be helped" " by the gathering swirl of dubious assumptions and doubtful assertions. Let us address some of these: ." " The most misleading assertion, despite universal rejection by experts, is that the negotiations'" " objective at the outset was the total elimination of any nuclear program in Iran. That is the position" " of Netanyahu and his acolytes in the U.S. Congress. But that is not and never was the objective. 
If it" " had been, there would have been no Iranian team at the negotiating table. Rather, the objective has" " always been to structure an agreement or series of agreements so that Iran could not covertly develop a" " nuclear arsenal before the United States and its allies could respond. The new framework has exceeded" " expectations in achieving that goal. It would reduce Iran's low-enriched uranium stockpile, cut by" " two-thirds its number of installed centrifuges and implement a rigorous inspection regime. Another" " dubious assumption of opponents is that the Iranian nuclear program is a covert weapons program. Despite" " sharp accusations by some in the United States and its allies, Iran denies having such a program, and" " U.S. intelligence contends that Iran has not yet made the decision to build a nuclear weapon. Iran's" " continued cooperation with International Atomic Energy Agency inspections is further evidence on this" " point, and we'll know even more about Iran's program in the coming months and years because of the deal." " In fact, the inspections provisions that are part of this agreement are designed to protect against any" " covert action by the Iranians. What's more, the rhetoric of some members of Congress has implied that" " the negotiations have been between only the United States and Iran (i.e., the 47 senators' letter" " warning that a deal might be killed by Congress or a future president). This of course is not the case." " The talks were between Iran and the five permanent members of the U.N. Security Council (United States," " United Kingdom, France, China and Russia) plus Germany, dubbed the P5+1. While the United States has" " played a leading role in the effort, it negotiated the terms alongside its partners. If the agreement" " reached by the P5+1 is rejected by Congress, it could result in an unraveling of the sanctions on Iran" " and threaten NATO cohesion in other areas. Another questionable assertion is that this agreement" " contains a sunset clause, after which Iran will be free to do as it pleases. Again, this is not the" " case. Some of the restrictions on Iran's nuclear activities, such as uranium enrichment, will be eased" " or eliminated over time, as long as 15 years. But most importantly, the framework agreement includes" " Iran's ratification of the Additional Protocol, which allows IAEA inspectors expanded access to nuclear" " sites both declared and nondeclared. This provision will be permanent. It does not sunset. Thus, going" " forward, if Iran decides to enrich uranium to weapons-grade levels, monitors will be able to detect such" " a move in a matter of days and alert the U.N. Security Council. Many in Congress have said that the" ' agreement should be a formal treaty requiring the Senate to "advise and consent." But the issue is not' " suited for a treaty. Treaties impose equivalent obligations on all signatories. For example, the New" " START treaty limits Russia and the United States to 1,550 deployed strategic warheads. But any agreement" " with Iran will not be so balanced. The restrictions and obligations in the final framework agreement" " will be imposed almost exclusively on Iran. The P5+1 are obligated only to ease and eventually remove" " most but not all economic sanctions, which were imposed as leverage to gain this final deal. Finally" " some insist that any agreement must address Iranian missile programs, human rights violations or support" " for Hamas or Hezbollah. 
As important as these issues are, and they must indeed be addressed, they are" " unrelated to the most important aim of a nuclear deal: preventing a nuclear Iran. To include them in" " the negotiations would be a poison pill. This agreement should be judged on its merits and on how it" " affects the security of our negotiating partners and allies, including Israel. Those judgments should be" " fact-based, not based on questionable assertions or dubious assumptions." ) ARTICLE_SUBWAY = ( "New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) expected_summaries = [ "<pad> " 'prosecutor: "so far no videos were used in the crash investigation" two magazines claim to have found a' " cell phone video of the final seconds . \"one can hear cries of 'My God' in several languages,\" one" " magazine says ." "</s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s>", "<pad> " "the formal accession was marked by a ceremony at The Hague, in the Netherlands . the ICC opened a" " preliminary examination into the situation in the occupied Palestinian territory . as members of the" " court, Palestinians may be subject to counter-charges as well ." 
"</s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s>", "<pad> " "the u.s. and its negotiating partners reached a very strong framework agreement with Iran . aaron miller:" " the debate that has already begun since the announcement of the new framework will likely result in more" " heat than light . the deal would reduce Iran's low-enriched uranium stockpile, cut centrifuges and" " implement a rigorous inspection regime ." "</s>", "<pad> " "prosecutors say the marriages were part of an immigration scam . if convicted, barrientos faces two" ' criminal counts of "offering a false instrument for filing in the first degree" she has been married 10' " times, with nine of her marriages occurring between 1999 and 2002 ." "</s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s></s>", ] use_task_specific_params(model, "summarization") dct = tok( [model.config.prefix + x for x in [FRANCE_ARTICLE, SHORTER_ARTICLE, IRAN_ARTICLE, ARTICLE_SUBWAY]], padding="max_length", truncation=True, max_length=512, return_tensors="pt", ).to(torch_device) self.assertEqual(512, dct["input_ids"].shape[1]) hypotheses_batch = model.generate( **dct, num_beams=4, length_penalty=2.0, max_length=142, min_length=56, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) decoded = tok.batch_decode(hypotheses_batch) self.assertListEqual(expected_summaries, decoded) @slow def test_translation_en_to_de(self): model = self.model tok = self.tokenizer use_task_specific_params(model, "translation_en_to_de") en_text = '"Luigi often said to me that he never wanted the brothers to end up in court", she wrote.' expected_translation = ( '<pad> "Luigi sagte mir oft, dass er nie wollte, dass die Brüder am Gericht sitzen", schrieb sie.</s>' ) input_ids = tok.encode(model.config.prefix + en_text, return_tensors="pt") input_ids = input_ids.to(torch_device) output = model.generate(input_ids) translation = tok.decode(output[0]) self.assertEqual(translation, expected_translation) @slow def test_translation_en_to_fr(self): model = self.model # google-t5/t5-base tok = self.tokenizer use_task_specific_params(model, "translation_en_to_fr") en_text = ( ' This image section from an infrared recording by the Spitzer telescope shows a "family portrait" of' " countless generations of stars: the oldest stars are seen as blue dots. " ) input_ids = tok.encode(model.config.prefix + en_text, return_tensors="pt") input_ids = input_ids.to(torch_device) output = model.generate( input_ids=input_ids, num_beams=4, length_penalty=2.0, max_length=100, no_repeat_ngram_size=3, do_sample=False, early_stopping=True, ) translation = tok.decode(output[0]) new_truncated_translation = ( "<pad> " "Cette section d'images provenant de l'enregistrement infrarouge effectué par le télescope Spitzer montre " "un " "« portrait familial » de générations innombrables d’étoiles : les plus anciennes sont observées " "sous forme " "de points bleus." "</s>" ) self.assertEqual(translation, new_truncated_translation) @slow def test_translation_en_to_ro(self): model = self.model tok = self.tokenizer use_task_specific_params(model, "translation_en_to_ro") en_text = "Taco Bell said it plans to add 2,000 locations in the US by 2022." 
expected_translation = ( "<pad> Taco Bell a declarat că intenţionează să adauge 2 000 de locaţii în SUA până în 2022.</s>" ) inputs = tok(model.config.prefix + en_text, return_tensors="pt").to(torch_device) output = model.generate(**inputs) translation = tok.decode(output[0]) self.assertEqual(translation, expected_translation) @slow def test_contrastive_search_t5(self): article = ( " New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York. A" " year later, she got married again in Westchester County, but to a different man and without divorcing" " her first husband. Only 18 days after that marriage, she got hitched yet again. Then, Barrientos" ' declared "I do" five more times, sometimes only within two weeks of each other. In 2010, she married' " once more, this time in the Bronx. In an application for a marriage license, she stated it was her" ' "first and only" marriage. Barrientos, now 39, is facing two criminal counts of "offering a false' ' instrument for filing in the first degree," referring to her false statements on the 2010 marriage' " license application, according to court documents. Prosecutors said the marriages were part of an" " immigration scam. On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to" " her attorney, Christopher Wright, who declined to comment further. After leaving court, Barrientos was" " arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New" " York subway through an emergency exit, said Detective Annette Markowski, a police spokeswoman. In total," " Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002. All" " occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be" " married to four men, and at one time, she was married to eight men at once, prosecutors say. Prosecutors" " said the immigration scam involved some of her husbands, who filed for permanent residence status" " shortly after the marriages. Any divorces happened only after such filings were approved. It was" " unclear whether any of the men will be prosecuted. The case was referred to the Bronx District" " Attorney's Office by Immigration and Customs Enforcement and the Department of Homeland Security's" ' Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt,' " Turkey, Georgia, Pakistan and Mali. Her eighth husband, Rashid Rajput, was deported in 2006 to his" " native Pakistan after an investigation by the Joint Terrorism Task Force. If convicted, Barrientos faces" " up to four years in prison. Her next court appearance is scheduled for May 18." ) article = "summarize: " + article.strip() t5_tokenizer = AutoTokenizer.from_pretrained("flax-community/t5-base-cnn-dm") t5_model = T5ForConditionalGeneration.from_pretrained("flax-community/t5-base-cnn-dm").to(torch_device) input_ids = t5_tokenizer( article, add_special_tokens=False, truncation=True, max_length=512, return_tensors="pt" ).input_ids.to(torch_device) outputs = t5_model.generate(input_ids, penalty_alpha=0.5, top_k=5, max_length=64) generated_text = t5_tokenizer.batch_decode(outputs, skip_special_tokens=True) # TODO: @arthur? # PR #31938 caused regression on this test which was fixed by PR #34089 self.assertListEqual( generated_text, [ "Liana Barrientos has been married 10 times, nine of them in the Bronx . 
Her husbands filed for " "permanent residence after the marriages, prosecutors say ." ], ) @slow @require_torch_accelerator @pytest.mark.torch_compile_test def test_compile_static_cache(self): NUM_TOKENS_TO_GENERATE = 40 EXPECTED_TEXT_COMPLETION = [ "theory of relativity states that 1) the speed of light is constant in all inertial reference frames. the laws of physics are the same for all inertial reference frames.", "ketchup is my favorite condiment.", ] prompts = [ "summarize: Simply put, the theory of relativity states that 1) the speed of light is constant in all inertial " "reference frames, and 2) the laws of physics are the same for all inertial reference frames.\nThe " "theory of relativity is not hard to grasp.", "summarize: My favorite all time favorite condiment is ketchup. I love it on everything. I love it on my eggs, " "my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my pizza.", ] model = T5ForConditionalGeneration.from_pretrained("google-t5/t5-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) # Dynamic Cache generated_ids = model.generate(**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False) dynamic_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, dynamic_text) # Static Cache generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static" ) static_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, static_text) # Static Cache + compile model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) generated_ids = model.generate( **inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, cache_implementation="static" ) static_compiled_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True) self.assertEqual(EXPECTED_TEXT_COMPLETION, static_compiled_text) @slow @require_torch_accelerator @pytest.mark.torch_compile_test def test_compile_static_cache_encoder(self): prompts = [ "summarize: Simply put, the theory of relativity states that 1) the speed of light is constant in all inertial " "reference frames, and 2) the laws of physics are the same for all inertial reference frames.\nThe " "theory of relativity is not hard to grasp.", "summarize: My favorite all time favorite condiment is ketchup. I love it on everything. 
I love it on my eggs, " "my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my pizza.", ] model = T5EncoderModel.from_pretrained("google-t5/t5-small").to(torch_device) tokenizer = T5Tokenizer.from_pretrained("google-t5/t5-small") inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device) logits = model(**inputs) model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) logits_compiled = model(**inputs) torch.testing.assert_close(logits[0][:, -3:, -3], logits_compiled[0][:, -3:, -3], rtol=1e-5, atol=1e-5) @pytest.mark.torch_export_test @slow def test_export_encoder(self): """Test exporting T5EncoderModel to torch export format.""" if not is_torch_greater_or_equal_than_2_4: self.skipTest("This test requires torch >= 2.4 to run.") from transformers.integrations.executorch import Seq2SeqLMEncoderExportableModule model_id = "google-t5/t5-small" device = "cpu" example_input_ids = torch.ones((1, 10), dtype=torch.long).to(device) # Load model model = T5EncoderModel.from_pretrained(model_id).to(device=device).eval() # Get original output for comparison with torch.no_grad(): original_output = model(input_ids=example_input_ids).last_hidden_state encoder_model = Seq2SeqLMEncoderExportableModule(model) # Export the encoder_model with torch.no_grad(): seq_len_dim = torch.export.Dim("sequence_length", max=4096) exported_program = torch.export.export( encoder_model, (example_input_ids,), dynamic_shapes={"input_ids": {1: seq_len_dim}}, strict=True ) # Test the exported model with torch.no_grad(): exported_output = exported_program.module()(example_input_ids) # Verify outputs are close enough self.assertTrue(torch.allclose(original_output, exported_output, atol=1e-5)) @pytest.mark.torch_export_test @slow def test_export_decoder(self): """Test exporting T5 decoder with static cache to torch export format.""" if not is_torch_greater_or_equal_than_2_4: self.skipTest("This test requires torch >= 2.4 to run.") from transformers import AutoModelForSeq2SeqLM, T5ForConditionalGeneration from transformers.integrations.executorch import Seq2SeqLMDecoderExportableModuleWithStaticCache model_id = "google-t5/t5-small" # Configuration for static cache batch_size = 1 max_cache_len = 123 device = "cpu" full_model = AutoModelForSeq2SeqLM.from_pretrained(model_id).to(device) self.assertIsInstance(full_model, T5ForConditionalGeneration) decoder_model = ( Seq2SeqLMDecoderExportableModuleWithStaticCache(full_model, max_cache_len, batch_size).to(device).eval() ) # Prepare test inputs example_decoder_input_ids = torch.tensor([[0]], dtype=torch.long) # Start token example_cache_position = torch.tensor([0], dtype=torch.long) # For T5-small, hidden size is 512 example_encoder_hidden_states = torch.zeros((batch_size, 10, 512), dtype=torch.float32) # Export the model with torch.no_grad(): encoder_sequence_length_dim = torch.export.Dim("encoder_sequence_length", max=4096) exported_program = torch.export.export( decoder_model, (example_decoder_input_ids, example_encoder_hidden_states, example_cache_position), dynamic_shapes={ "decoder_input_ids": None, "encoder_hidden_states": {1: encoder_sequence_length_dim}, "cache_position": None, }, strict=True, ) # We won't directly verify outputs here as it's complicated with caching, # but we'll check the export was successful self.assertIsNotNone(exported_program) # Verify cache buffers existence and shapes cache_buffers = [ (name, buffer) for name, buffer in exported_program.named_buffers() if name.startswith("key_cache_") or 
name.startswith("value_cache_") ] # Verify cache buffers self.assertTrue(len(cache_buffers) > 0, "No cache buffers found in exported model") for name, buffer in cache_buffers: # Verify cache buffers are 3D self.assertEqual(buffer.shape[2], max_cache_len) @pytest.mark.torch_export_test @slow def test_export_t5_summarization(self): """Test composing exported T5 encoder and decoder for summarization.""" if not is_torch_greater_or_equal_than_2_4: self.skipTest("This test requires torch >= 2.4 to run.") from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, T5ForConditionalGeneration from transformers.integrations.executorch import Seq2SeqLMExportableModule device = torch_device batch_size = 1 max_cache_length = 1234 max_hidden_seq_length = 5678 model_id = "google-t5/t5-small" tokenizer = AutoTokenizer.from_pretrained(model_id) full_model = AutoModelForSeq2SeqLM.from_pretrained(model_id).to(device).eval() self.assertIsInstance(full_model, T5ForConditionalGeneration) wrapped_model = Seq2SeqLMExportableModule( full_model, batch_size=batch_size, max_hidden_seq_length=max_hidden_seq_length, max_cache_length=max_cache_length, ) exported_t5 = wrapped_model.export() # Test Summarization with Composed Models prompts = [ "summarize: Simply put, the theory of relativity states that 1) the speed of light is constant in all inertial " "reference frames, and 2) the laws of physics are the same for all inertial reference frames.\nThe " "theory of relativity is not hard to grasp." ] input_ids = tokenizer(prompts, return_tensors="pt").input_ids generated_ids = exported_t5.generate(prompt_token_ids=input_ids, max_new_tokens=max_cache_length) generated_summary = tokenizer.decode(generated_ids, skip_special_tokens=True) # Also run original model for comparison original_model = T5ForConditionalGeneration.from_pretrained(model_id).eval() with torch.no_grad(): original_outputs = original_model.generate(input_ids, max_length=50, num_beams=1) original_summary = tokenizer.decode(original_outputs[0], skip_special_tokens=True) # Basic verification that we got a reasonable summary self.assertEqual(generated_summary, original_summary) @require_torch class TestAsymmetricT5(unittest.TestCase): def build_model_and_check_forward_pass(self, **kwargs): tester = T5ModelTester(self, **kwargs) config, *inputs = tester.prepare_config_and_inputs() ( input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, lm_labels, ) = inputs model = T5ForConditionalGeneration(config=config).to(torch_device).eval() outputs = model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, labels=lm_labels, ) # outputs = model(*inputs) assert len(outputs) == 4 assert outputs["logits"].size() == (tester.batch_size, tester.decoder_seq_length, tester.vocab_size) assert outputs["loss"].size() == () return model def test_small_decoder(self): # num_hidden_layers is passed to T5Config as num_layers model = self.build_model_and_check_forward_pass(decoder_layers=1, num_hidden_layers=2) assert len(model.encoder.block) == 2 assert len(model.decoder.block) == 1 def test_defaulting_to_symmetry(self): # num_hidden_layers is passed to T5Config as num_layers model = self.build_model_and_check_forward_pass(num_hidden_layers=2) assert len(model.decoder.block) == len(model.encoder.block) == 2
transformers/tests/models/t5/test_modeling_t5.py/0
{ "file_path": "transformers/tests/models/t5/test_modeling_t5.py", "repo_id": "transformers", "token_count": 38837 }
592
# Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VideoMAEImageProcessor class VideoMAEImageProcessingTester: def __init__( self, parent, batch_size=7, num_channels=3, num_frames=10, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], crop_size=None, ): size = size if size is not None else {"shortest_edge": 18} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.num_frames = num_frames self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std self.crop_size = crop_size def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } def expected_output_image_shape(self, images): return self.num_frames, self.num_channels, self.crop_size["height"], self.crop_size["width"] def prepare_video_inputs(self, equal_resolution=False, numpify=False, torchify=False): return prepare_video_inputs( batch_size=self.batch_size, num_frames=self.num_frames, num_channels=self.num_channels, min_resolution=self.min_resolution, max_resolution=self.max_resolution, equal_resolution=equal_resolution, numpify=numpify, torchify=torchify, ) @require_torch @require_vision class VideoMAEImageProcessingTest(ImageProcessingTestMixin, unittest.TestCase): image_processing_class = VideoMAEImageProcessor if is_vision_available() else None def setUp(self): super().setUp() self.image_processor_tester = VideoMAEImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 18}) 
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL videos video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], Image.Image) # Test not batched input encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) # Test batched encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) # Test batched encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) ) def test_call_numpy_4_channels(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors self.image_processor_tester.num_channels = 4 video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, numpify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], np.ndarray) # Test not batched input encoded_videos = image_processing( video_inputs[0], return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) # Test batched encoded_videos = image_processing( video_inputs, return_tensors="pt", image_mean=0, image_std=1, input_data_format="channels_first" ).pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) ) self.image_processor_tester.num_channels = 3 def test_call_pytorch(self): # Initialize image_processing 
image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors video_inputs = self.image_processor_tester.prepare_video_inputs(equal_resolution=False, torchify=True) for video in video_inputs: self.assertIsInstance(video, list) self.assertIsInstance(video[0], torch.Tensor) # Test not batched input encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape([encoded_videos[0]]) self.assertEqual(tuple(encoded_videos.shape), (1, *expected_output_video_shape)) # Test batched encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values expected_output_video_shape = self.image_processor_tester.expected_output_image_shape(encoded_videos) self.assertEqual( tuple(encoded_videos.shape), (self.image_processor_tester.batch_size, *expected_output_video_shape) )
transformers/tests/models/videomae/test_image_processing_videomae.py/0
{ "file_path": "transformers/tests/models/videomae/test_image_processing_videomae.py", "repo_id": "transformers", "token_count": 3754 }
593
# Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch ViT model.""" import unittest from transformers import ViTConfig from transformers.testing_utils import ( require_accelerate, require_torch, require_torch_accelerator, require_torch_fp16, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class ViTModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2, mask_ratio=0.5, attn_implementation="eager", ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.encoder_stride = encoder_stride self.attn_implementation = attn_implementation # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = num_patches + 1 self.mask_ratio = mask_ratio self.num_masks = int(mask_ratio * self.seq_length) self.mask_length = num_patches def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, 
initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, attn_implementation=self.attn_implementation, ) def create_and_check_model(self, config, pixel_values, labels): model = ViTModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = ViTForMaskedImageModeling(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images config.num_channels = 1 model = ViTForMaskedImageModeling(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = ViTForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = ViTForImageClassification(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, pixel_values, labels, ) = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = ( ( ViTModel, ViTForImageClassification, ViTForMaskedImageModeling, ) if is_torch_available() else () ) pipeline_model_mapping = ( {"image-feature-extraction": ViTModel, "image-classification": ViTForImageClassification} if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_resize_embeddings = False test_head_masking = False test_torch_exportable = True def setUp(self): self.model_tester = ViTModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37) @unittest.skip( "Since `torch==2.3+cu121`, although this test passes, many subsequent tests have `CUDA error: misaligned address`." "If `nvidia-xxx-cu118` are also installed, no failure (even with `torch==2.3+cu121`)." 
) def test_multi_gpu_data_parallel_forward(self): super().test_multi_gpu_data_parallel_forward() def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_get_set_embeddings(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model_name = "google/vit-base-patch16-224" model = ViTModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ViTModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device) torch.testing.assert_close(outputs.logits[0, :3], expected_slice, rtol=1e-4, atol=1e-4) @slow def test_inference_interpolate_pos_encoding(self): # ViT models have an `interpolate_pos_encoding` argument in their forward method, # allowing to interpolate the pre-trained position embeddings in order to use # the model on higher resolutions. The DINO model by Facebook AI leverages this # to visualize self-attention on higher resolution images. 
model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device) image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480) image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # forward pass with torch.no_grad(): outputs = model(pixel_values, interpolate_pos_encoding=True) # verify the logits expected_shape = torch.Size((1, 3601, 384)) self.assertEqual(outputs.last_hidden_state.shape, expected_shape) expected_slice = torch.tensor( [[4.2325, 4.3882, -6.6678], [4.5372, 1.8933, -6.7355], [4.4454, 0.8514, -5.8747]] ).to(torch_device) torch.testing.assert_close(outputs.last_hidden_state[0, :3, :3], expected_slice, rtol=1e-3, atol=1e-3) @slow @require_accelerate @require_torch_accelerator @require_torch_fp16 def test_inference_fp16(self): r""" A small test to make sure that inference work in half precision without any problem. """ model = ViTModel.from_pretrained("facebook/dino-vits8", dtype=torch.float16, device_map="auto") image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt") pixel_values = inputs.pixel_values.to(torch_device) # forward pass to make sure inference works in fp16 with torch.no_grad(): _ = model(pixel_values)
transformers/tests/models/vit/test_modeling_vit.py/0
{ "file_path": "transformers/tests/models/vit/test_modeling_vit.py", "repo_id": "transformers", "token_count": 5234 }
594
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Testing suite for the PyTorch VITS model.""" import copy import os import tempfile import unittest import numpy as np from transformers import PretrainedConfig, VitsConfig from transformers.testing_utils import ( Expectations, is_flaky, is_torch_available, require_torch, require_torch_fp16, require_torch_multi_gpu, slow, torch_device, ) from transformers.trainer_utils import set_seed from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, global_rng, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import VitsModel, VitsTokenizer CONFIG_NAME = "config.json" GENERATION_CONFIG_NAME = "generation_config.json" def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__: if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(configs_no_init, key, 1e-10) if isinstance(getattr(configs_no_init, key, None), PretrainedConfig): no_init_subconfig = _config_zero_init(getattr(configs_no_init, key)) setattr(configs_no_init, key, no_init_subconfig) return configs_no_init @require_torch class VitsModelTester: def __init__( self, parent, batch_size=2, seq_length=7, is_training=False, hidden_size=16, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64, flow_size=16, vocab_size=38, spectrogram_bins=8, duration_predictor_num_flows=2, duration_predictor_filter_channels=16, prior_encoder_num_flows=2, upsample_initial_channel=16, upsample_rates=[8, 2], upsample_kernel_sizes=[16, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.flow_size = flow_size self.vocab_size = vocab_size self.spectrogram_bins = spectrogram_bins self.duration_predictor_num_flows = duration_predictor_num_flows self.duration_predictor_filter_channels = duration_predictor_filter_channels self.prior_encoder_num_flows = prior_encoder_num_flows self.upsample_initial_channel = upsample_initial_channel self.upsample_rates = upsample_rates self.upsample_kernel_sizes = upsample_kernel_sizes self.resblock_kernel_sizes = resblock_kernel_sizes self.resblock_dilation_sizes = resblock_dilation_sizes def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(2) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = 
self.prepare_config_and_inputs() return config, inputs_dict def get_config(self): return VitsConfig( hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, flow_size=self.flow_size, vocab_size=self.vocab_size, spectrogram_bins=self.spectrogram_bins, duration_predictor_num_flows=self.duration_predictor_num_flows, prior_encoder_num_flows=self.prior_encoder_num_flows, duration_predictor_filter_channels=self.duration_predictor_filter_channels, posterior_encoder_num_wavenet_layers=self.num_hidden_layers, upsample_initial_channel=self.upsample_initial_channel, upsample_rates=self.upsample_rates, upsample_kernel_sizes=self.upsample_kernel_sizes, resblock_kernel_sizes=self.resblock_kernel_sizes, resblock_dilation_sizes=self.resblock_dilation_sizes, ) def create_and_check_model_forward(self, config, inputs_dict): model = VitsModel(config=config).to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] result = model(input_ids, attention_mask=attention_mask) self.parent.assertEqual((self.batch_size, 624), result.waveform.shape) @require_torch class VitsModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (VitsModel,) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": VitsModel, "text-to-audio": VitsModel} if is_torch_available() else {} ) is_encoder_decoder = False test_pruning = False test_headmasking = False test_resize_embeddings = False test_head_masking = False test_torchscript = False has_attentions = False def setUp(self): self.model_tester = VitsModelTester(self) self.config_tester = ConfigTester(self, config_class=VitsConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() # TODO: @ydshieh @is_flaky(description="torch 2.2.0 gives `Timeout >120.0s`") def test_pipeline_feature_extraction(self): super().test_pipeline_feature_extraction() @is_flaky(description="torch 2.2.0 gives `Timeout >120.0s`") def test_pipeline_feature_extraction_fp16(self): super().test_pipeline_feature_extraction_fp16() @unittest.skip(reason="Need to fix this after #26538") def test_model_forward(self): set_seed(12345) global_rng.seed(12345) config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) @require_torch_multi_gpu # override to force all elements of the batch to have the same sequence length across GPUs def test_multi_gpu_data_parallel_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.use_stochastic_duration_prediction = False # move input tensors to cuda:O for key, value in inputs_dict.items(): if torch.is_tensor(value): # make all elements of the batch the same -> ensures the output seq lengths are the same for DP value[1:] = value[0] inputs_dict[key] = value.to(0) for model_class in self.all_model_classes: model = model_class(config=config) model.to(0) model.eval() # Wrap model in nn.DataParallel model = torch.nn.DataParallel(model) set_seed(555) with torch.no_grad(): _ = model(**self._prepare_for_class(inputs_dict, model_class)).waveform @unittest.skip(reason="VITS is not deterministic") def test_determinism(self): pass @unittest.skip(reason="VITS is not deterministic") def test_batching_equivalence(self): pass @is_flaky( max_attempts=3, description="Weight initialisation for the VITS conv layers sometimes exceeds the kaiming normal range", ) 
def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() uniform_init_parms = [ "emb_rel_k", "emb_rel_v", "conv_1", "conv_2", "conv_pre", "conv_post", "conv_proj", "conv_dds", "project", "wavenet.in_layers", "wavenet.res_skip_layers", "upsampler", "resblocks", ] configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if any(x in name for x in uniform_init_parms): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @unittest.skip(reason="VITS has no inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="VITS has no input embeddings") def test_model_get_set_embeddings(self): pass # override since the model is not deterministic, so we need to set the seed for each forward pass def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def set_nan_tensor_to_zero(t): t[t != t] = 0 return t def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): with torch.no_grad(): set_seed(0) tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) set_seed(0) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (list, tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, dict): for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values(), dict_object.values() ): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." 
), ) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) if self.has_attentions: tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True}) tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) check_equivalence( model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True} ) # override since the model is not deterministic, so we need to set the seed for each forward pass def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_save_load(out1, out2): # make sure we don't have nans out_2 = out2.cpu().numpy() out_2[np.isnan(out_2)] = 0 out_1 = out1.cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): set_seed(0) first = model(**self._prepare_for_class(inputs_dict, model_class))[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) with torch.no_grad(): set_seed(0) second = model(**self._prepare_for_class(inputs_dict, model_class))[0] if isinstance(first, tuple) and isinstance(second, tuple): for tensor1, tensor2 in zip(first, second): check_save_load(tensor1, tensor2) else: check_save_load(first, second) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not 
None: module.bias.data.fill_(3) @require_torch @slow class VitsModelIntegrationTests(unittest.TestCase): def test_forward(self): # GPU gives different results than CPU torch_device = "cpu" model = VitsModel.from_pretrained("facebook/mms-tts-eng") model.to(torch_device) tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") set_seed(555) # make deterministic input_text = "Mister quilter is the apostle of the middle classes and we are glad to welcome his gospel!" input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(torch_device) with torch.no_grad(): outputs = model(input_ids) self.assertEqual(outputs.waveform.shape, (1, 87040)) # fmt: off EXPECTED_LOGITS = torch.tensor( [ -0.0042, 0.0176, 0.0354, 0.0504, 0.0621, 0.0777, 0.0980, 0.1224, 0.1475, 0.1679, 0.1817, 0.1832, 0.1713, 0.1542, 0.1384, 0.1256, 0.1147, 0.1066, 0.1026, 0.0958, 0.0823, 0.0610, 0.0340, 0.0022, -0.0337, -0.0677, -0.0969, -0.1178, -0.1311, -0.1363 ] ) # fmt: on torch.testing.assert_close(outputs.waveform[0, 10000:10030].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4) @require_torch_fp16 def test_forward_fp16(self): # GPU gives different results than CPU torch_device = "cpu" model = VitsModel.from_pretrained("facebook/mms-tts-eng", dtype=torch.float16) model.to(torch_device) tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng") set_seed(555) # make deterministic input_text = "Mister quilter is the apostle of the middle classes and we are glad to welcome his gospel!" input_ids = tokenizer(input_text, return_tensors="pt").input_ids.to(torch_device) with torch.no_grad(): outputs = model(input_ids) self.assertEqual(outputs.waveform.shape, (1, 87040)) # fmt: off expected_logits = Expectations({ ("cuda", None): [ 0.0101, 0.0318, 0.0489, 0.0627, 0.0728, 0.0865, 0.1053, 0.1279, 0.1514, 0.1703, 0.1827, 0.1829, 0.1694, 0.1509, 0.1332, 0.1188, 0.1066, 0.0978, 0.0936, 0.0867, 0.0724, 0.0493, 0.0197, -0.0141, -0.0501, -0.0817, -0.1065, -0.1223, -0.1311, -0.1339 ], ("rocm", (9, 5)): [ 0.0097, 0.0315, 0.0486, 0.0626, 0.0728, 0.0865, 0.1053, 0.1279, 0.1515, 0.1703, 0.1827, 0.1829, 0.1694, 0.1509, 0.1333, 0.1189, 0.1066, 0.0978, 0.0937, 0.0868, 0.0726, 0.0496, 0.0200, -0.0138, -0.0500, -0.0817, -0.1067, -0.1225, -0.1313, -0.1340 ] }) EXPECTED_LOGITS = torch.tensor(expected_logits.get_expectation(), dtype=torch.float16) # fmt: on torch.testing.assert_close(outputs.waveform[0, 10000:10030].cpu(), EXPECTED_LOGITS, rtol=1e-4, atol=1e-4)
transformers/tests/models/vits/test_modeling_vits.py/0
{ "file_path": "transformers/tests/models/vits/test_modeling_vits.py", "repo_id": "transformers", "token_count": 9537 }
595
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import requests from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, is_vision_available from transformers.pipelines import ImageToTextPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ImageToTextPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING def get_test_pipeline( self, model, tokenizer=None, image_processor=None, feature_extractor=None, processor=None, dtype="float32", ): pipe = ImageToTextPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, image_processor=image_processor, processor=processor, dtype=dtype, max_new_tokens=20, ) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "./tests/fixtures/tests_samples/COCO/000000039769.png", ] return pipe, examples def run_pipeline_test(self, pipe, examples): outputs = pipe(examples) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}], [{"generated_text": ANY(str)}], ], ) @require_torch def test_small_model_pt(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2", max_new_tokens=19) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) @require_torch def test_small_model_pt_conditional(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-BlipForConditionalGeneration") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" prompt = "a photo of" outputs = pipe(image, prompt=prompt) self.assertTrue(outputs[0]["generated_text"].startswith(prompt)) @require_torch def test_consistent_batching_behaviour(self): pipe = pipeline( "image-to-text", model="hf-internal-testing/tiny-random-BlipForConditionalGeneration", max_new_tokens=10 ) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" prompt = "a photo of" outputs = pipe([image, image], prompt=prompt) self.assertTrue(outputs[0][0]["generated_text"].startswith(prompt)) self.assertTrue(outputs[1][0]["generated_text"].startswith(prompt)) outputs = pipe([image, image], prompt=prompt, batch_size=2) 
self.assertTrue(outputs[0][0]["generated_text"].startswith(prompt)) self.assertTrue(outputs[1][0]["generated_text"].startswith(prompt)) from torch.utils.data import Dataset class MyDataset(Dataset): def __len__(self): return 5 def __getitem__(self, i): return "./tests/fixtures/tests_samples/COCO/000000039769.png" dataset = MyDataset() for batch_size in (1, 2, 4): outputs = pipe(dataset, prompt=prompt, batch_size=batch_size if batch_size > 1 else None) self.assertTrue(list(outputs)[0][0]["generated_text"].startswith(prompt)) self.assertTrue(list(outputs)[1][0]["generated_text"].startswith(prompt)) @slow @require_torch def test_large_model_pt(self): pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) outputs = pipe([image, image]) self.assertEqual( outputs, [ [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], ], ) @slow @require_torch def test_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a pink pokemon pokemon with a blue shirt and a blue shirt"}]) @slow @require_torch def test_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cartoon of a purple character."}]) @slow @require_torch def test_conditional_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photography of" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photography of a volcano"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch def test_conditional_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photo of a" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photo of a tent with a tent and a tent in the background."}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch def test_conditional_generation_pt_pix2struct(self): pipe = pipeline("image-to-text", model="google/pix2struct-ai2d-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "What does the label 15 represent? 
(1) lava (2) core (3) tunnel (4) ash cloud" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "ash cloud"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch @unittest.skip("TODO (joao, raushan): there is something wrong with image processing in the model/pipeline") def test_conditional_generation_llava(self): pipe = pipeline("image-to-text", model="llava-hf/bakLlava-v1-hf") prompt = ( "<image>\nUSER: What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud?\nASSISTANT:" ) outputs = pipe( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg", prompt=prompt, generate_kwargs={"max_new_tokens": 200}, ) self.assertEqual( outputs, [ { "generated_text": "\nUSER: What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud?\nASSISTANT: Lava" } ], ) @slow @require_torch def test_nougat(self): pipe = pipeline("image-to-text", "facebook/nougat-base", max_new_tokens=19) outputs = pipe("https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/nougat_paper.png") self.assertEqual( outputs, [{"generated_text": "# Nougat: Neural Optical Understanding for Academic Documents\n\n Lukas Blec"}], )
transformers/tests/pipelines/test_pipelines_image_to_text.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_image_to_text.py", "repo_id": "transformers", "token_count": 4465 }
596
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from huggingface_hub import ZeroShotImageClassificationOutputElement from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( compare_pipeline_output_to_hub_spec, is_pipeline_test, nested_simplify, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ZeroShotImageClassificationPipelineTests(unittest.TestCase): # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping, # and only CLIP would be there for now. # model_mapping = {CLIPConfig: CLIPModel} # def get_test_pipeline(self, model, tokenizer, processor): # if tokenizer is None: # # Side effect of no Fast Tokenizer class for these model, so skipping # # But the slow tokenizer test should still run as they're quite small # self.skipTest(reason="No tokenizer available") # return # # return None, None # image_classifier = ZeroShotImageClassificationPipeline( # model=model, tokenizer=tokenizer, feature_extractor=processor # ) # # test with a raw waveform # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # image2 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # return image_classifier, [image, image2] # def run_pipeline_test(self, pipe, examples): # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # outputs = pipe(image, candidate_labels=["A", "B"]) # self.assertEqual(outputs, {"text": ANY(str)}) # # Batching # outputs = pipe([image] * 3, batch_size=2, candidate_labels=["A", "B"]) @require_torch def test_small_model_pt(self, dtype="float32"): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", dtype=dtype ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(output), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], [{"score": 0.333, "label": "b"}, {"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}], ], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), # Pipeline outputs are supposed to be deterministic and # So we could in theory have real values "A", "B", "C" instead # of ANY(str). 
# However it seems that in this particular case, the floating # scores are so close, we enter floating error approximation # and the order is not guaranteed anymore with batching. [ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) for single_output in output: compare_pipeline_output_to_hub_spec(single_output, ZeroShotImageClassificationOutputElement) @require_torch def test_small_model_pt_fp16(self): self.test_small_model_pt(dtype="float16") @slow @require_torch def test_large_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_torch def test_siglip_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="google/siglip-base-patch16-224", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["2 cats", "a plane", "a remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.198, "label": "2 cats"}, {"score": 0.0, "label": "a remote"}, {"score": 0.0, "label": "a plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["2 cats", "a plane", "a remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.198, "label": "2 cats"}, {"score": 0.0, "label": "a remote"}, {"score": 0.0, "label": "a plane"}, ] ] * 5, ) @slow @require_torch def test_blip2_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="Salesforce/blip2-itm-vit-g", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier( image, candidate_labels=["2 cats", "a plane", "a remote"], tokenizer_kwargs={"return_token_type_ids": False}, ) self.assertEqual( nested_simplify(output), [ {"score": 0.369, "label": "2 cats"}, {"score": 0.333, "label": "a remote"}, {"score": 0.297, "label": "a plane"}, ], ) output = image_classifier( [image] * 5, candidate_labels=["2 cats", "a plane", "a remote"], batch_size=2, tokenizer_kwargs={"return_token_type_ids": False}, ) self.assertEqual( nested_simplify(output), [ [ {"score": 0.369, "label": "2 cats"}, {"score": 0.333, "label": "a remote"}, {"score": 0.297, "label": "a plane"}, ] ] * 5, )
transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py", "repo_id": "transformers", "token_count": 4414 }
597
import gc
import unittest

from transformers import AutoModelForCausalLM, AutoTokenizer, CompressedTensorsConfig
from transformers.testing_utils import backend_empty_cache, require_compressed_tensors, require_torch, torch_device
from transformers.utils import is_torch_available


if is_torch_available():
    import torch


@require_compressed_tensors
@require_torch
class CompressedTensorsTest(unittest.TestCase):
    tinyllama_w8a16 = "nm-testing/tinyllama-w8a16-dense-hf-quantizer"
    tinyllama_w4a16 = "nm-testing/tinyllama-w4a16-compressed-hf-quantizer"
    tinyllama_w8a8 = "nm-testing/tinyllama-w8a8-compressed-hf-quantizer"
    llama3_8b_fp8 = "nm-testing/Meta-Llama-3-8B-Instruct-fp8-hf_compat"

    prompt = "Paris is the capital of which country?"

    def tearDown(self):
        gc.collect()
        backend_empty_cache(torch_device)
        gc.collect()

    def test_config_args(self):
        with self.assertRaises(ValueError):
            # passing quant scheme directly is not allowed
            CompressedTensorsConfig(config_groups={"weights": {"num_bits": 8}})

        CompressedTensorsConfig(
            config_groups={"FP8": ["Linear"]},
            ignore=["lm_head"],
            quantization_status="frozen",
            sparsity_config={"format": "dense"},
        )

    def test_config_to_from_dict(self):
        config = CompressedTensorsConfig(config_groups={"FP8": ["Linear"]}, sparsity_config={"format": "dense"})
        config_dict = config.to_dict()
        config_from_dict = CompressedTensorsConfig.from_dict(config_dict)

        from compressed_tensors import QuantizationConfig, SparsityCompressionConfig

        self.assertIsInstance(config_from_dict.quantization_config, QuantizationConfig)
        self.assertIsInstance(config_from_dict.sparsity_config, SparsityCompressionConfig)

    def test_tinyllama_w8a8(self):
        expected_out = "<s> Paris is the capital of which country?\n\n 1. Paris is the capital of which country?\n\n 1. Paris is the capital of which country?\n\n 1. Paris is the capital of which country?\n\n"
        self._test_quantized_model(self.tinyllama_w8a8, expected_out)

    def test_tinyllama_w4a16(self):
        expected_out = "<s> Paris is the capital of which country?\nAnswer: Paris is the capital of France.\nQuestion: Which country is the capital of which city?\nAnswer: The capital of the city of New York is New York.\nQuestion: Which"
        self._test_quantized_model(self.tinyllama_w4a16, expected_out)

    def test_tinyllama_w8a16(self):
        expected_out = "<s> Paris is the capital of which country?\nA. France\nB. Germany\nC. Spain\nD. Italy\nE. Switzerland\nQ10. Which of the following is not a country in the European Union?\nA."
        self._test_quantized_model(self.tinyllama_w8a16, expected_out)

    def test_llama_8b_fp8(self):
        expected_out = "<|begin_of_text|>Paris is the capital of which country? France\nWhat is the name of the famous museum in Paris that is home to the Mona Lisa? The Louvre\nWhat is the name of the famous bridge in Paris that is often associated with the city"
        self._test_quantized_model(self.llama3_8b_fp8, expected_out)

    def _test_quantized_model(self, model_name: str, expected_output: str):
        """Carry out generation"""
        quantized_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        device = quantized_model.device

        self.assertIsNotNone(
            quantized_model.config.quantization_config,
            "quantization_config should not be None",
        )
        self.assertTrue(
            any(
                key
                for key, tensor in quantized_model.state_dict().items()
                if "scale" in key and not torch.all(tensor == 1.0)
            ),
            "quantized model should load a non-trivial scale into the state dict",
        )

        inputs = tokenizer(self.prompt, return_tensors="pt").to(device)
        generated_ids = quantized_model.generate(**inputs, max_length=50, do_sample=False)
        outputs = tokenizer.batch_decode(generated_ids)

        self.assertIsNotNone(outputs)
        self.assertEqual(outputs[0], expected_output)
transformers/tests/quantization/compressed_tensors_integration/test_compressed_tensors.py/0
{ "file_path": "transformers/tests/quantization/compressed_tensors_integration/test_compressed_tensors.py", "repo_id": "transformers", "token_count": 1700 }
598
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import sys import tempfile import unittest from contextlib import contextmanager from pathlib import Path from git import Repo from transformers.testing_utils import CaptureStdout REPO_PATH = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(REPO_PATH, "utils")) import tests_fetcher # noqa: E402 from tests_fetcher import ( # noqa: E402 checkout_commit, clean_code, create_reverse_dependency_map, create_reverse_dependency_tree, diff_is_docstring_only, extract_imports, get_all_tests, get_diff, get_module_dependencies, get_tree_starting_at, infer_tests_to_run, init_test_examples_dependencies, parse_commit_message, print_tree_deps_of, ) BERT_MODELING_FILE = "src/transformers/models/bert/modeling_bert.py" BERT_MODEL_FILE = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. ''' This is the code """ BERT_MODEL_FILE_NEW_DOCSTRING = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. It has been updated. ''' This is the code """ BERT_MODEL_FILE_NEW_CODE = """from ...modeling_utils import PreTrainedModel from ...utils import is_torch_available from .configuration_bert import BertConfig class BertModel: ''' This is the docstring. ''' This is the code. It has been updated """ def create_tmp_repo(tmp_dir, models=None): """ Creates a repository in a temporary directory mimicking the structure of Transformers. Uses the list of models provided (which defaults to just `["bert"]`). 
""" tmp_dir = Path(tmp_dir) if tmp_dir.exists(): shutil.rmtree(tmp_dir) tmp_dir.mkdir(exist_ok=True) repo = Repo.init(tmp_dir) if models is None: models = ["bert"] class_names = [model[0].upper() + model[1:] for model in models] transformers_dir = tmp_dir / "src" / "transformers" transformers_dir.mkdir(parents=True, exist_ok=True) with open(transformers_dir / "__init__.py", "w") as f: init_lines = ["from .utils import cached_file, is_torch_available"] init_lines.extend( [f"from .models.{model} import {cls}Config, {cls}Model" for model, cls in zip(models, class_names)] ) f.write("\n".join(init_lines) + "\n") with open(transformers_dir / "configuration_utils.py", "w") as f: f.write("from .utils import cached_file\n\ncode") with open(transformers_dir / "modeling_utils.py", "w") as f: f.write("from .utils import cached_file\n\ncode") utils_dir = tmp_dir / "src" / "transformers" / "utils" utils_dir.mkdir(exist_ok=True) with open(utils_dir / "__init__.py", "w") as f: f.write("from .hub import cached_file\nfrom .imports import is_torch_available\n") with open(utils_dir / "hub.py", "w") as f: f.write("import huggingface_hub\n\ncode") with open(utils_dir / "imports.py", "w") as f: f.write("code") model_dir = tmp_dir / "src" / "transformers" / "models" model_dir.mkdir(parents=True, exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write("\n".join([f"import {model}" for model in models])) for model, cls in zip(models, class_names): model_dir = tmp_dir / "src" / "transformers" / "models" / model model_dir.mkdir(parents=True, exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write(f"from .configuration_{model} import {cls}Config\nfrom .modeling_{model} import {cls}Model\n") with open(model_dir / f"configuration_{model}.py", "w") as f: f.write("from ...configuration_utils import PretrainedConfig\ncode") with open(model_dir / f"modeling_{model}.py", "w") as f: modeling_code = BERT_MODEL_FILE.replace("bert", model).replace("Bert", cls) f.write(modeling_code) test_dir = tmp_dir / "tests" test_dir.mkdir(exist_ok=True) with open(test_dir / "test_modeling_common.py", "w") as f: f.write("from transformers.modeling_utils import PreTrainedModel\ncode") for model, cls in zip(models, class_names): test_model_dir = test_dir / "models" / model test_model_dir.mkdir(parents=True, exist_ok=True) (test_model_dir / "__init__.py").touch() with open(test_model_dir / f"test_modeling_{model}.py", "w") as f: f.write( f"from transformers import {cls}Config, {cls}Model\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode" ) example_dir = tmp_dir / "examples" example_dir.mkdir(exist_ok=True) for framework in ["flax", "pytorch", "tensorflow"]: framework_dir = example_dir / framework framework_dir.mkdir(exist_ok=True) with open(framework_dir / f"test_{framework}_examples.py", "w") as f: f.write("""test_args = "run_glue.py"\n""") glue_dir = framework_dir / "text-classification" glue_dir.mkdir(exist_ok=True) with open(glue_dir / "run_glue.py", "w") as f: f.write("from transformers import BertModel\n\ncode") repo.index.add(["examples", "src", "tests"]) repo.index.commit("Initial commit") repo.create_head("main") repo.head.reference = repo.refs.main repo.delete_head("master") return repo @contextmanager def patch_transformer_repo_path(new_folder): """ Temporarily patches the variables defines in `tests_fetcher` to use a different location for the repo. 
""" old_repo_path = tests_fetcher.PATH_TO_REPO tests_fetcher.PATH_TO_REPO = Path(new_folder).resolve() tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples" tests_fetcher.PATH_TO_TRANFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers" tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests" try: yield finally: tests_fetcher.PATH_TO_REPO = old_repo_path tests_fetcher.PATH_TO_EXAMPLES = tests_fetcher.PATH_TO_REPO / "examples" tests_fetcher.PATH_TO_TRANFORMERS = tests_fetcher.PATH_TO_REPO / "src/transformers" tests_fetcher.PATH_TO_TESTS = tests_fetcher.PATH_TO_REPO / "tests" def commit_changes(filenames, contents, repo, commit_message="Commit"): """ Commit new `contents` to `filenames` inside a given `repo`. """ if not isinstance(filenames, list): filenames = [filenames] if not isinstance(contents, list): contents = [contents] folder = Path(repo.working_dir) for filename, content in zip(filenames, contents): with open(folder / filename, "w") as f: f.write(content) repo.index.add(filenames) commit = repo.index.commit(commit_message) return commit.hexsha class TestFetcherTester(unittest.TestCase): def test_checkout_commit(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) initial_sha = repo.head.commit.hexsha new_sha = commit_changes(BERT_MODELING_FILE, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert repo.head.commit.hexsha == new_sha with checkout_commit(repo, initial_sha): assert repo.head.commit.hexsha == initial_sha with open(tmp_folder / BERT_MODELING_FILE) as f: assert f.read() == BERT_MODEL_FILE assert repo.head.commit.hexsha == new_sha with open(tmp_folder / BERT_MODELING_FILE) as f: assert f.read() == BERT_MODEL_FILE_NEW_DOCSTRING def test_clean_code(self): # Clean code removes all strings in triple quotes assert clean_code('"""\nDocstring\n"""\ncode\n"""Long string"""\ncode\n') == "code\ncode" assert clean_code("'''\nDocstring\n'''\ncode\n'''Long string'''\ncode\n'''") == "code\ncode" # Clean code removes all comments assert clean_code("code\n# Comment\ncode") == "code\ncode" assert clean_code("code # inline comment\ncode") == "code \ncode" def test_get_all_tests(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): assert get_all_tests() == ["tests/models/bert", "tests/test_modeling_common.py"] def test_get_all_tests_on_full_repo(self): all_tests = get_all_tests() assert "tests/models/albert" in all_tests assert "tests/models/bert" in all_tests assert "tests/repo_utils" in all_tests assert "tests/test_pipeline_mixin.py" in all_tests assert "tests/models" not in all_tests assert "tests/__pycache__" not in all_tests assert "tests/models/albert/test_modeling_albert.py" not in all_tests assert "tests/repo_utils/test_tests_fetcher.py" not in all_tests def test_diff_is_docstring_only(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) branching_point = repo.refs.main.commit bert_file = BERT_MODELING_FILE commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert diff_is_docstring_only(repo, branching_point, bert_file) commit_changes(bert_file, BERT_MODEL_FILE_NEW_CODE, repo) assert not diff_is_docstring_only(repo, branching_point, bert_file) def test_get_diff(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) repo = create_tmp_repo(tmp_folder) initial_commit = 
repo.refs.main.commit bert_file = BERT_MODELING_FILE commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING, repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [] commit_changes(bert_file, BERT_MODEL_FILE_NEW_DOCSTRING + "\n# Adding a comment\n", repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [] commit_changes(bert_file, BERT_MODEL_FILE_NEW_CODE, repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == [ "src/transformers/models/bert/modeling_bert.py" ] commit_changes("src/transformers/utils/hub.py", "import huggingface_hub\n\nnew code", repo) assert get_diff(repo, repo.head.commit, repo.head.commit.parents) == ["src/transformers/utils/hub.py"] assert get_diff(repo, repo.head.commit, [initial_commit]) == [ "src/transformers/models/bert/modeling_bert.py", "src/transformers/utils/hub.py", ] def test_extract_imports_relative(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_bert_imports = [ ("src/transformers/modeling_utils.py", ["PreTrainedModel"]), ("src/transformers/utils/__init__.py", ["is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] expected_utils_imports = [ ("src/transformers/utils/hub.py", ["cached_file"]), ("src/transformers/utils/imports.py", ["is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports assert extract_imports("src/transformers/utils/__init__.py") == expected_utils_imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from ...utils import cached_file, is_torch_available\nfrom .configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with multi-line imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from ...utils import (\n cached_file,\n is_torch_available\n)\nfrom .configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports def test_extract_imports_absolute(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import cached_file, is_torch_available\nfrom transformers.models.bert.configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with multi-line imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import (\n cached_file,\n is_torch_available\n)\nfrom transformers.models.bert.configuration_bert import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/models/bert/configuration_bert.py", ["BertConfig"]), 
("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports # Test with base imports with open(tmp_folder / BERT_MODELING_FILE, "w") as f: f.write( "from transformers.utils import (\n cached_file,\n is_torch_available\n)\nfrom transformers import BertConfig\n" ) expected_bert_imports = [ ("src/transformers/__init__.py", ["BertConfig"]), ("src/transformers/utils/__init__.py", ["cached_file", "is_torch_available"]), ] with patch_transformer_repo_path(tmp_folder): assert extract_imports(BERT_MODELING_FILE) == expected_bert_imports def test_get_module_dependencies(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies expected_test_bert_dependencies = [ "tests/test_modeling_common.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] with patch_transformer_repo_path(tmp_folder): assert ( get_module_dependencies("tests/models/bert/test_modeling_bert.py") == expected_test_bert_dependencies ) # Test with a submodule (tmp_folder / "src/transformers/utils/logging.py").touch() with open(tmp_folder / BERT_MODELING_FILE, "a") as f: f.write("from ...utils import logging\n") expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/logging.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies # Test with an object non-imported in the init create_tmp_repo(tmp_folder) with open(tmp_folder / BERT_MODELING_FILE, "a") as f: f.write("from ...utils import CONSTANT\n") expected_bert_dependencies = [ "src/transformers/modeling_utils.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/utils/__init__.py", "src/transformers/utils/imports.py", ] with patch_transformer_repo_path(tmp_folder): assert get_module_dependencies(BERT_MODELING_FILE) == expected_bert_dependencies # Test with an example create_tmp_repo(tmp_folder) expected_example_dependencies = ["src/transformers/models/bert/modeling_bert.py"] with patch_transformer_repo_path(tmp_folder): assert ( get_module_dependencies("examples/pytorch/text-classification/run_glue.py") == expected_example_dependencies ) def test_create_reverse_dependency_tree(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): tree = create_reverse_dependency_tree() init_edges = [ "src/transformers/utils/hub.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] assert {f for f, g in tree if g == "src/transformers/__init__.py"} == set(init_edges) bert_edges = [ "src/transformers/modeling_utils.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/configuration_bert.py", ] assert {f for f, g in tree if g == "src/transformers/models/bert/modeling_bert.py"} == set(bert_edges) test_bert_edges = [ 
"tests/test_modeling_common.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ] assert {f for f, g in tree if g == "tests/models/bert/test_modeling_bert.py"} == set(test_bert_edges) def test_get_tree_starting_at(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): edges = create_reverse_dependency_tree() bert_tree = get_tree_starting_at("src/transformers/models/bert/modeling_bert.py", edges) config_utils_tree = get_tree_starting_at("src/transformers/configuration_utils.py", edges) expected_bert_tree = [ "src/transformers/models/bert/modeling_bert.py", [("src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py")], ] assert bert_tree == expected_bert_tree expected_config_tree = [ "src/transformers/configuration_utils.py", [("src/transformers/configuration_utils.py", "src/transformers/models/bert/configuration_bert.py")], [ ("src/transformers/models/bert/configuration_bert.py", "tests/models/bert/test_modeling_bert.py"), ( "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", ), ], ] # Order of the edges is random assert [set(v) for v in config_utils_tree] == [set(v) for v in expected_config_tree] def test_print_tree_deps_of(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) # There are two possible outputs since the order of the last two lines is non-deterministic. expected_std_out = """src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py src/transformers/configuration_utils.py src/transformers/models/bert/configuration_bert.py src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py""" expected_std_out_2 = """src/transformers/models/bert/modeling_bert.py tests/models/bert/test_modeling_bert.py src/transformers/configuration_utils.py src/transformers/models/bert/configuration_bert.py tests/models/bert/test_modeling_bert.py src/transformers/models/bert/modeling_bert.py""" with patch_transformer_repo_path(tmp_folder), CaptureStdout() as cs: print_tree_deps_of("src/transformers/models/bert/modeling_bert.py") print_tree_deps_of("src/transformers/configuration_utils.py") assert cs.out.strip() in [expected_std_out, expected_std_out_2] def test_init_test_examples_dependencies(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) expected_example_deps = { "examples/flax/test_flax_examples.py": [ "examples/flax/text-classification/run_glue.py", "examples/flax/test_flax_examples.py", ], "examples/pytorch/test_pytorch_examples.py": [ "examples/pytorch/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", ], "examples/tensorflow/test_tensorflow_examples.py": [ "examples/tensorflow/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", ], } expected_examples = { "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } with patch_transformer_repo_path(tmp_folder): example_deps, all_examples = init_test_examples_dependencies() assert example_deps == expected_example_deps assert 
{str(f.relative_to(tmp_folder)) for f in all_examples} == expected_examples def test_create_reverse_dependency_map(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) create_tmp_repo(tmp_folder) with patch_transformer_repo_path(tmp_folder): reverse_map = create_reverse_dependency_map() # impact of BERT modeling file (note that we stop at the inits and don't go down further) expected_bert_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/__init__.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/modeling_bert.py"]) == expected_bert_deps # init gets the direct deps (and their recursive deps) expected_init_deps = { "src/transformers/utils/__init__.py", "src/transformers/utils/hub.py", "src/transformers/utils/imports.py", "src/transformers/models/bert/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "src/transformers/configuration_utils.py", "src/transformers/modeling_utils.py", "tests/test_modeling_common.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/__init__.py"]) == expected_init_deps expected_init_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/__init__.py"]) == expected_init_deps # Test that with more models init of bert only gets deps to bert. 
create_tmp_repo(tmp_folder, models=["bert", "gpt2"]) with patch_transformer_repo_path(tmp_folder): reverse_map = create_reverse_dependency_map() # init gets the direct deps (and their recursive deps) expected_init_deps = { "src/transformers/__init__.py", "src/transformers/models/bert/configuration_bert.py", "src/transformers/models/bert/modeling_bert.py", "tests/models/bert/test_modeling_bert.py", "examples/flax/test_flax_examples.py", "examples/flax/text-classification/run_glue.py", "examples/pytorch/test_pytorch_examples.py", "examples/pytorch/text-classification/run_glue.py", "examples/tensorflow/test_tensorflow_examples.py", "examples/tensorflow/text-classification/run_glue.py", } assert set(reverse_map["src/transformers/models/bert/__init__.py"]) == expected_init_deps @unittest.skip("Broken for now TODO @ArthurZucker") def test_infer_tests_to_run(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] repo = create_tmp_repo(tmp_folder, models=models) commit_changes("src/transformers/models/bert/modeling_bert.py", BERT_MODEL_FILE_NEW_CODE, repo) example_tests = { "examples/flax/test_flax_examples.py", "examples/pytorch/test_pytorch_examples.py", "examples/tensorflow/test_tensorflow_examples.py", } with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "test-output.txt") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt") as f: example_tests_to_run = f.read() assert tests_to_run == "tests/models/bert/test_modeling_bert.py" assert set(example_tests_to_run.split(" ")) == example_tests # Fake a new model addition repo = create_tmp_repo(tmp_folder, models=models) branch = repo.create_head("new_model") branch.checkout() with open(tmp_folder / "src/transformers/__init__.py", "a") as f: f.write("from .models.t5 import T5Config, T5Model\n") model_dir = tmp_folder / "src/transformers/models/t5" model_dir.mkdir(exist_ok=True) with open(model_dir / "__init__.py", "w") as f: f.write("from .configuration_t5 import T5Config\nfrom .modeling_t5 import T5Model\n") with open(model_dir / "configuration_t5.py", "w") as f: f.write("from ...configuration_utils import PretrainedConfig\ncode") with open(model_dir / "modeling_t5.py", "w") as f: modeling_code = BERT_MODEL_FILE.replace("bert", "t5").replace("Bert", "T5") f.write(modeling_code) test_dir = tmp_folder / "tests/models/t5" test_dir.mkdir(exist_ok=True) (test_dir / "__init__.py").touch() with open(test_dir / "test_modeling_t5.py", "w") as f: f.write( "from transformers import T5Config, T5Model\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode" ) repo.index.add(["src", "tests"]) repo.index.commit("Add T5 model") with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt") with open(tmp_folder / "test-output.txt") as f: tests_to_run = f.read() with open(tmp_folder / "examples_test_list.txt") as f: example_tests_to_run = f.read() expected_tests = { "tests/models/bert/test_modeling_bert.py", "tests/models/gpt2/test_modeling_gpt2.py", "tests/models/t5/test_modeling_t5.py", "tests/test_modeling_common.py", } assert set(tests_to_run.split(" ")) == expected_tests assert set(example_tests_to_run.split(" ")) == example_tests with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", filter_models=False) with open(tmp_folder / "test-output.txt") as f: tests_to_run = 
f.read() with open(tmp_folder / "examples_test_list.txt") as f: example_tests_to_run = f.read() expected_tests = [f"tests/models/{name}/test_modeling_{name}.py" for name in models + ["t5"]] expected_tests = set(expected_tests + ["tests/test_modeling_common.py"]) assert set(tests_to_run.split(" ")) == expected_tests assert set(example_tests_to_run.split(" ")) == example_tests @unittest.skip("Broken for now TODO @ArthurZucker") def test_infer_tests_to_run_with_test_modifs(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] + [f"bert{i}" for i in range(10)] repo = create_tmp_repo(tmp_folder, models=models) commit_changes( "tests/models/bert/test_modeling_bert.py", "from transformers import BertConfig, BertModel\nfrom ...test_modeling_common import ModelTesterMixin\n\ncode1", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "test-output.txt") as f: tests_to_run = f.read() assert tests_to_run == "tests/models/bert/test_modeling_bert.py" @unittest.skip("Broken for now TODO @ArthurZucker") def test_infer_tests_to_run_with_examples_modifs(self): with tempfile.TemporaryDirectory() as tmp_folder: tmp_folder = Path(tmp_folder) models = ["bert", "gpt2"] repo = create_tmp_repo(tmp_folder, models=models) # Modification in one example trigger the corresponding test commit_changes( "examples/pytorch/text-classification/run_glue.py", "from transformers import BertModeln\n\ncode1", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "examples_test_list.txt") as f: example_tests_to_run = f.read() assert example_tests_to_run == "examples/pytorch/test_pytorch_examples.py" # Modification in one test example file trigger that test repo = create_tmp_repo(tmp_folder, models=models) commit_changes( "examples/pytorch/test_pytorch_examples.py", """test_args = "run_glue.py"\nmore_code""", repo, ) with patch_transformer_repo_path(tmp_folder): infer_tests_to_run(tmp_folder / "test-output.txt", diff_with_last_commit=True) with open(tmp_folder / "examples_test_list.txt") as f: example_tests_to_run = f.read() assert example_tests_to_run == "examples/pytorch/test_pytorch_examples.py" def test_parse_commit_message(self): assert parse_commit_message("Normal commit") == {"skip": False, "no_filter": False, "test_all": False} assert parse_commit_message("[skip ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[ci skip] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[skip-ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[skip_ci] commit") == {"skip": True, "no_filter": False, "test_all": False} assert parse_commit_message("[no filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[no-filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[no_filter] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[filter-no] commit") == {"skip": False, "no_filter": True, "test_all": False} assert parse_commit_message("[test all] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[all test] commit") == {"skip": False, "no_filter": False, "test_all": 
True} assert parse_commit_message("[test-all] commit") == {"skip": False, "no_filter": False, "test_all": True} assert parse_commit_message("[all_test] commit") == {"skip": False, "no_filter": False, "test_all": True}
transformers/tests/repo_utils/test_tests_fetcher.py/0
{ "file_path": "transformers/tests/repo_utils/test_tests_fetcher.py", "repo_id": "transformers", "token_count": 17293 }
599
# Copyright 2021 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class FeatureExtractionSavingTestMixin:
    test_cast_dtype = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
transformers/tests/test_feature_extraction_common.py/0
{ "file_path": "transformers/tests/test_feature_extraction_common.py", "repo_id": "transformers", "token_count": 821 }
600
# Copyright 2018 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import gc import importlib import json import math import os import random import re import subprocess import sys import tempfile import unittest from functools import partial from itertools import product from pathlib import Path from typing import Any from unittest.mock import Mock, patch import numpy as np import pytest from huggingface_hub import HfFolder, ModelCard, create_branch, list_repo_commits, list_repo_files from packaging import version from parameterized import parameterized from transformers import ( AutoFeatureExtractor, AutoImageProcessor, AutoProcessor, AutoTokenizer, DataCollatorForLanguageModeling, IntervalStrategy, PretrainedConfig, TrainerCallback, TrainingArguments, default_data_collator, enable_full_determinism, get_polynomial_decay_schedule_with_warmup, is_datasets_available, is_torch_available, logging, set_seed, ) from transformers.hyperparameter_search import ALL_HYPERPARAMETER_SEARCH_BACKENDS from transformers.testing_utils import ( ENDPOINT_STAGING, TOKEN, USER, CaptureLogger, LoggingLevel, TemporaryHubRepo, TestCasePlus, backend_device_count, backend_empty_cache, backend_max_memory_allocated, backend_memory_allocated, backend_reset_max_memory_allocated, backend_reset_peak_memory_stats, evaluate_side_effect_factory, execute_subprocess_async, get_gpu_count, get_steps_per_epoch, get_tests_dir, is_staging_test, require_accelerate, require_apollo_torch, require_bitsandbytes, require_deepspeed, require_galore_torch, require_grokadamw, require_liger_kernel, require_lomo, require_non_hpu, require_optuna, require_peft, require_ray, require_safetensors, require_schedulefree, require_sentencepiece, require_sigopt, require_tensorboard, require_tokenizers, require_torch, require_torch_accelerator, require_torch_bf16, require_torch_fp16, require_torch_gpu, require_torch_multi_accelerator, require_torch_non_multi_accelerator, require_torch_non_multi_gpu, require_torch_optimi, require_torch_tensorrt_fx, require_torch_tf32, require_torch_up_to_2_accelerators, require_vision, require_wandb, run_first, run_test_using_subprocess, slow, torch_device, ) from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR, HPSearchBackend, check_target_module_exists from transformers.training_args import OptimizerNames from transformers.utils import ( SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, check_torch_load_is_safe, is_accelerate_available, is_apex_available, is_bitsandbytes_available, is_safetensors_available, is_torchao_available, is_torchdistx_available, ) from transformers.utils.hp_naming import TrialShortNamer if torch_device == "hpu": RTOL = 1e-3 ATOL = 1e-3 else: RTOL = 1e-5 ATOL = 1e-5 if is_torch_available(): import torch from torch import nn from torch.utils.data import IterableDataset import transformers.optimization from transformers import ( AutoModelForCausalLM, AutoModelForSequenceClassification, EarlyStoppingCallback, GlueDataset, 
GlueDataTrainingArguments, GPT2Config, GPT2LMHeadModel, LineByLineTextDataset, LlamaConfig, LlamaForCausalLM, PreTrainedModel, Trainer, TrainerState, ) from transformers.trainer_pt_utils import AcceleratorConfig if is_safetensors_available(): import safetensors.torch if is_datasets_available(): import datasets # for version specific tests in TrainerIntegrationTest require_accelerate_version_min_0_28 = partial(require_accelerate, min_version="0.28") require_accelerate_version_min_0_30 = partial(require_accelerate, min_version="0.30") GRAD_ACCUM_KWARGS_VERSION_AVAILABLE = is_accelerate_available("0.28") if is_accelerate_available(): from accelerate import Accelerator from accelerate.state import AcceleratorState PATH_SAMPLE_TEXT = f"{get_tests_dir()}/fixtures/sample_text.txt" class StoreLossCallback(TrainerCallback): """ Simple callback to store the loss. """ def __init__(self): self.losses = [] def on_log(self, args, state, control, logs=None, **kwargs): if "loss" in logs: self.losses.append(logs["loss"]) class MockCudaOOMCallback(TrainerCallback): """ Simple callback to simulate CUDA OOM error if the batch size is >= to `batch_size_limit`. """ def __init__(self, batch_size_limit=16): self.batch_size_limit = batch_size_limit def on_step_end(self, args, state, control, **kwargs): # simulate OOM on the first step if state.train_batch_size >= self.batch_size_limit: raise RuntimeError("CUDA out of memory.") def ForCausalLMLoss(logits, labels, vocab_size, num_items_in_batch, disable_num_items_in_batch=False): # Upcast to float if we need to compute the loss to avoid potential precision issues logits = logits.float() # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens shift_logits = shift_logits.view(-1, vocab_size) shift_labels = shift_labels.view(-1) # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) if num_items_in_batch is None or disable_num_items_in_batch: loss = nn.functional.cross_entropy(shift_logits, shift_labels, ignore_index=-100, reduction="mean") else: loss = nn.functional.cross_entropy(shift_logits, shift_labels, ignore_index=-100, reduction="sum") loss = loss / num_items_in_batch return loss class RegressionDataset: def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): np.random.seed(seed) self.label_names = ["labels"] if label_names is None else label_names self.length = length self.x = np.random.normal(size=(length,)).astype(np.float32) self.ys = [a * self.x + b + np.random.normal(scale=0.1, size=(length,)) for _ in self.label_names] self.ys = [y.astype(np.float32) for y in self.ys] def __len__(self): return self.length def __getitem__(self, i): result = {name: y[i] for name, y in zip(self.label_names, self.ys)} result["input_x"] = self.x[i] return result # Converting Bytes to Megabytes def bytes2megabytes(x): return int(x / 2**20) # Copied from accelerate: https://github.com/huggingface/accelerate/blob/ee163b66fb7848892519e804688cb4ae981aacbe/src/accelerate/test_utils/scripts/external_deps/test_peak_memory_usage.py#L40C1-L73C68 class TorchTracemalloc: def __enter__(self): gc.collect() if torch_device in ["cuda", "xpu"]: backend_empty_cache(torch_device) backend_reset_max_memory_allocated(torch_device) # reset the peak gauge to zero self.begin = backend_memory_allocated(torch_device) return self def __exit__(self, *exc): gc.collect() if torch_device in ["cuda", "xpu"]: backend_empty_cache(torch_device) self.end = 
backend_memory_allocated(torch_device) self.peak = backend_max_memory_allocated(torch_device) self.used = bytes2megabytes(self.end - self.begin) self.peaked = bytes2megabytes(self.peak - self.begin) @dataclasses.dataclass class RegressionTrainingArguments(TrainingArguments): a: float = 0.0 b: float = 0.0 keep_report_to: bool = False def __post_init__(self): super().__post_init__() # save resources not dealing with reporting unless specified (also avoids the warning when it's not set) # can be explicitly disabled via `keep_report_to` if not self.keep_report_to: self.report_to = [] class RepeatDataset: def __init__(self, x, length=64): self.x = x self.length = length def __len__(self): return self.length def __getitem__(self, i): return {"input_ids": self.x, "labels": self.x} class SequenceClassificationDataset: def __init__(self, length=64, vocab_size=100, num_labels=5): self.length = length self.sequences = [torch.randint(0, vocab_size, (64,)).tolist() for _ in range(length)] self.labels = torch.randint(0, num_labels, (length,)).tolist() def __len__(self): return self.length def __getitem__(self, i): return {"input_ids": self.sequences[i], "label": self.labels[i]} class DynamicShapesDataset: def __init__(self, length=64, seed=42, batch_size=8): self.length = length np.random.seed(seed) sizes = np.random.randint(1, 20, (length // batch_size,)) # For easy batching, we make every batch_size consecutive samples the same size. self.xs = [np.random.normal(size=(s,)).astype(np.float32) for s in sizes.repeat(batch_size)] self.ys = [np.random.normal(size=(s,)).astype(np.float32) for s in sizes.repeat(batch_size)] def __len__(self): return self.length def __getitem__(self, i): return {"input_x": self.xs[i], "labels": self.ys[i]} class AlmostAccuracy: def __init__(self, thresh=0.25): self.thresh = thresh def __call__(self, eval_pred): predictions, labels = eval_pred true = np.abs(predictions - labels) <= self.thresh return {"accuracy": true.astype(np.float32).mean().item()} class AlmostAccuracyBatched: def __init__(self, thresh=0.25): self.thresh = thresh self.batch_acc = [] def __call__(self, eval_pred, compute_result): predictions, labels = eval_pred if isinstance(predictions, tuple): predictions = predictions[0] if isinstance(labels, tuple): labels = labels[0] batch_size = len(predictions) true = torch.abs(predictions - labels) <= self.thresh acc = true.type(torch.FloatTensor).mean().item() self.batch_acc.extend([acc] * batch_size) if compute_result: result = {"accuracy": np.mean(self.batch_acc).item()} self.batch_acc = [] return result class RegressionModelConfig(PretrainedConfig): def __init__(self, a=0, b=0, double_output=False, random_torch=True, **kwargs): super().__init__(**kwargs) self.a = a self.b = b self.double_output = double_output self.random_torch = random_torch self.hidden_size = 1 if is_torch_available(): class SampleIterableDataset(IterableDataset): def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): self.dataset = RegressionDataset(a=a, b=b, length=length, seed=seed, label_names=label_names) def __iter__(self): for i in range(len(self.dataset)): yield self.dataset[i] class FiniteIterableDataset(SampleIterableDataset): def __init__(self, a=2, b=3, length=64, seed=42, label_names=None): super().__init__(a, b, length, seed, label_names) self.current_sample = 0 def __iter__(self): while self.current_sample < len(self.dataset): yield self.dataset[self.current_sample] self.current_sample += 1 class MultiLoader: def __init__(self, loaders): self.loaders = loaders def 
__len__(self): return sum(len(loader) for loader in self.loaders) def __iter__(self): for loader in self.loaders: yield from loader class CustomDataloaderTrainer(Trainer): def get_train_dataloader(self): dataloaders = [super().get_train_dataloader(), super().get_train_dataloader()] return MultiLoader(dataloaders) def get_eval_dataloader(self, eval_dataset): dataloaders = [super().get_eval_dataloader(eval_dataset), super().get_eval_dataloader(eval_dataset)] return MultiLoader(dataloaders) class RegressionModel(nn.Module): def __init__(self, a=0, b=0, double_output=False): super().__init__() self.a = nn.Parameter(torch.tensor(a).float()) self.b = nn.Parameter(torch.tensor(b).float()) self.double_output = double_output self.config = None def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if labels is None: return (y, y) if self.double_output else (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y, y) if self.double_output else (loss, y) class RegressionDictModel(nn.Module): def __init__(self, a=0, b=0): super().__init__() self.a = nn.Parameter(torch.tensor(a).float()) self.b = nn.Parameter(torch.tensor(b).float()) self.config = None def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b result = {"output": y} if labels is not None: result["loss"] = nn.functional.mse_loss(y, labels) return result class RegressionPreTrainedModel(PreTrainedModel): config_class = RegressionModelConfig base_model_prefix = "regression" def __init__(self, config): super().__init__(config) self.a = nn.Parameter(torch.tensor(config.a).float()) self.b = nn.Parameter(torch.tensor(config.b).float()) self.double_output = config.double_output def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if labels is None: return (y, y) if self.double_output else (y,) loss = nn.functional.mse_loss(y, labels) return (loss, y, y) if self.double_output else (loss, y) class RegressionPreTrainedModelWithGradientCheckpointing(PreTrainedModel): config_class = RegressionModelConfig base_model_prefix = "regression" supports_gradient_checkpointing = True def __init__(self, config): super().__init__(config) self.layers = nn.ModuleList([nn.Linear(config.hidden_size, config.hidden_size) for _ in range(4)]) self.head = nn.Linear(config.hidden_size, 1) self.gradient_checkpointing = False self.double_output = config.double_output def forward(self, input_x, labels=None, **kwargs): y = input_x.unsqueeze(0) for layer in self.layers: if self.training and self.gradient_checkpointing: outputs = self._gradient_checkpointing_func(layer.__call__, y) else: outputs = layer(y) y = outputs * 3 logits = self.head(y) if labels is None: return (logits, logits) if self.double_output else (logits,) loss = nn.functional.mse_loss(logits, labels) return (loss, y, y) if self.double_output else (loss, y) class RegressionRandomPreTrainedModel(PreTrainedModel): config_class = RegressionModelConfig base_model_prefix = "regression" def __init__(self, config): super().__init__(config) self.a = nn.Parameter(torch.tensor(config.a).float()) self.b = nn.Parameter(torch.tensor(config.b).float()) self.random_torch = config.random_torch def forward(self, input_x, labels=None, **kwargs): y = input_x * self.a + self.b if self.random_torch: torch_rand = torch.randn(1).squeeze() np_rand = np.random.rand() rand_rand = random.random() if self.random_torch: y += 0.05 * torch_rand y += 0.05 * torch.tensor(np_rand + rand_rand) if labels is None: return (y,) loss = nn.functional.mse_loss(y, labels) 
return (loss, y) class BasicTextGenerationModel(nn.Module): def __init__(self, vocab_size, hidden_size): super().__init__() self.embedding = nn.Embedding(vocab_size, hidden_size) self.lstm = nn.LSTM(hidden_size, hidden_size, batch_first=True) self.fc = nn.Linear(hidden_size, vocab_size) def forward(self, input_ids, labels=None, **kwargs): embedded = self.embedding(input_ids) lstm_out, _ = self.lstm(embedded) logits = self.fc(lstm_out) if labels is None: return logits loss = nn.functional.cross_entropy(logits.view(-1, logits.size(-1)), labels.view(-1)) return loss, logits def create_dummy_dataset_for_text_generation(vocab_size, seq_length, num_samples): import numpy as np # Create random input sequences input_ids = np.random.randint(0, vocab_size, (num_samples, seq_length)) # Create a datasets.Dataset dataset = datasets.Dataset.from_dict({"input_ids": input_ids, "labels": input_ids}) return dataset class TstLayer(nn.Module): def __init__(self, hidden_size): super().__init__() self.linear1 = nn.Linear(hidden_size, hidden_size) self.ln1 = nn.LayerNorm(hidden_size) self.linear2 = nn.Linear(hidden_size, hidden_size) self.ln2 = nn.LayerNorm(hidden_size) self.bias = nn.Parameter(torch.zeros(hidden_size)) def forward(self, x): h = self.ln1(nn.functional.relu(self.linear1(x))) h = nn.functional.relu(self.linear2(x)) return self.ln2(x + h + self.bias) def get_regression_trainer( a=0, b=0, double_output=False, train_len=64, eval_len=64, pretrained=True, keep_report_to=False, output_dir=None, **kwargs, ): label_names = kwargs.get("label_names") gradient_checkpointing = kwargs.get("gradient_checkpointing", False) train_dataset = RegressionDataset(length=train_len, label_names=label_names) eval_dataset = RegressionDataset(length=eval_len, label_names=label_names) model_init = kwargs.pop("model_init", None) if model_init is not None: model = None else: if pretrained: config = RegressionModelConfig(a=a, b=b, double_output=double_output) # We infer the correct model class if one uses gradient_checkpointing or not target_cls = ( RegressionPreTrainedModel if not gradient_checkpointing else RegressionPreTrainedModelWithGradientCheckpointing ) model = target_cls(config) else: model = RegressionModel(a=a, b=b, double_output=double_output) compute_metrics = kwargs.pop("compute_metrics", None) data_collator = kwargs.pop("data_collator", None) optimizers = kwargs.pop("optimizers", (None, None)) preprocess_logits_for_metrics = kwargs.pop("preprocess_logits_for_metrics", None) assert output_dir is not None, "output_dir should be specified for testing" args = RegressionTrainingArguments(output_dir, a=a, b=b, keep_report_to=keep_report_to, **kwargs) return Trainer( model, args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, optimizers=optimizers, model_init=model_init, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) def get_language_model_trainer(**kwargs): dataset = datasets.load_dataset("fka/awesome-chatgpt-prompts") model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") tokenizer.pad_token = tokenizer.eos_token def _tokenize_function(examples): model_inputs = tokenizer(examples["prompt"], padding="max_length", truncation=True) model_inputs["labels"] = np.array(model_inputs["input_ids"]).astype(np.int64) return model_inputs tokenized_datasets = dataset.map(_tokenize_function, batched=True) training_args = TrainingArguments(**kwargs) trainer = Trainer( 
model=model, args=training_args, train_dataset=tokenized_datasets["train"], ) return trainer class TrainerIntegrationCommon: def check_saved_checkpoints( self, output_dir, freq, total, is_pretrained=True, safe_weights=True, use_scaler=False ): weights_file = WEIGHTS_NAME if not safe_weights else SAFE_WEIGHTS_NAME file_list = [weights_file, "training_args.bin", "optimizer.pt", "scheduler.pt", "trainer_state.json"] if is_pretrained: file_list.append("config.json") if use_scaler: file_list.append("scaler.pt") for step in range(freq, total, freq): checkpoint = os.path.join(output_dir, f"checkpoint-{step}") self.assertTrue(os.path.isdir(checkpoint)) for filename in file_list: self.assertTrue(os.path.isfile(os.path.join(checkpoint, filename))) def check_best_model_has_been_loaded( self, output_dir, freq, total, trainer, metric, greater_is_better=False, is_pretrained=True, safe_weights=True ): checkpoint = os.path.join(output_dir, f"checkpoint-{(total // freq) * freq}") log_history = TrainerState.load_from_json(os.path.join(checkpoint, "trainer_state.json")).log_history values = [d[metric] for d in log_history] best_value = max(values) if greater_is_better else min(values) best_checkpoint = (values.index(best_value) + 1) * freq checkpoint = os.path.join(output_dir, f"checkpoint-{best_checkpoint}") if is_pretrained: best_model = RegressionPreTrainedModel.from_pretrained(checkpoint) best_model.to(trainer.args.device) else: best_model = RegressionModel() if not safe_weights: check_torch_load_is_safe() state_dict = torch.load(os.path.join(checkpoint, WEIGHTS_NAME), weights_only=True) else: state_dict = safetensors.torch.load_file(os.path.join(checkpoint, SAFE_WEIGHTS_NAME)) best_model.load_state_dict(state_dict) best_model.to(trainer.args.device) torch.testing.assert_close(best_model.a, trainer.model.a) torch.testing.assert_close(best_model.b, trainer.model.b) metrics = trainer.evaluate() self.assertEqual(metrics[metric], best_value) def remove_nan_logs(self, log): for key in list(log.keys()): if log[key] != log[key]: # Check if the value is NaN del log[key] def check_trainer_state_are_the_same(self, trainer_state, trainer_state1): # We'll pop things so operate on copies. state = trainer_state.copy() state1 = trainer_state1.copy() # Log history main contain different logs for the time metrics (after resuming a training). log_history = state.pop("log_history", None) log_history1 = state1.pop("log_history", None) self.assertEqual(state, state1) skip_log_keys = ["train_runtime", "train_samples_per_second", "train_steps_per_second", "train_loss"] for log, log1 in zip(log_history, log_history1): for key in skip_log_keys: _ = log.pop(key, None) _ = log1.pop(key, None) self.remove_nan_logs(log) self.remove_nan_logs(log1) self.assertEqual(log, log1) def convert_to_sharded_checkpoint(self, folder, save_safe=True, load_safe=True): # Converts a checkpoint of a regression model to a sharded checkpoint. 
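# For reference, this helper splits a single weights file into one shard per parameter and writes an
# index JSON mapping each parameter to its shard. For the two-parameter regression checkpoints used
# in these tests, the safetensors index would look roughly like:
#   {"metadata": {}, "weight_map": {"a": "model-00001-of-00002.safetensors",
#                                   "b": "model-00002-of-00002.safetensors"}}
# (shard names shown here are illustrative, derived from SAFE_WEIGHTS_NAME and the shard-naming
# pattern below; exact names depend on the constant's value)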
if load_safe: loader = safetensors.torch.load_file weights_file = os.path.join(folder, SAFE_WEIGHTS_NAME) else: check_torch_load_is_safe() loader = torch.load weights_file = os.path.join(folder, WEIGHTS_NAME) if save_safe: extension = "safetensors" saver = safetensors.torch.save_file index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME) shard_name = SAFE_WEIGHTS_NAME else: extension = "bin" saver = torch.save index_file = os.path.join(folder, WEIGHTS_INDEX_NAME) shard_name = WEIGHTS_NAME state_dict = loader(weights_file) os.remove(weights_file) keys = list(state_dict.keys()) shard_files = [ shard_name.replace(f".{extension}", f"-{idx + 1:05d}-of-{len(keys):05d}.{extension}") for idx in range(len(keys)) ] index = {"metadata": {}, "weight_map": {key: shard_files[i] for i, key in enumerate(keys)}} with open(index_file, "w", encoding="utf-8") as f: content = json.dumps(index, indent=2, sort_keys=True) + "\n" f.write(content) for param_name, shard_file in zip(keys, shard_files): saver({param_name: state_dict[param_name]}, os.path.join(folder, shard_file)) @require_torch @require_sentencepiece @require_tokenizers class TrainerIntegrationPrerunTest(TestCasePlus, TrainerIntegrationCommon): """ Only tests that want to tap into the auto-pre-run 2 trainings: - self.default_trained_model - self.alternate_trained_model directly, or via check_trained_model """ def setUp(self): super().setUp() args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(learning_rate=0.1, output_dir=tmp_dir) trainer.train() self.default_trained_model = (trainer.model.a, trainer.model.b) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(learning_rate=0.1, seed=314, output_dir=tmp_dir) trainer.train() self.alternate_trained_model = (trainer.model.a, trainer.model.b) def check_trained_model(self, model, alternate_seed=False, **kwargs): # Checks a training seeded with learning_rate = 0.1 (a, b) = self.alternate_trained_model if alternate_seed else self.default_trained_model torch.testing.assert_close(model.a, a, **kwargs) torch.testing.assert_close(model.b, b, **kwargs) def test_reproducible_training(self): # Checks that training worked, model trained and seed made a reproducible training. with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(learning_rate=0.1, output_dir=tmp_dir) trainer.train() self.check_trained_model(trainer.model) # Checks that a different seed gets different (reproducible) results. with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(learning_rate=0.1, seed=314, output_dir=tmp_dir) trainer.train() self.check_trained_model(trainer.model, alternate_seed=True) def test_trainer_with_datasets(self): np.random.seed(42) x = np.random.normal(size=(64,)).astype(np.float32) y = 2.0 * x + 3.0 + np.random.normal(scale=0.1, size=(64,)).astype(np.float32) train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y}) # Base training. Should have the same results as test_reproducible_training model = RegressionModel() with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(tmp_dir, learning_rate=0.1, report_to="none") trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) # Can return tensors. 
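# Illustrative note on datasets formatting (a sketch of standard `datasets` behavior, not something
# specific to this test):
#   ds = datasets.Dataset.from_dict({"input_x": [0.0, 1.0], "label": [3.0, 5.0]})
#   ds.set_format(type="torch", dtype=torch.float32)
#   ds[0]  # -> {"input_x": tensor(0.), "label": tensor(3.)}
# so after set_format below, __getitem__ hands torch tensors straight to the Trainer.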
train_dataset.set_format(type="torch", dtype=torch.float32) model = RegressionModel() trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) # Adding one column not used by the model should have no impact z = np.random.normal(size=(64,)).astype(np.float32) train_dataset = datasets.Dataset.from_dict({"input_x": x, "label": y, "extra": z}) model = RegressionModel() trainer = Trainer(model, args, train_dataset=train_dataset) trainer.train() self.check_trained_model(trainer.model) def test_model_init(self): train_dataset = RegressionDataset() with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(tmp_dir, learning_rate=0.1, report_to="none") trainer = Trainer(args=args, train_dataset=train_dataset, model_init=lambda: RegressionModel()) trainer.train() self.check_trained_model(trainer.model) # Re-training should restart from scratch, thus lead the same results. trainer.train() self.check_trained_model(trainer.model) # Re-training should restart from scratch, thus lead the same results and new seed should be used. trainer.args.seed = 314 trainer.train() self.check_trained_model(trainer.model, alternate_seed=True) @slow def test_gradient_accumulation_loss_alignment_with_model_loss(self): set_seed(42) model_name = "nickypro/tinyllama-15M" dataset_name = "wikitext" dataset_config = "wikitext-2-raw-v1" dataset = datasets.load_dataset(dataset_name, dataset_config, split="train[:40]") tokenizer = AutoTokenizer.from_pretrained(model_name) tokenizer.pad_token = tokenizer.eos_token def tokenize_function(examples): return tokenizer(examples["text"], max_length=16, padding="max_length", truncation=True) tokenized_dataset = dataset.map(tokenize_function, batched=True, remove_columns=dataset.column_names) data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) args_kwargs = { "report_to": "none", "logging_steps": 1, "max_steps": 5, "learning_rate": 3e-4, "disable_tqdm": True, } with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( tmp_dir, **args_kwargs, ) # train with base loss set_seed(42) model = AutoModelForCausalLM.from_pretrained(model_name) base_loss_callback = StoreLossCallback() trainer = Trainer( model, args, train_dataset=tokenized_dataset, callbacks=[base_loss_callback], data_collator=data_collator, ) assert trainer.model_accepts_loss_kwargs trainer.train() args = TrainingArguments( tmp_dir, **args_kwargs, gradient_accumulation_steps=2, per_device_train_batch_size=4, ) # train with gradient accumulation set_seed(42) model = AutoModelForCausalLM.from_pretrained(model_name) grad_accum_loss_callback = StoreLossCallback() trainer = Trainer( model, args, train_dataset=tokenized_dataset, callbacks=[grad_accum_loss_callback], data_collator=data_collator, ) assert trainer.model_accepts_loss_kwargs trainer.train() # train with broken loss set_seed(42) model = AutoModelForCausalLM.from_pretrained(model_name) broken_loss_callback = StoreLossCallback() trainer = Trainer( model, args, train_dataset=tokenized_dataset, callbacks=[broken_loss_callback], data_collator=data_collator, ) # disable model_accepts_loss_kwargs so that "num_items_in_batch" is not passed to the model trainer.model_accepts_loss_kwargs = False trainer.train() # Calculate the difference between the base loss and the grad_accum loss diff_truth = [ abs(base - grad) for base, grad in zip(base_loss_callback.losses, grad_accum_loss_callback.losses) ] diff_broken = [abs(base - grad) for base, grad in 
zip(base_loss_callback.losses, broken_loss_callback.losses)] # all diff truth should be quite close self.assertLess(max(diff_truth), 0.01, f"Difference {max(diff_truth)} is not within 0.01") # max diff broken should be very off ("very off" is arbitrary, but as long as it's bigger than 0.1, it's fine) self.assertGreater(max(diff_broken), 0.7, f"Difference {max(diff_broken)} is not greater than 0.7") loss_base = sum(base_loss_callback.losses) loss_broken = sum(broken_loss_callback.losses) # mean/sum loss should not vary too much. relative_diff = abs(loss_base - loss_broken) / max(loss_base, loss_broken) self.assertLess(relative_diff, 0.2, f"Relative difference {relative_diff} is not within 0.2") def test_gradient_accumulation_loss_alignment_with_loss_func(self): set_seed(42) model_name = "roneneldan/TinyStories-33M" dataset_name = "wikitext" dataset_config = "wikitext-2-raw-v1" dataset = datasets.load_dataset(dataset_name, dataset_config, split="train[:40]") tokenizer = AutoTokenizer.from_pretrained(model_name) tokenizer.pad_token = tokenizer.eos_token def tokenize_function(examples): return tokenizer(examples["text"], max_length=16, padding="max_length", truncation=True) tokenized_dataset = dataset.map(tokenize_function, batched=True) tokenizer.pad_token = tokenizer.eos_token data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) model = AutoModelForCausalLM.from_pretrained(model_name) def compute_loss(logits, labels, vocab_size, num_items_in_batch, disable_num_items_in_batch=False): return ForCausalLMLoss( logits["logits"], labels, vocab_size, num_items_in_batch, disable_num_items_in_batch ) loss_fn = partial(compute_loss, vocab_size=model.config.vocab_size, disable_num_items_in_batch=False) base_loss_callback = StoreLossCallback() args_kwargs = { "report_to": "none", "logging_steps": 1, "max_steps": 5, "learning_rate": 3e-4, "disable_tqdm": True, } with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( tmp_dir, **args_kwargs, ) trainer = Trainer( model, args, train_dataset=tokenized_dataset, callbacks=[base_loss_callback], compute_loss_func=loss_fn, data_collator=data_collator, ) trainer.train() grad_accum_loss_callback = StoreLossCallback() with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( tmp_dir, **args_kwargs, gradient_accumulation_steps=2, per_device_train_batch_size=4, ) set_seed(42) model = AutoModelForCausalLM.from_pretrained(model_name) trainer = Trainer( model, args, train_dataset=tokenized_dataset, callbacks=[grad_accum_loss_callback], compute_loss_func=loss_fn, data_collator=data_collator, ) trainer.train() set_seed(42) model = AutoModelForCausalLM.from_pretrained(model_name) broken_loss_callback = StoreLossCallback() loss_fn = partial(compute_loss, vocab_size=model.config.vocab_size, disable_num_items_in_batch=True) trainer = Trainer( model, args, train_dataset=tokenized_dataset, callbacks=[broken_loss_callback], compute_loss_func=loss_fn, data_collator=data_collator, ) trainer.train() # Calculate the difference between the base loss and the grad_accum loss diff_truth = [ abs(base - grad) for base, grad in zip(base_loss_callback.losses, grad_accum_loss_callback.losses) ] diff_broken = [ abs(base - grad) for base, grad in zip(base_loss_callback.losses, broken_loss_callback.losses) ] # all diff truth should be quite close self.assertLess(max(diff_truth), 0.01, f"Difference {max(diff_truth)} is not within 0.01") # max diff broken should be very off self.assertGreater(max(diff_broken), 3, f"Difference 
{max(diff_broken)} is not greater than 3") def test_gradient_accumulation(self): # Training with half the batch size but accumulation steps as 2 should give the same training losses. with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( gradient_accumulation_steps=2, per_device_train_batch_size=4, learning_rate=0.1, output_dir=tmp_dir ) trainer.train() self.check_trained_model(trainer.model) def test_gradient_checkpointing(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( per_device_train_batch_size=1, learning_rate=0.1, gradient_checkpointing=True, gradient_checkpointing_kwargs={"use_reentrant": False}, output_dir=tmp_dir, ) previous_params = {k: v.detach().clone() for k, v in trainer.model.named_parameters()} trainer.train() # Check if model weights have been updated for k, v in trainer.model.named_parameters(): self.assertFalse( torch.allclose(previous_params[k], v, rtol=1e-4, atol=1e-4), f"Model weights for {k} have not been updated", ) def test_training_loss(self): n_gpus = max(1, backend_device_count(torch_device)) # With even logs with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(logging_steps=64 / (8 * n_gpus), output_dir=tmp_dir) trainer.train() log_history = trainer.state.log_history losses = [log["loss"] for log in log_history if "loss" in log] train_loss = log_history[-1]["train_loss"] self.assertAlmostEqual(sum(losses) / len(losses), train_loss, places=4) # With uneven logs with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(logging_steps=5, output_dir=tmp_dir) trainer.train() log_history = trainer.state.log_history # Training loss should be the same as before new_train_loss = log_history[-1]["train_loss"] self.assertAlmostEqual(train_loss, new_train_loss, places=4) def test_custom_optimizer(self): train_dataset = RegressionDataset() with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(tmp_dir, report_to="none") model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=1.0) lr_scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda x: 1.0) trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler)) trainer.train() (a, b) = self.default_trained_model self.assertFalse(torch.allclose(trainer.model.a, a)) self.assertFalse(torch.allclose(trainer.model.b, b)) self.assertEqual(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 1.0) def test_lr_scheduler_kwargs(self): # test scheduler kwargs passed via TrainingArguments train_dataset = RegressionDataset() model = RegressionModel() num_steps, num_warmup_steps = 10, 2 extra_kwargs = {"power": 5.0, "lr_end": 1e-5} # Non-default arguments with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( tmp_dir, lr_scheduler_type="polynomial", lr_scheduler_kwargs=extra_kwargs, learning_rate=0.2, warmup_steps=num_warmup_steps, report_to="none", ) trainer = Trainer(model, args, train_dataset=train_dataset) trainer.create_optimizer_and_scheduler(num_training_steps=num_steps) # Checking that the scheduler was created self.assertIsNotNone(trainer.lr_scheduler) # Checking that the correct args were passed sched1 = trainer.lr_scheduler sched2 = get_polynomial_decay_schedule_with_warmup( trainer.optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_steps, **extra_kwargs ) self.assertEqual(sched1.lr_lambdas[0].args, sched2.lr_lambdas[0].args) self.assertEqual(sched1.lr_lambdas[0].keywords, 
sched2.lr_lambdas[0].keywords) def test_cosine_with_min_lr_scheduler(self): train_dataset = RegressionDataset() model = RegressionModel() num_steps, num_warmup_steps = 10, 2 extra_kwargs = {"min_lr": 1e-5} # Non-default arguments with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( tmp_dir, lr_scheduler_type="cosine_with_min_lr", lr_scheduler_kwargs=extra_kwargs, learning_rate=0.2, warmup_steps=num_warmup_steps, report_to="none", ) trainer = Trainer(model, args, train_dataset=train_dataset) trainer.create_optimizer_and_scheduler(num_training_steps=num_steps) # Checking that the scheduler was created self.assertIsNotNone(trainer.lr_scheduler) # Check the last learning rate for _ in range(num_steps): trainer.lr_scheduler.step() self.assertEqual(trainer.lr_scheduler.get_last_lr()[0], 1e-5) def test_cosine_with_min_lr_schedule_with_warmup_lr_rate(self): train_dataset = RegressionDataset() model = RegressionModel() num_steps, num_warmup_steps = 10, 2 extra_kwargs = {"min_lr": 1e-5} # Non-default arguments args = TrainingArguments( "./regression", lr_scheduler_type="cosine_warmup_with_min_lr", lr_scheduler_kwargs=extra_kwargs, learning_rate=0.2, warmup_steps=num_warmup_steps, report_to="none", ) trainer = Trainer(model, args, train_dataset=train_dataset) trainer.create_optimizer_and_scheduler(num_training_steps=num_steps) # Checking that the scheduler was created self.assertIsNotNone(trainer.lr_scheduler) # Check the last learning rate step_lrs = [] for _ in range(num_steps): step_lrs.append(trainer.optimizer.param_groups[0]["lr"]) trainer.lr_scheduler.step() self.assertEqual(step_lrs[0], 0.1) self.assertEqual(step_lrs[1], 0.2) self.assertEqual(step_lrs[-1], 1e-05) def test_reduce_lr_on_plateau_args(self): # test passed arguments for a custom ReduceLROnPlateau scheduler train_dataset = RegressionDataset(length=64) eval_dataset = RegressionDataset(length=64) with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( tmp_dir, eval_strategy="epoch", metric_for_best_model="eval_loss", report_to="none", ) model = RegressionModel() optimizer = torch.optim.SGD(model.parameters(), lr=1.0) lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.2, patience=5, cooldown=2) trainer = Trainer( model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, optimizers=(optimizer, lr_scheduler), ) trainer.train() self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) self.assertEqual(trainer.lr_scheduler.factor, 0.2) self.assertEqual(trainer.lr_scheduler.patience, 5) self.assertEqual(trainer.lr_scheduler.cooldown, 2) def test_reduce_lr_on_plateau(self): # test the ReduceLROnPlateau scheduler class TrainerWithLRLogs(Trainer): def log(self, logs): # the LR is computed after metrics and does not exist for the first epoch if hasattr(self.lr_scheduler, "_last_lr"): logs["learning_rate"] = self.lr_scheduler._last_lr[0] super().log(logs) train_dataset = RegressionDataset(length=64) eval_dataset = RegressionDataset(length=64) with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( tmp_dir, lr_scheduler_type="reduce_lr_on_plateau", eval_strategy="epoch", metric_for_best_model="eval_loss", num_train_epochs=10, learning_rate=0.2, report_to="none", ) model = RegressionModel() trainer = TrainerWithLRLogs(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() self.assertIsInstance(trainer.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau) patience = trainer.lr_scheduler.patience 
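# The loop below re-implements ReduceLROnPlateau's bookkeeping on the logged history: every epoch
# whose eval_loss fails to improve on the best loss so far counts as a "bad" epoch; once more than
# `patience` bad epochs accumulate, the next logged learning rate must be strictly smaller,
# otherwise it must stay unchanged.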
logs = trainer.state.log_history[1:] best_loss = logs[0]["eval_loss"] bad_epochs = 0 for i, log in enumerate(logs[:-1]): # Compare learning rate to next epoch's loss = log["eval_loss"] just_decreased = False if loss > best_loss: bad_epochs += 1 if bad_epochs > patience: self.assertLess(logs[i + 1]["learning_rate"], log["learning_rate"]) just_decreased = True bad_epochs = 0 else: best_loss = loss bad_epochs = 0 if not just_decreased: self.assertEqual(logs[i + 1]["learning_rate"], log["learning_rate"]) def test_adafactor_lr_none(self): # test the special case where lr=None, since Trainer can't not have lr_scheduler from transformers.optimization import Adafactor, AdafactorSchedule train_dataset = RegressionDataset() with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(tmp_dir, report_to="none") model = RegressionModel() optimizer = Adafactor( model.parameters(), scale_parameter=True, relative_step=True, warmup_init=True, lr=None ) lr_scheduler = AdafactorSchedule(optimizer) trainer = Trainer(model, args, train_dataset=train_dataset, optimizers=(optimizer, lr_scheduler)) trainer.train() (a, b) = self.default_trained_model self.assertFalse(torch.allclose(trainer.model.a, a)) self.assertFalse(torch.allclose(trainer.model.b, b)) self.assertGreater(trainer.optimizer.state_dict()["param_groups"][0]["lr"], 0) @require_torch_bf16 @require_torch_accelerator def test_mixed_bf16(self): # very basic test with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(learning_rate=0.1, bf16=True, output_dir=tmp_dir) trainer.train() self.check_trained_model(trainer.model, atol=ATOL, rtol=RTOL) # --bf16 --half_precision_backend apex can't be used together with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(ValueError): trainer = get_regression_trainer( learning_rate=0.1, bf16=True, half_precision_backend="apex", output_dir=tmp_dir ) # will add more specific tests once there are some bugs to fix @require_torch_gpu @require_torch_tf32 def test_tf32(self): # very basic test with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(learning_rate=0.1, tf32=True, output_dir=tmp_dir) trainer.train() self.check_trained_model(trainer.model) @require_torch @require_sentencepiece @require_tokenizers class TrainerIntegrationTest(TestCasePlus, TrainerIntegrationCommon): def setUp(self): super().setUp() args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_trainer_works_with_dict(self): # Edge case because Apex with mode O2 will change our models to return dicts. This test checks it doesn't break # anything. train_dataset = RegressionDataset() eval_dataset = RegressionDataset() model = RegressionDictModel() args = TrainingArguments(self.get_auto_remove_tmp_dir(), report_to="none") trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() _ = trainer.evaluate() _ = trainer.predict(eval_dataset) def test_training_arguments_are_left_untouched(self): tmp_dir = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer(output_dir=tmp_dir) trainer.train() args = TrainingArguments(tmp_dir, report_to=[]) dict1, dict2 = args.to_dict(), trainer.args.to_dict() for key in dict1: # Logging dir can be slightly different as they default to something with the time. 
if key != "logging_dir": self.assertEqual(dict1[key], dict2[key]) def test_number_of_steps_in_training(self): # Regular training has n_epochs * len(train_dl) steps tmp_dir = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer(learning_rate=0.1, output_dir=tmp_dir) train_output = trainer.train() self.assertEqual(train_output.global_step, self.n_epochs * 64 / self.batch_size) # Check passing num_train_epochs works (and a float version too): trainer = get_regression_trainer(learning_rate=0.1, num_train_epochs=1.5, output_dir=tmp_dir) train_output = trainer.train() self.assertEqual(train_output.global_step, int(1.5 * 64 / self.batch_size)) # If we pass a max_steps, num_train_epochs is ignored trainer = get_regression_trainer(learning_rate=0.1, max_steps=10, output_dir=tmp_dir) train_output = trainer.train() self.assertEqual(train_output.global_step, 10) @pytest.mark.torch_compile_test def test_torch_compile_loss_func_compatibility(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) args = TrainingArguments( self.get_auto_remove_tmp_dir(), per_device_train_batch_size=2, torch_compile=True, max_steps=1, # compile happens on the first step report_to="none", ) trainer = Trainer(model=tiny_llama, args=args, train_dataset=train_dataset) # noqa trainer.train() @require_peft @require_bitsandbytes @pytest.mark.torch_compile_test def test_bnb_compile(self): from peft import LoraConfig, get_peft_model # Simply tests if initializing a Trainer with a PEFT + compiled model works out of the box # QLoRA + torch compile is not really supported yet, but we should at least support the model # loading and let torch throw the tiny_model = AutoModelForCausalLM.from_pretrained( "hf-internal-testing/tiny-random-LlamaForCausalLM", load_in_4bit=True ) peft_config = LoraConfig( r=8, lora_alpha=32, target_modules=["q_proj", "k_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) tiny_model = get_peft_model(tiny_model, peft_config) tiny_model = torch.compile(tiny_model) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, ) with self.assertRaises(ValueError): _ = Trainer(tiny_model, args, train_dataset=train_dataset) # noqa @require_peft def test_multiple_peft_adapters(self): from peft import LoraConfig, get_peft_model # Tests if resuming from checkpoint works if the model has multiple adapters MODEL_ID = "hf-internal-testing/tiny-random-LlamaForCausalLM" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) tiny_model = AutoModelForCausalLM.from_pretrained(MODEL_ID) peft_config = LoraConfig( r=4, lora_alpha=16, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) tiny_model = get_peft_model(tiny_model, peft_config, "adapter1") tiny_model.add_adapter("adapter2", peft_config) train_dataset = LineByLineTextDataset( tokenizer=tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=tokenizer.max_len_single_sentence, ) for example in train_dataset.examples: example["labels"] = example["input_ids"] tokenizer.pad_token = tokenizer.eos_token tmp_dir = self.get_auto_remove_tmp_dir() args = TrainingArguments( tmp_dir, per_device_train_batch_size=1, learning_rate=1e-9, save_steps=5, logging_steps=5, max_steps=10, use_cpu=True, ) trainer = Trainer(tiny_model, args, processing_class=tokenizer, train_dataset=train_dataset) trainer.train() 
parameters = dict(tiny_model.named_parameters()) state = dataclasses.asdict(trainer.state) # Reinitialize trainer trainer = Trainer(tiny_model, args, processing_class=tokenizer, train_dataset=train_dataset) checkpoint = os.path.join(tmp_dir, "checkpoint-5") trainer.train(resume_from_checkpoint=checkpoint) parameters1 = dict(tiny_model.named_parameters()) state1 = dataclasses.asdict(trainer.state) self.assertEqual(parameters, parameters1) self.check_trainer_state_are_the_same(state, state1) @require_bitsandbytes def test_rmsprop_bnb(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, optim="rmsprop_bnb", ) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) # Check that it trains without errors trainer.train() @require_bitsandbytes def test_ademamix_bnb(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, optim="ademamix", ) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) # Check that it trains without errors trainer.train() @require_bitsandbytes def test_ademamix_bnb_8bit(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, optim="ademamix_8bit", ) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) # Check that it trains without errors trainer.train() @require_bitsandbytes def test_rmsprop_bnb_8bit(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, optim="rmsprop_bnb_8bit", ) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) # Check that it trains without errors trainer.train() @require_bitsandbytes def test_rmsprop_bnb_32bit(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, logging_nan_inf_filter=False, optim="rmsprop_bnb_32bit", ) trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset) # Check that it trains without errors trainer.train() def test_neftune(self): config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, 
            logging_nan_inf_filter=False,
            neftune_noise_alpha=0.4,
            report_to="none",
        )
        trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
        trainer.model = trainer._activate_neftune(trainer.model)
        dummy_input = torch.LongTensor([[1, 0, 1]]).to(torch_device)
        emb1 = trainer.model.get_input_embeddings()(dummy_input)
        emb2 = trainer.model.get_input_embeddings()(dummy_input)
        self.assertFalse(torch.allclose(emb1, emb2), "Neftune noise is not applied!")
        # redefine the model
        tiny_gpt2 = GPT2LMHeadModel(config)
        # Trainer without inf/nan filter
        args = TrainingArguments(
            self.get_auto_remove_tmp_dir(),
            learning_rate=1e-9,
            logging_steps=5,
            logging_nan_inf_filter=False,
            neftune_noise_alpha=0.4,
            report_to="none",
        )
        trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
        # Check that it trains without errors
        trainer.train()
        # Make sure forward pass works fine
        _ = trainer.model(dummy_input)
        self.assertTrue(len(trainer.model.get_input_embeddings()._forward_hooks) == 0)
        trainer.model.eval()
        # Check that we get identical embeddings just in case
        emb1 = trainer.model.get_input_embeddings()(dummy_input)
        emb2 = trainer.model.get_input_embeddings()(dummy_input)
        torch.testing.assert_close(emb1, emb2)

    def test_logging_inf_nan_filter(self):
        config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4)
        tiny_gpt2 = GPT2LMHeadModel(config)
        x = torch.randint(0, 100, (128,))
        train_dataset = RepeatDataset(x)

        # Trainer without inf/nan filter
        args = TrainingArguments(
            self.get_auto_remove_tmp_dir(),
            learning_rate=1e9,
            logging_steps=5,
            logging_nan_inf_filter=False,
            report_to="none",
        )
        trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
        trainer.train()
        log_history_no_filter = trainer.state.log_history

        # Trainer with inf/nan filter
        args = TrainingArguments(
            self.get_auto_remove_tmp_dir(),
            learning_rate=1e9,
            logging_steps=5,
            logging_nan_inf_filter=True,
            report_to="none",
        )
        trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset)
        trainer.train()
        log_history_filter = trainer.state.log_history

        def is_any_loss_nan_or_inf(log_history):
            losses = [l["loss"] for l in log_history[:-1]]
            return any(math.isnan(x) for x in losses) or any(math.isinf(x) for x in losses)

        self.assertTrue(is_any_loss_nan_or_inf(log_history_no_filter))
        self.assertFalse(is_any_loss_nan_or_inf(log_history_filter))

    def test_train_and_eval_dataloaders(self):
        if torch_device in ["cuda"]:
            n_gpu = max(1, backend_device_count(torch_device))
        else:
            # DP is deprecated by PyTorch; accelerators like XPU don't support DP
            n_gpu = 1

        tmp_dir = self.get_auto_remove_tmp_dir()
        trainer = get_regression_trainer(learning_rate=0.1, per_device_train_batch_size=16, output_dir=tmp_dir)
        self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16 * n_gpu)
        trainer = get_regression_trainer(learning_rate=0.1, per_device_eval_batch_size=16, output_dir=tmp_dir)
        self.assertEqual(trainer.get_eval_dataloader().total_batch_size, 16 * n_gpu)

        # Check drop_last works
        trainer = get_regression_trainer(
            train_len=66,
            eval_len=74,
            learning_rate=0.1,
            per_device_train_batch_size=16,
            per_device_eval_batch_size=32,
            output_dir=tmp_dir,
        )
        self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu) + 1)
        self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu) + 1)

        trainer = get_regression_trainer(
            train_len=66,
            eval_len=74,
            learning_rate=0.1,
            per_device_train_batch_size=16,
            per_device_eval_batch_size=32,
            dataloader_drop_last=True,
            output_dir=tmp_dir,
        )
        self.assertEqual(len(trainer.get_train_dataloader()), 66 // (16 * n_gpu))
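        # Worked example of the dataloader-length arithmetic asserted here (single device, n_gpu == 1):
        # 66 training samples with batch size 16 give ceil(66 / 16) = 5 batches when drop_last=False
        # (66 // 16 + 1) and 66 // 16 = 4 batches when drop_last=True; the eval side behaves the same
        # with 74 samples and batch size 32 (3 vs. 2 batches).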
self.assertEqual(len(trainer.get_eval_dataloader()), 74 // (32 * n_gpu)) # Check passing a new dataset for evaluation works new_eval_dataset = RegressionDataset(length=128) self.assertEqual(len(trainer.get_eval_dataloader(new_eval_dataset)), 128 // (32 * n_gpu)) # tests that we do not require dataloader to have a .dataset attribute def test_dataloader_without_dataset(self): train_dataset = RegressionDataset(length=128) trainer = CustomDataloaderTrainer( model=RegressionModel(), train_dataset=train_dataset, eval_dataset=train_dataset, args=TrainingArguments(output_dir=self.get_auto_remove_tmp_dir(), report_to="none"), ) trainer.train() trainer.evaluate() def test_get_eval_dataloader_without_persistent_workers(self): train_dataset = RegressionDataset() config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) args = TrainingArguments(self.get_auto_remove_tmp_dir(), report_to="none", dataloader_persistent_workers=False) # Single evaluation dataset eval_dataset = RegressionDataset() trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset, eval_dataset=eval_dataset) # Mocking the prepare method to avoid the dataloader changing with each call to get_eval_dataloader trainer.accelerator.prepare = lambda x: x default_dataloader = trainer.get_eval_dataloader() dataloader_with_dataset = trainer.get_eval_dataloader(eval_dataset) self.assertEqual(default_dataloader.dataset, eval_dataset) self.assertEqual(dataloader_with_dataset.dataset, eval_dataset) self.assertNotEqual(default_dataloader, dataloader_with_dataset) # Multiple evaluation datasets first_dataset = RegressionDataset() second_dataset = RegressionDataset() trainer = Trainer( tiny_gpt2, args, train_dataset=train_dataset, eval_dataset={"first": first_dataset, "second": second_dataset}, ) # Mocking the prepare method to avoid the dataloader changing with each call to get_eval_dataloader trainer.accelerator.prepare = lambda x: x first_dataloader = trainer.get_eval_dataloader("first") first_dataloader_repeated = trainer.get_eval_dataloader("first") second_dataloader = trainer.get_eval_dataloader("second") second_dataloader_repeated = trainer.get_eval_dataloader("second") self.assertEqual(first_dataset, first_dataloader.dataset) self.assertEqual(first_dataloader.dataset, first_dataloader_repeated.dataset) self.assertEqual(second_dataset, second_dataloader.dataset) self.assertEqual(second_dataloader.dataset, second_dataloader_repeated.dataset) self.assertNotEqual(first_dataloader, first_dataloader_repeated) self.assertNotEqual(second_dataloader, second_dataloader_repeated) def test_get_eval_dataloader_with_persistent_workers(self): train_dataset = RegressionDataset() config = GPT2Config(vocab_size=100, n_positions=128, n_embd=32, n_layer=3, n_head=4) tiny_gpt2 = GPT2LMHeadModel(config) args = TrainingArguments( self.get_auto_remove_tmp_dir(), report_to="none", dataloader_persistent_workers=True, dataloader_num_workers=2, ) # Single evaluation dataset eval_dataset = RegressionDataset() trainer = Trainer(tiny_gpt2, args, train_dataset=train_dataset, eval_dataset=eval_dataset) # Mocking the prepare method to avoid the dataloader changing with each call to get_eval_dataloader trainer.accelerator.prepare = lambda x: x default_dataloader = trainer.get_eval_dataloader() dataloader_with_dataset = trainer.get_eval_dataloader(eval_dataset) self.assertEqual(default_dataloader.dataset, eval_dataset) self.assertEqual(dataloader_with_dataset.dataset, eval_dataset) 
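# With dataloader_persistent_workers=True the Trainer reuses the eval dataloader it already built,
# so repeated get_eval_dataloader() calls are expected to return the very same object (assertEqual
# below), unlike the non-persistent case above where every call constructs a fresh dataloader
# (assertNotEqual).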
self.assertEqual(default_dataloader, dataloader_with_dataset) # Multiple evaluation datasets first_dataset = RegressionDataset() second_dataset = RegressionDataset() trainer = Trainer( tiny_gpt2, args, train_dataset=train_dataset, eval_dataset={"first": first_dataset, "second": second_dataset}, ) # Mocking the prepare method to avoid the dataloader changing with each call to get_eval_dataloader trainer.accelerator.prepare = lambda x: x first_dataloader = trainer.get_eval_dataloader("first") first_dataloader_repeated = trainer.get_eval_dataloader("first") second_dataloader = trainer.get_eval_dataloader("second") second_dataloader_repeated = trainer.get_eval_dataloader("second") self.assertEqual(first_dataset, first_dataloader.dataset) self.assertEqual(first_dataloader.dataset, first_dataloader_repeated.dataset) self.assertEqual(second_dataset, second_dataloader.dataset) self.assertEqual(second_dataloader.dataset, second_dataloader_repeated.dataset) self.assertEqual(first_dataloader, first_dataloader_repeated) self.assertEqual(second_dataloader, second_dataloader_repeated) @require_liger_kernel def test_use_liger_kernel_patching(self): # Ensure any monkey patching is cleaned up for subsequent tests with patch("transformers.models.llama.modeling_llama"): from liger_kernel.transformers import LigerRMSNorm, liger_rotary_pos_emb from transformers.models.llama import modeling_llama config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) # Spot check that modeling code and model instance variables are not yet patched self.assertNotEqual(modeling_llama.apply_rotary_pos_emb, liger_rotary_pos_emb) self.assertFalse(isinstance(tiny_llama.model.norm, LigerRMSNorm)) args = TrainingArguments( self.get_auto_remove_tmp_dir(), use_liger_kernel=True, ) Trainer(tiny_llama, args) # Spot check that modeling code and model instance variables are patched self.assertEqual(modeling_llama.apply_rotary_pos_emb, liger_rotary_pos_emb) self.assertTrue(isinstance(tiny_llama.model.norm, LigerRMSNorm)) @require_liger_kernel def test_use_liger_kernel_custom_config_patching(self): # Ensure any monkey patching is cleaned up for subsequent tests with patch("transformers.models.llama.modeling_llama"): from liger_kernel.transformers import LigerRMSNorm config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) args = TrainingArguments( self.get_auto_remove_tmp_dir(), use_liger_kernel=True, liger_kernel_config={"rms_norm": False}, # Don't apply Liger's RMSNorm ) Trainer(tiny_llama, args) # Check that the RMSNorm kernel is not applied as specified in the config self.assertFalse(isinstance(tiny_llama.model.norm, LigerRMSNorm)) @require_liger_kernel @require_torch_accelerator def test_use_liger_kernel_trainer(self): # Check that trainer still works with liger kernel applied config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-2, logging_steps=5, max_steps=20, use_liger_kernel=True ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_liger_kernel @require_torch_accelerator def test_use_liger_kernel_custom_config_trainer(self): # Check that trainer still works with liger kernel applied when using a 
custom config config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-2, logging_steps=5, max_steps=20, use_liger_kernel=True, liger_kernel_config={"rms_norm": False, "cross_entropy": True, "fused_linear_cross_entropy": False}, ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_lomo @require_torch_accelerator def test_lomo(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) previous_params = {n: p.clone() for n, p in tiny_llama.named_parameters()} x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-2, logging_steps=5, optim="lomo", max_steps=20 ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() for name, param in tiny_llama.named_parameters(): self.assertFalse(torch.allclose(param, previous_params[name].to(param.device), rtol=1e-12, atol=1e-12)) @require_lomo @require_torch_accelerator def test_adalomo(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, optim="adalomo", ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_grokadamw @require_torch_accelerator def test_grokadamw(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=2e-5, logging_steps=5, optim="grokadamw", max_steps=20, ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_schedulefree @require_torch_accelerator def test_schedulefree_adam(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) with tempfile.TemporaryDirectory() as tmpdir: # Trainer without inf/nan filter args = TrainingArguments( tmpdir, learning_rate=1e-9, logging_steps=5, optim="schedule_free_adamw", lr_scheduler_type="constant", ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_schedulefree @require_torch_accelerator def test_schedulefree_radam(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) with tempfile.TemporaryDirectory() as tmpdir: # Trainer without inf/nan filter args = TrainingArguments( tmpdir, learning_rate=1e-9, logging_steps=5, lr_scheduler_type="constant", optim="schedule_free_radam", ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = 
trainer.train() def test_galore_matched_modules(self): regex_patterns = [r".*.attn.*", r".*.mlp.*"] module_names = [ "model.transformer.h.0.ln_1", "model.transformer.h.0.attn.q_proj", "model.lm_head", "model.transformer.h.0.mlp.up_proj", ] expected_values = [False, True, False, True] for expected_value, module_name in zip(expected_values, module_names): is_module_matched, is_regex = check_target_module_exists(regex_patterns, module_name, return_is_regex=True) self.assertTrue(is_module_matched == expected_value) if is_module_matched: self.assertTrue(is_regex) exact_patterns = ["q_proj", "up_proj"] module_names = [ "model.transformer.h.0.ln_1", "model.transformer.h.0.attn.q_proj", "model.lm_head", "model.transformer.h.0.mlp.up_proj", ] expected_values = [False, True, False, True] for expected_value, module_name in zip(expected_values, module_names): is_module_matched, is_regex = check_target_module_exists(exact_patterns, module_name, return_is_regex=True) self.assertTrue(is_module_matched == expected_value) if is_module_matched: self.assertFalse(is_regex) simple_regex = r".*.attn.*" module_names = [ "model.transformer.h.0.ln_1", "model.transformer.h.0.attn.q_proj", "model.lm_head", "model.transformer.h.0.mlp.up_proj", ] expected_values = [False, True, False, False] for expected_value, module_name in zip(expected_values, module_names): is_module_matched, is_regex = check_target_module_exists(simple_regex, module_name, return_is_regex=True) self.assertTrue(is_module_matched == expected_value) if is_module_matched: self.assertTrue(is_regex) simple_regex = "model.transformer.h.0.attn.q_proj" module_names = [ "model.transformer.h.0.ln_1", "model.transformer.h.0.attn.q_proj", "model.lm_head", "model.transformer.h.0.mlp.up_proj", ] expected_values = [False, True, False, False] for expected_value, module_name in zip(expected_values, module_names): is_module_matched, is_regex = check_target_module_exists(simple_regex, module_name, return_is_regex=True) self.assertTrue(is_module_matched == expected_value) if is_module_matched: self.assertFalse(is_regex) target_modules = ["attn", "mlp"] module_names = [ "model.transformer.h.0.ln_1", "model.transformer.h.0.attn.q_proj", "model.lm_head", "model.transformer.h.0.mlp.up_proj", ] expected_values = [False, True, False, True] for expected_value, module_name in zip(expected_values, module_names): is_module_matched, is_regex = check_target_module_exists(target_modules, module_name, return_is_regex=True) self.assertTrue(is_module_matched == expected_value) if is_module_matched: self.assertFalse(is_regex) @require_galore_torch @require_torch_accelerator def test_galore(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, optim="galore_adamw", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_galore_torch @require_torch_accelerator def test_galore_extra_args(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, 
logging_steps=5, optim="galore_adamw", optim_args="rank=64, update_proj_gap=100, scale=0.10", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_galore_torch @require_torch_accelerator def test_galore_layerwise(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, optim="galore_adamw_layerwise", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_galore_torch @require_torch_accelerator def test_galore_layerwise_with_scheduler(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, optim="galore_adamw_layerwise", lr_scheduler_type="cosine", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_galore_torch @require_torch_accelerator def test_galore_adamw_8bit(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, optim="galore_adamw_8bit", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_galore_torch @require_torch_accelerator def test_galore_adafactor(self): # These are the intervals of the peak memory usage of training such a tiny model # if the peak memory goes outside that range, then we know there might be a bug somewhere upper_bound_pm = 700 lower_bound_pm = 650 config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) with tempfile.TemporaryDirectory() as tmpdir, TorchTracemalloc() as tracemalloc: # Trainer without inf/nan filter args = TrainingArguments( tmpdir, learning_rate=1e-9, logging_steps=5, optim="galore_adafactor", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() galore_peak_memory = tracemalloc.peaked + bytes2megabytes(tracemalloc.begin) self.assertTrue(galore_peak_memory < upper_bound_pm) self.assertTrue(lower_bound_pm < galore_peak_memory) @require_galore_torch @require_torch_accelerator def test_galore_adafactor_attention_only(self): # These are the intervals of the peak memory usage of training such a tiny model # if the peak memory goes outside that range, then we know there might be a bug somewhere upper_bound_pm = 700 lower_bound_pm = 650 config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = 
LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) with tempfile.TemporaryDirectory() as tmpdir, TorchTracemalloc() as tracemalloc: # Trainer without inf/nan filter args = TrainingArguments( tmpdir, learning_rate=1e-9, logging_steps=5, optim="galore_adafactor", optim_target_modules=["q_proj", "k_proj", "v_proj"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() galore_peak_memory = tracemalloc.peaked + bytes2megabytes(tracemalloc.begin) self.assertTrue(galore_peak_memory < upper_bound_pm) self.assertTrue(lower_bound_pm < galore_peak_memory) @require_galore_torch @require_torch_accelerator def test_galore_adafactor_all_linear(self): # These are the intervals of the peak memory usage of training such a tiny model # if the peak memory goes outside that range, then we know there might be a bug somewhere upper_bound_pm = 700 lower_bound_pm = 650 config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) with tempfile.TemporaryDirectory() as tmpdir, TorchTracemalloc() as tracemalloc: # Trainer without inf/nan filter args = TrainingArguments( tmpdir, learning_rate=1e-9, logging_steps=5, optim="galore_adafactor", optim_target_modules="all-linear", ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() galore_peak_memory = tracemalloc.peaked + bytes2megabytes(tracemalloc.begin) self.assertTrue(galore_peak_memory < upper_bound_pm) self.assertTrue(lower_bound_pm < galore_peak_memory) @require_galore_torch @require_torch_accelerator def test_galore_lr_display_without_scheduler(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) learning_rate = 1e-9 num_steps = 10 # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=learning_rate, logging_steps=5, optim="galore_adamw", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) trainer.create_optimizer_and_scheduler(num_training_steps=num_steps) # reflects displayed lr in trainer self.assertEqual(trainer.get_learning_rates(), [learning_rate, learning_rate]) @require_galore_torch @require_torch_accelerator def test_galore_lr_display_with_scheduler(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) learning_rate = 2e-4 num_train_epochs = 2 num_warmup_steps = 5 # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), num_train_epochs=num_train_epochs, learning_rate=learning_rate, warmup_steps=num_warmup_steps, lr_scheduler_type="cosine", logging_steps=1, optim="galore_adamw", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # creating log history of trainer, results don't matter trainer.train() logs = trainer.state.log_history[1:-1] # reach given learning rate peak and end with 0 lr self.assertTrue(logs[num_warmup_steps - 1]["learning_rate"] == learning_rate) # self.assertTrue(logs[-1]["learning_rate"] == 0) 
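# The exact-zero check above was relaxed: with a cosine schedule the last logged learning rate lands
# near, but not exactly at, zero (presumably due to floating-point rounding in the schedule lambda),
# hence the np.allclose with atol=5e-6 below.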
self.assertTrue(np.allclose(logs[-1]["learning_rate"], 0, atol=5e-6)) # increasing and decreasing pattern of lrs increasing_lrs = [ logs[i]["learning_rate"] < logs[i + 1]["learning_rate"] for i in range(len(logs)) if i < num_warmup_steps - 1 ] decreasing_lrs = [ logs[i]["learning_rate"] > logs[i + 1]["learning_rate"] for i in range(len(logs) - 1) if i >= num_warmup_steps - 1 ] self.assertTrue(all(increasing_lrs)) self.assertTrue(all(decreasing_lrs)) # warm up steps << total steps self.assertTrue(len(decreasing_lrs) > len(increasing_lrs)) @require_apollo_torch @require_torch_accelerator def test_apollo(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, optim="apollo_adamw", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_apollo_torch @require_torch_accelerator def test_apollo_extra_args(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, optim="apollo_adamw", optim_args="proj=random,scale_type=tensor,rank=1,update_proj_gap=100,scale=128.0", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_apollo_torch @require_torch_accelerator def test_apollo_layerwise(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, optim="apollo_adamw_layerwise", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_apollo_torch @require_torch_accelerator def test_apollo_layerwise_with_scheduler(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, optim="apollo_adamw_layerwise", lr_scheduler_type="cosine", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_apollo_torch @require_torch_accelerator def test_apollo_lr_display_without_scheduler(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) learning_rate = 1e-9 num_steps = 10 # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=learning_rate, logging_steps=5, optim="apollo_adamw", 
optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) trainer.create_optimizer_and_scheduler(num_training_steps=num_steps) # reflects displayed lr in trainer self.assertEqual(trainer.get_learning_rates(), [learning_rate, learning_rate]) @require_apollo_torch @require_torch_accelerator def test_apollo_lr_display_with_scheduler(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) learning_rate = 2e-4 num_train_epochs = 10 num_warmup_steps = 5 # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), num_train_epochs=num_train_epochs, learning_rate=learning_rate, warmup_steps=num_warmup_steps, lr_scheduler_type="cosine", logging_steps=1, optim="apollo_adamw", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # creating log history of trainer, results don't matter trainer.train() logs = trainer.state.log_history[1:][:-1] # reach given learning rate peak and end with 0 lr self.assertTrue(logs[num_warmup_steps - 2]["learning_rate"] == learning_rate) self.assertTrue(logs[-1]["learning_rate"] == 0) # increasing and decreasing pattern of lrs increasing_lrs = [ logs[i]["learning_rate"] < logs[i + 1]["learning_rate"] for i in range(len(logs)) if i < num_warmup_steps - 2 ] decreasing_lrs = [ logs[i]["learning_rate"] > logs[i + 1]["learning_rate"] for i in range(len(logs) - 1) if i >= num_warmup_steps - 2 ] self.assertTrue(all(increasing_lrs)) self.assertTrue(all(decreasing_lrs)) # warm up steps << total steps self.assertTrue(len(decreasing_lrs) > len(increasing_lrs)) @require_torch_optimi @require_torch_accelerator def test_stable_adamw(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, optim="stable_adamw", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) _ = trainer.train() @require_torch_optimi @require_torch_accelerator def test_stable_adamw_extra_args(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=1e-9, logging_steps=5, optim="stable_adamw", optim_args="decouple_lr=True,max_lr=1e-3,kahan_sum=True", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # Check this works _ = trainer.train() @require_torch_optimi @require_torch_accelerator def test_stable_adamw_lr_display_without_scheduler(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) learning_rate = 1e-9 num_steps = 10 # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=learning_rate, logging_steps=5, optim="stable_adamw", optim_target_modules=[r".*attn.*", 
r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) trainer.create_optimizer_and_scheduler(num_training_steps=num_steps) # reflects displayed lr in trainer self.assertEqual(trainer.get_learning_rates(), [learning_rate, learning_rate]) @require_torch_optimi @require_torch_accelerator def test_stable_adamw_trainer_adamw_args(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) learning_rate = 1e-9 num_steps = 10 # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), learning_rate=learning_rate, logging_steps=5, weight_decay=0.001, adam_beta1=0.89, adam_beta2=0.98, adam_epsilon=1e-8, optim="stable_adamw", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) trainer.create_optimizer_and_scheduler(num_training_steps=num_steps) # check StableAdamW optimizer is created with the correct parameters self.assertEqual(trainer.optimizer.defaults["beta1"], args.adam_beta1) self.assertEqual(trainer.optimizer.defaults["beta2"], args.adam_beta2) self.assertEqual(trainer.optimizer.defaults["eps"], args.adam_epsilon) self.assertEqual(trainer.optimizer.defaults["weight_decay"], args.weight_decay) @require_torch_optimi @require_torch_accelerator def test_stable_adamw_lr_display_with_scheduler(self): config = LlamaConfig(vocab_size=100, hidden_size=32, num_hidden_layers=3, num_attention_heads=4) tiny_llama = LlamaForCausalLM(config) x = torch.randint(0, 100, (128,)) train_dataset = RepeatDataset(x) learning_rate = 2e-4 num_train_epochs = 10 num_warmup_steps = 5 # Trainer without inf/nan filter args = TrainingArguments( self.get_auto_remove_tmp_dir(), num_train_epochs=num_train_epochs, learning_rate=learning_rate, warmup_steps=num_warmup_steps, lr_scheduler_type="cosine", logging_steps=1, optim="stable_adamw", optim_target_modules=[r".*attn.*", r".*mlp.*"], ) trainer = Trainer(tiny_llama, args, train_dataset=train_dataset) # creating log history of trainer, results don't matter trainer.train() logs = trainer.state.log_history[1:][:-1] # reach given learning rate peak and end with 0 lr self.assertTrue(logs[num_warmup_steps - 1]["learning_rate"] == learning_rate) self.assertTrue(np.allclose(logs[-1]["learning_rate"], 0, atol=5e-6)) # increasing and decreasing pattern of lrs increasing_lrs = [ logs[i]["learning_rate"] < logs[i + 1]["learning_rate"] for i in range(len(logs)) if i < num_warmup_steps - 1 ] decreasing_lrs = [ logs[i]["learning_rate"] > logs[i + 1]["learning_rate"] for i in range(len(logs) - 1) if i >= num_warmup_steps - 1 ] self.assertTrue(all(increasing_lrs)) self.assertTrue(all(decreasing_lrs)) # warm up steps << total steps self.assertTrue(len(decreasing_lrs) > len(increasing_lrs)) @require_torch_multi_accelerator def test_data_is_not_parallelized_when_model_is_parallel(self): model = RegressionModel() # Make the Trainer believe it's a parallelized model model.is_parallelizable = True model.model_parallel = True with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments( tmp_dir, per_device_train_batch_size=16, per_device_eval_batch_size=16, report_to="none" ) trainer = Trainer(model, args, train_dataset=RegressionDataset(), eval_dataset=RegressionDataset()) # Check the Trainer was fooled self.assertTrue(trainer.is_model_parallel) self.assertEqual(trainer.args.n_gpu, 1) # The batch size of the training and 
evaluation dataloaders should be 16, not 16 * n_gpu self.assertEqual(trainer.get_train_dataloader().total_batch_size, 16) self.assertEqual(len(trainer.get_train_dataloader()), 64 // 16) self.assertEqual(trainer.get_eval_dataloader().total_batch_size, 16) self.assertEqual(len(trainer.get_eval_dataloader()), 64 // 16) def test_evaluate(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), output_dir=tmp_dir) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer( a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy(), output_dir=tmp_dir ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With logits preprocess trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=lambda logits, labels: logits + 1, output_dir=tmp_dir, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) def test_evaluate_with_batch_eval_metrics(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True, output_dir=tmp_dir ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer( a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True, output_dir=tmp_dir, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With logits preprocess trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True, preprocess_logits_for_metrics=lambda logits, labels: logits + 1, output_dir=tmp_dir, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) def test_evaluate_with_jit(self): with 
tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), jit_mode_eval=True, output_dir=tmp_dir ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer( a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracy(), jit_mode_eval=True, output_dir=tmp_dir ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With logits preprocess trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracy(), preprocess_logits_for_metrics=lambda logits, labels: logits + 1, jit_mode_eval=True, output_dir=tmp_dir, ) results = trainer.evaluate() x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred + 1, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) def test_predict(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(a=1.5, b=2.5, output_dir=tmp_dir) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, output_dir=tmp_dir) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With more than one output of the model trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, output_dir=tmp_dir) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) # With more than one output/label of the model trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"], output_dir=tmp_dir ) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) def test_predict_with_batch_eval_metrics(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( a=1.5, b=2.5, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True, output_dir=tmp_dir ) results = trainer.predict(trainer.eval_dataset) preds = results.predictions x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] gt = 1.5 * x + 2.5 self.assertTrue(np.allclose(preds, gt)) expected_acc = AlmostAccuracy()((preds, y))["accuracy"] 
self.assertAlmostEqual(results.metrics["test_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer( a=1.5, b=2.5, eval_len=66, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True, output_dir=tmp_dir, ) results = trainer.predict(trainer.eval_dataset) preds = results.predictions x, y = trainer.eval_dataset.x, trainer.eval_dataset.ys[0] self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) expected_acc = AlmostAccuracy()((preds, y))["accuracy"] self.assertAlmostEqual(results.metrics["test_accuracy"], expected_acc) # With more than one output of the model trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True, output_dir=tmp_dir, ) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) # With more than one output/label of the model trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"], compute_metrics=AlmostAccuracyBatched(), batch_eval_metrics=True, output_dir=tmp_dir, ) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) def test_predict_with_jit(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(a=1.5, b=2.5, jit_mode_eval=True, output_dir=tmp_dir) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With a number of elements not a round multiple of the batch size trainer = get_regression_trainer(a=1.5, b=2.5, eval_len=66, jit_mode_eval=True, output_dir=tmp_dir) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertTrue(np.allclose(preds, 1.5 * x + 2.5)) # With more than one output of the model trainer = get_regression_trainer(a=1.5, b=2.5, double_output=True, jit_mode_eval=True, output_dir=tmp_dir) preds = trainer.predict(trainer.eval_dataset).predictions x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) # With more than one output/label of the model trainer = get_regression_trainer( a=1.5, b=2.5, double_output=True, label_names=["labels", "labels_2"], jit_mode_eval=True, output_dir=tmp_dir, ) outputs = trainer.predict(trainer.eval_dataset) preds = outputs.predictions labels = outputs.label_ids x = trainer.eval_dataset.x self.assertEqual(len(preds), 2) self.assertTrue(np.allclose(preds[0], 1.5 * x + 2.5)) self.assertTrue(np.allclose(preds[1], 1.5 * x + 2.5)) self.assertTrue(np.array_equal(labels[0], trainer.eval_dataset.ys[0])) self.assertTrue(np.array_equal(labels[1], trainer.eval_dataset.ys[1])) def test_dynamic_shapes(self): eval_dataset = DynamicShapesDataset(batch_size=self.batch_size) model = RegressionModel(a=2, b=1) with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(tmp_dir, report_to="none") trainer = Trainer(model, args, eval_dataset=eval_dataset) # Check 
evaluation can run to completion _ = trainer.evaluate() # Check predictions preds = trainer.predict(eval_dataset) for expected, seen in zip(eval_dataset.ys, preds.label_ids): self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]])) self.assertTrue(np.all(seen[expected.shape[0] :] == -100)) for expected, seen in zip(eval_dataset.xs, preds.predictions): self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]])) self.assertTrue(np.all(seen[expected.shape[0] :] == -100)) # Same tests with eval accumulation with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(tmp_dir, eval_accumulation_steps=2, report_to="none") trainer = Trainer(model, args, eval_dataset=eval_dataset) # Check evaluation can run to completion _ = trainer.evaluate() # Check predictions preds = trainer.predict(eval_dataset) for expected, seen in zip(eval_dataset.ys, preds.label_ids): self.assertTrue(np.array_equal(expected, seen[: expected.shape[0]])) self.assertTrue(np.all(seen[expected.shape[0] :] == -100)) for expected, seen in zip(eval_dataset.xs, preds.predictions): self.assertTrue(np.array_equal(2 * expected + 1, seen[: expected.shape[0]])) self.assertTrue(np.all(seen[expected.shape[0] :] == -100)) def test_log_level(self): # testing only --log_level (--log_level_replica requires multiple gpus and DDP and is tested elsewhere) logger = logging.get_logger() log_info_string = "Running training" # test with the default log_level - should be the same as before and thus we test depending on is_info is_info = logging.get_verbosity() <= 20 with tempfile.TemporaryDirectory() as tmp_dir: with CaptureLogger(logger) as cl: trainer = get_regression_trainer(output_dir=tmp_dir) trainer.train() if is_info: self.assertIn(log_info_string, cl.out) else: self.assertNotIn(log_info_string, cl.out) with LoggingLevel(logging.INFO): # test with low log_level - lower than info with CaptureLogger(logger) as cl: trainer = get_regression_trainer(log_level="debug", output_dir=tmp_dir) trainer.train() self.assertIn(log_info_string, cl.out) with LoggingLevel(logging.INFO): # test with high log_level - should be quiet with CaptureLogger(logger) as cl: trainer = get_regression_trainer(log_level="error", output_dir=tmp_dir) trainer.train() self.assertNotIn(log_info_string, cl.out) def test_save_checkpoints(self): tmp_dir = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer(output_dir=tmp_dir, save_steps=5) trainer.train() self.check_saved_checkpoints(tmp_dir, 5, int(self.n_epochs * 64 / self.batch_size)) # With a regular model that is not a PreTrainedModel tmp_dir = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer(output_dir=tmp_dir, save_steps=5, pretrained=False) trainer.train() self.check_saved_checkpoints(tmp_dir, 5, int(self.n_epochs * 64 / self.batch_size), False) @require_safetensors def test_safe_checkpoints(self): for save_safetensors in [True, False]: tmp_dir = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer(output_dir=tmp_dir, save_steps=5, save_safetensors=save_safetensors) trainer.train() self.check_saved_checkpoints( tmp_dir, 5, int(self.n_epochs * 64 / self.batch_size), safe_weights=save_safetensors ) # With a regular model that is not a PreTrainedModel tmp_dir = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer( output_dir=tmp_dir, save_steps=5, pretrained=False, save_safetensors=save_safetensors ) trainer.train() self.check_saved_checkpoints( tmp_dir, 5, int(self.n_epochs * 64 / self.batch_size), False, safe_weights=save_safetensors ) 
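    # For reference, the two serialization formats exercised above differ only in the weights file that
    # check_saved_checkpoints looks for inside each checkpoint-N directory: typically "model.safetensors"
    # when save_safetensors=True and the pickled "pytorch_model.bin" otherwise.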
def test_save_collator_tokenizer_by_default(self): class FakeCollator: def __init__(self): self.tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") self.tokenizer.add_tokens(["<NEW_TOKEN1>", "<NEW_TOKEN2>"]) def __call__(self, features: list[Any], return_tensors="pt") -> dict[str, Any]: return default_data_collator(features, return_tensors) data_collator = FakeCollator() tmp_dir = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer( output_dir=tmp_dir, save_steps=5, save_safetensors=True, data_collator=data_collator ) trainer.train() loaded_tokenizer = AutoTokenizer.from_pretrained(os.path.join(tmp_dir, os.listdir(tmp_dir)[0])) assert len(loaded_tokenizer) == len(trainer.data_collator.tokenizer), "Failed to load updated tokenizer" def test_load_best_model_with_save(self): tmp_dir = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer( output_dir=tmp_dir, save_steps=5, eval_strategy="steps", eval_steps=5, max_steps=9, ) trainer.train() # Check that we have the last known step: assert os.path.exists(os.path.join(tmp_dir, f"checkpoint-{trainer.state.max_steps}")), ( f"Could not find checkpoint-{trainer.state.max_steps}" ) # And then check the last step assert os.path.exists(os.path.join(tmp_dir, "checkpoint-9")), "Could not find checkpoint-9" # Now test that using a limit works # Should result in: # - save at step 5 (but is deleted) # - save at step 10 (loaded in at the end when `load_best_model=True`) # - save at step 11 tmp_dir = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer( output_dir=tmp_dir, save_steps=5, eval_strategy="steps", eval_steps=5, load_best_model_at_end=True, save_total_limit=2, max_steps=11, ) trainer.train() # Check that we have the last known step: assert os.path.exists(os.path.join(tmp_dir, "checkpoint-11")), "Could not find checkpoint-11" # And then check the last multiple assert os.path.exists(os.path.join(tmp_dir, "checkpoint-10")), "Could not find checkpoint-10" # Finally check that we don't have an old one assert not os.path.exists(os.path.join(tmp_dir, "checkpoint-5")), "Found checkpoint-5, limit not respected" # Finally check that the right model was loaded in, checkpoint-10 # this goes by the last `eval` step check to do so, so it won't be # the last model *saved* model_state = trainer.model.state_dict() final_model_weights = safetensors.torch.load_file(os.path.join(tmp_dir, "checkpoint-10", "model.safetensors")) for k, v in model_state.items(): assert torch.allclose(v, final_model_weights[k]), f"{k} is not the same" @require_torch_multi_accelerator def test_run_seq2seq_double_train_wrap_once(self): # test that we don't wrap the model more than once # since wrapping primarily happens on multi-gpu setup we want multiple gpus to test for # example DataParallel(DataParallel(model)) trainer = get_regression_trainer(output_dir=self.get_auto_remove_tmp_dir()) trainer.train() model_wrapped_before = trainer.model_wrapped trainer.train() model_wrapped_after = trainer.model_wrapped self.assertIs(model_wrapped_before, model_wrapped_after, "should be not wrapped twice") @require_torch_up_to_2_accelerators def test_can_resume_training(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled). 
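        # Rough numbers behind the warning above (assuming the default per-device batch size of 8): with
        # train_len=128, 2 accelerators give 128 / (8 * 2) = 8 updates per epoch, so checkpoint-5 still falls
        # inside epoch 1; with more devices there are fewer updates per epoch and checkpoint-5 would already
        # sit in a later epoch, where the shuffled data order no longer matches the first run.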
tmp_dir = self.get_auto_remove_tmp_dir() kwargs = { "output_dir": tmp_dir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "logging_steps": 5, } trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmp_dir, "checkpoint-5") # Reinitialize trainer trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Now check with a later checkpoint that it also works when we span over one epoch checkpoint = os.path.join(tmp_dir, "checkpoint-15") # Reinitialize trainer and load model trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # With a regular model that is not a PreTrainedModel tmp_dir = self.get_auto_remove_tmp_dir() kwargs = { "output_dir": tmp_dir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "pretrained": False, } trainer = get_regression_trainer(**kwargs) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmp_dir, "checkpoint-5") # Reinitialize trainer and load model trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Now check with a later checkpoint that it also works when we span over one epoch checkpoint = os.path.join(tmp_dir, "checkpoint-15") # Reinitialize trainer and load model trainer = get_regression_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) # Now check failures # 1. fail to find a bogus checkpoint tmp_dir = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer(output_dir=tmp_dir) with self.assertRaises(Exception) as context: trainer.train(resume_from_checkpoint=f"{checkpoint}-bogus") self.assertTrue("Can't find a valid checkpoint at" in str(context.exception)) # 2. fail to find any checkpoint - due a fresh output_dir tmp_dir = self.get_auto_remove_tmp_dir() trainer = get_regression_trainer(output_dir=tmp_dir) with self.assertRaises(Exception) as context: trainer.train(resume_from_checkpoint=True) self.assertTrue("No valid checkpoint found in output directory" in str(context.exception)) # require_torch_non_multi_accelerator is necessary because this worker blocks runs when using multiple GPUs, making # the test slower. 
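    # The next test additionally passes use_scaler=True to check_saved_checkpoints, which presumably asserts
    # that the fp16 GradScaler state was saved with each checkpoint. A typical fp16 checkpoint-N directory
    # would then contain something like model.safetensors, optimizer.pt, scheduler.pt, scaler.pt,
    # rng_state.pth and trainer_state.json, though the exact file list depends on the Trainer version.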
@require_torch_non_multi_accelerator @run_test_using_subprocess @run_first @slow def test_can_resume_training_lm(self): # Check if it works for a simple language modeling example training_steps = 10 resume_from_step = 8 with tempfile.TemporaryDirectory() as tmpdir: enable_full_determinism(0) kwargs = { "output_dir": tmpdir, "fp16": True, "max_steps": training_steps, "per_device_train_batch_size": 1, "learning_rate": 1e-5, "lr_scheduler_type": "cosine", "save_strategy": "steps", "save_steps": 1, "logging_strategy": "steps", "logging_steps": 1, "report_to": "none", } trainer = get_language_model_trainer(**kwargs) trainer.train(resume_from_checkpoint=False) # Get the parameter length of the model model_params = torch.cat([p.cpu().flatten() for p in trainer.model.parameters()]) model_param_len = len(model_params) # Sample uniform indexes and save the values of the parameters (considering an unrolled vector with # all of them) indices = torch.randint(0, model_param_len, (1000,)) # Save the values of the parameters for later comparison model_params_sample = model_params[indices].detach().clone() state1 = dataclasses.asdict(trainer.state) # Delete the reference del model_params, trainer # Checks if all checkpoints are there, +1 is necessary because range is 1-indexed self.check_saved_checkpoints( tmpdir, freq=1, total=training_steps + 1, is_pretrained=True, safe_weights=True, use_scaler=True ) # Checkpoint at intermediate step enable_full_determinism(0) checkpoint = os.path.join(tmpdir, f"checkpoint-{resume_from_step + 1}") trainer = get_language_model_trainer(**kwargs) trainer.train(resume_from_checkpoint=checkpoint) model_params = torch.cat([p.cpu().flatten() for p in trainer.model.parameters()]) # Check that the parameters are the same self.assertTrue(torch.allclose(model_params[indices], model_params_sample)) state2 = dataclasses.asdict(trainer.state) self.check_trainer_state_are_the_same(state1, state2) del model_params, trainer @unittest.skip( reason="@muellerzr: Fix once Trainer can take an accelerate configuration. Need to set `seedable_sampler=True`." ) def test_resume_training_with_randomness(self): # For more than 1 GPUs, since the randomness is introduced in the model and with DataParallel (which is used # in this test for more than 2 GPUs), the calls to the torch RNG will happen in a random order (sometimes # GPU 0 will call first and sometimes GPU 1). 
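        # Sketch of the issue: the regression model used here adds torch-generated noise in its forward pass
        # (something along the lines of `y = a * x + b + torch.randn(...)` when random_torch is enabled), so
        # reproducibility after resuming requires every RNG draw to happen in the same order, which
        # DataParallel does not guarantee across replicas. Hence randomness is only enabled on a single device.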
random_torch = not torch.cuda.is_available() or backend_device_count(torch_device) <= 1 if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True train_dataset = RegressionDataset(length=128) eval_dataset = RegressionDataset() with self.subTest("Test every step"): config = RegressionModelConfig(a=0, b=2, random_torch=random_torch) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() model = RegressionRandomPreTrainedModel(config) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, "checkpoint-15")) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() self.assertAlmostEqual(a, a1, delta=1e-5) self.assertAlmostEqual(b, b1, delta=1e-5) with self.subTest("Test every epoch"): config = RegressionModelConfig(a=0, b=2, random_torch=random_torch) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() args = RegressionTrainingArguments(tmp_dir, save_strategy="epoch", learning_rate=0.1) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() model = RegressionRandomPreTrainedModel(config) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) checkpoints = [d for d in os.listdir(tmp_dir) if d.startswith("checkpoint-")] # There should be one checkpoint per epoch. self.assertEqual(len(checkpoints), 3) checkpoint_dir = sorted(checkpoints, key=lambda x: int(x.replace("checkpoint-", "")))[0] trainer.train(resume_from_checkpoint=os.path.join(tmp_dir, checkpoint_dir)) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() self.assertAlmostEqual(a, a1, delta=1e-5) self.assertAlmostEqual(b, b1, delta=1e-5) @slow @require_non_hpu @require_accelerate @require_torch_non_multi_accelerator def test_auto_batch_size_finder(self): if torch.cuda.is_available(): torch.backends.cudnn.deterministic = True SRC_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), "..", "..", "examples", "pytorch", "text-classification") ) sys.path.append(SRC_DIR) import run_glue with tempfile.TemporaryDirectory() as tmpdir: testargs = f""" run_glue.py --model_name_or_path distilbert/distilbert-base-uncased --task_name mrpc --do_train --do_eval --max_seq_len 128 --per_device_train_batch_size 4096 --learning_rate 2e-5 --num_train_epochs 1 --output_dir {tmpdir} --report_to none --auto_find_batch_size 0 """.split() with self.assertRaises(RuntimeError): with patch.object(sys, "argv", testargs): run_glue.main() testargs[-1] = "1" with patch.object(sys, "argv", testargs): run_glue.main() @require_deepspeed def test_auto_batch_size_with_deepspeed(self): train_dataset = RegressionDataset(length=128) config = RegressionModelConfig(a=0, b=2) model = RegressionRandomPreTrainedModel(config) tmp_dir = self.get_auto_remove_tmp_dir() for stage in [1, 2]: deepspeed = { "zero_optimization": { "stage": stage, }, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", } args = RegressionTrainingArguments( tmp_dir, do_train=True, max_steps=2, save_strategy="no", per_device_train_batch_size=16, auto_find_batch_size=True, deepspeed=deepspeed, ) trainer = Trainer(model, args, 
train_dataset=train_dataset, callbacks=[MockCudaOOMCallback()])
            trainer.train()
            self.assertEqual(trainer._train_batch_size, 14)

    def test_auto_batch_size_with_resume_from_checkpoint(self):
        train_dataset = RegressionDataset(length=128)

        config = RegressionModelConfig(a=0, b=2)

        model = RegressionRandomPreTrainedModel(config)
        tmp_dir = self.get_auto_remove_tmp_dir()
        args = RegressionTrainingArguments(
            tmp_dir,
            do_train=True,
            max_steps=2,
            save_steps=1,
            per_device_train_batch_size=16,
            auto_find_batch_size=True,
        )
        trainer = Trainer(model, args, train_dataset=train_dataset, callbacks=[MockCudaOOMCallback()])
        trainer.train()
        # After `auto_find_batch_size` has run, we should now be at int(16 * 0.9) = 14
        self.assertEqual(trainer._train_batch_size, 14)

        # We can then make a new Trainer
        trainer = Trainer(model, args, train_dataset=train_dataset)
        # Check we are at 16 to start
        self.assertEqual(trainer._train_batch_size, 16 * max(trainer.args.n_gpu, 1))
        trainer.train(resume_from_checkpoint=True)
        # We should be back to 14 again, picking up from the last Trainer run
        self.assertEqual(trainer._train_batch_size, 14)

    # regression for this issue: https://github.com/huggingface/transformers/issues/12970
    def test_training_with_resume_from_checkpoint_false(self):
        train_dataset = RegressionDataset(length=128)
        eval_dataset = RegressionDataset()

        config = RegressionModelConfig(a=0, b=2)

        model = RegressionRandomPreTrainedModel(config)

        tmp_dir = self.get_auto_remove_tmp_dir()
        args = RegressionTrainingArguments(tmp_dir, save_steps=5, learning_rate=0.1)
        trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset)

        trainer.train(resume_from_checkpoint=False)

    @require_torch_up_to_2_accelerators
    def test_resume_training_with_shard_checkpoint(self):
        # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
        # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
        # won't be the same since the training dataloader is shuffled).

        with tempfile.TemporaryDirectory() as tmpdir:
            trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)
            trainer.train()
            (a, b) = trainer.model.a.item(), trainer.model.b.item()
            state = dataclasses.asdict(trainer.state)

            checkpoint = os.path.join(tmpdir, "checkpoint-5")
            self.convert_to_sharded_checkpoint(checkpoint)

            # Reinitialize trainer
            trainer = get_regression_trainer(output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1)

            trainer.train(resume_from_checkpoint=checkpoint)
            (a1, b1) = trainer.model.a.item(), trainer.model.b.item()
            state1 = dataclasses.asdict(trainer.state)
            self.assertEqual(a, a1)
            self.assertEqual(b, b1)
            self.check_trainer_state_are_the_same(state, state1)

    @require_safetensors
    @require_torch_up_to_2_accelerators
    def test_resume_training_with_safe_checkpoint(self):
        # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of
        # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model
        # won't be the same since the training dataloader is shuffled).
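        # The nested loops below cover every combination of save format at save time vs. resume time:
        #   (initial_safe, loaded_safe) in {(False, False), (False, True), (True, False), (True, True)}
        # i.e. pickle -> pickle, pickle -> safetensors, safetensors -> pickle and safetensors -> safetensors.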
for initial_safe in [False, True]: for loaded_safe in [False, True]: with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, save_safetensors=initial_safe, ) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") self.convert_to_sharded_checkpoint(checkpoint, load_safe=initial_safe, save_safe=loaded_safe) # Reinitialize trainer trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, save_steps=5, learning_rate=0.1, save_safetensors=loaded_safe ) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) @require_torch_up_to_2_accelerators def test_resume_training_with_gradient_accumulation(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled). with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, gradient_accumulation_steps=2, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") # Reinitialize trainer trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, gradient_accumulation_steps=2, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.train(resume_from_checkpoint=checkpoint) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) @require_torch_up_to_2_accelerators def test_resume_training_with_frozen_params(self): # This test will fail for more than 2 GPUs since the batch size will get bigger and with the number of # save_steps, the checkpoint will resume training at epoch 2 or more (so the data seen by the model # won't be the same since the training dataloader is shuffled). 
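        # Note that `requires_grad_(False)` is re-applied after the trainer is re-created below: the flag
        # lives on the nn.Parameter object, not in the saved state_dict, so resuming from a checkpoint alone
        # would not restore it.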
with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.model.a.requires_grad_(False) trainer.train() (a, b) = trainer.model.a.item(), trainer.model.b.item() state = dataclasses.asdict(trainer.state) checkpoint = os.path.join(tmpdir, "checkpoint-5") # Reinitialize trainer trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.model.a.requires_grad_(False) trainer.train(resume_from_checkpoint=checkpoint) self.assertFalse(trainer.model.a.requires_grad) (a1, b1) = trainer.model.a.item(), trainer.model.b.item() state1 = dataclasses.asdict(trainer.state) self.assertEqual(a, a1) self.assertEqual(b, b1) self.check_trainer_state_are_the_same(state, state1) def test_load_best_model_at_end(self): total = int(self.n_epochs * 64 / self.batch_size) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, eval_strategy="steps", save_steps=5, load_best_model_at_end=True, ) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss") with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, eval_strategy="steps", save_steps=5, load_best_model_at_end=True, metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), ) self.assertTrue(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_accuracy", greater_is_better=True) with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), ) self.assertTrue(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 64 // self.batch_size, total) self.check_best_model_has_been_loaded( tmpdir, 64 // self.batch_size, total, trainer, "eval_accuracy", greater_is_better=True ) # Test this works with a non PreTrainedModel with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, learning_rate=0.1, eval_steps=5, eval_strategy="steps", save_steps=5, load_best_model_at_end=True, pretrained=False, ) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=False) self.check_best_model_has_been_loaded(tmpdir, 5, total, trainer, "eval_loss", is_pretrained=False) @require_safetensors def test_load_best_model_from_safetensors(self): total = int(self.n_epochs * 64 / self.batch_size) for save_safetensors, pretrained in product([False, True], [False, True]): with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_steps=5, eval_strategy="steps", save_steps=5, load_best_model_at_end=True, save_safetensors=save_safetensors, pretrained=pretrained, ) self.assertFalse(trainer.args.greater_is_better) trainer.train() self.check_saved_checkpoints(tmpdir, 5, total, is_pretrained=pretrained, safe_weights=save_safetensors) 
self.check_best_model_has_been_loaded( tmpdir, 5, total, trainer, "eval_loss", is_pretrained=pretrained, safe_weights=save_safetensors ) @slow def test_trainer_eval_mrpc(self): MODEL_ID = "google-bert/bert-base-cased-finetuned-mrpc" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID) data_args = GlueDataTrainingArguments( task_name="mrpc", data_dir=f"{get_tests_dir()}/fixtures/tests_samples/MRPC", overwrite_cache=True ) eval_dataset = GlueDataset(data_args, tokenizer=tokenizer, mode="dev") with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments(output_dir=tmp_dir, use_cpu=True, report_to="none") trainer = Trainer(model=model, args=training_args, eval_dataset=eval_dataset) result = trainer.evaluate() self.assertLess(result["eval_loss"], 0.2) @slow def test_trainer_eval_multiple(self): MODEL_ID = "openai-community/gpt2" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) model = AutoModelForCausalLM.from_pretrained(MODEL_ID) dataset = LineByLineTextDataset( tokenizer=tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=tokenizer.max_len_single_sentence, ) for example in dataset.examples: example["labels"] = example["input_ids"] with tempfile.TemporaryDirectory() as tmp_dir: training_args = TrainingArguments( output_dir=tmp_dir, use_cpu=True, per_device_eval_batch_size=1, report_to="none", ) trainer = Trainer( model=model, args=training_args, eval_dataset={ "data1": dataset, "data2": dataset, }, ) result = trainer.evaluate() self.assertIn("eval_data1_loss", result) self.assertIn("eval_data2_loss", result) @slow def test_trainer_eval_lm(self): MODEL_ID = "distilbert/distilroberta-base" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID) dataset = LineByLineTextDataset( tokenizer=tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=tokenizer.max_len_single_sentence, ) self.assertEqual(len(dataset), 31) def test_training_iterable_dataset(self): config = RegressionModelConfig() model = RegressionPreTrainedModel(config) # Adding one column not used by the model should have no impact train_dataset = SampleIterableDataset(label_names=["labels", "extra"]) with tempfile.TemporaryDirectory() as tmp_dir: args = RegressionTrainingArguments(output_dir=tmp_dir, max_steps=4) trainer = Trainer(model=model, args=args, train_dataset=train_dataset) trainer.train() self.assertEqual(trainer.state.global_step, 4) loader = trainer.get_train_dataloader() self.assertIsInstance(loader, torch.utils.data.DataLoader) self.assertIsInstance(loader.sampler, torch.utils.data.dataloader._InfiniteConstantSampler) def test_evaluation_iterable_dataset(self): config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) # Adding one column not used by the model should have no impact eval_dataset = SampleIterableDataset(label_names=["labels", "extra"]) with tempfile.TemporaryDirectory() as tmp_dir: args = RegressionTrainingArguments(output_dir=tmp_dir) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy()) results = trainer.evaluate() x, y = trainer.eval_dataset.dataset.x, trainer.eval_dataset.dataset.ys[0] pred = 1.5 * x + 2.5 expected_loss = ((pred - y) ** 2).mean() self.assertAlmostEqual(results["eval_loss"], expected_loss) expected_acc = AlmostAccuracy()((pred, y))["accuracy"] self.assertAlmostEqual(results["eval_accuracy"], expected_acc) # With a number of elements not a round multiple of the batch size eval_dataset = SampleIterableDataset(length=66) results = 
trainer.evaluate(eval_dataset)
            x, y = eval_dataset.dataset.x, eval_dataset.dataset.ys[0]
            pred = 1.5 * x + 2.5
            expected_loss = ((pred - y) ** 2).mean()
            self.assertAlmostEqual(results["eval_loss"], expected_loss)
            expected_acc = AlmostAccuracy()((pred, y))["accuracy"]
            self.assertAlmostEqual(results["eval_accuracy"], expected_acc)

    def test_predict_iterable_dataset(self):
        config = RegressionModelConfig(a=1.5, b=2.5)
        model = RegressionPreTrainedModel(config)
        eval_dataset = SampleIterableDataset()

        with tempfile.TemporaryDirectory() as tmp_dir:
            args = RegressionTrainingArguments(output_dir=tmp_dir)
            trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset, compute_metrics=AlmostAccuracy())

            preds = trainer.predict(trainer.eval_dataset).predictions
            x = eval_dataset.dataset.x
            self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

            # With a number of elements not a round multiple of the batch size
            # Adding one column not used by the model should have no impact
            test_dataset = SampleIterableDataset(length=66, label_names=["labels", "extra"])
            preds = trainer.predict(test_dataset).predictions
            x = test_dataset.dataset.x
            self.assertTrue(np.allclose(preds, 1.5 * x + 2.5))

    def test_num_train_epochs_in_training(self):
        # len(train_dl) < gradient_accumulation_steps shouldn't give ``ZeroDivisionError`` when ``max_steps`` is given.
        # It should give 1 update step for each epoch.
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = get_regression_trainer(
                max_steps=3,
                train_len=64,
                per_device_train_batch_size=16,
                gradient_accumulation_steps=5,
                output_dir=tmp_dir,
            )
            train_output = trainer.train()
            self.assertEqual(train_output.global_step, 3)

            # Even if ``max_steps`` is not specified, we still expect 1 update step for each epoch if
            # len(train_dl) < gradient_accumulation_steps.
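            # Worked numbers for the run below: train_len=64 / per_device_train_batch_size=16 = 4 batches per
            # epoch, and gradient_accumulation_steps=5 > 4, so each epoch produces exactly one (partial)
            # optimizer update and global_step ends up equal to the number of epochs.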
trainer = get_regression_trainer( train_len=64, per_device_train_batch_size=16, gradient_accumulation_steps=5, output_dir=tmp_dir ) train_output = trainer.train() self.assertEqual(train_output.global_step, int(self.n_epochs)) @require_torch_multi_accelerator def test_num_batches_in_training_with_gradient_accumulation(self): with tempfile.TemporaryDirectory() as tmp_dir: for num_train_epochs in [1, 2]: for train_len in [123, 120]: trainer = get_regression_trainer( train_len=train_len, per_device_train_batch_size=4, gradient_accumulation_steps=5, num_train_epochs=num_train_epochs, output_dir=tmp_dir, ) total_batch_samples = [] def wrap_get_batch_samples(fn): def wrapped_fn(epoch_iterator, num_batches, device): self.assertGreater(num_batches, 0) batch_samples, num_items_in_batch = fn(epoch_iterator, num_batches, device) self.assertEqual(len(batch_samples), num_batches) total_batch_samples.append(num_batches) return batch_samples, num_items_in_batch return wrapped_fn trainer.get_batch_samples = wrap_get_batch_samples(trainer.get_batch_samples) trainer.train() self.assertEqual(len(trainer.get_train_dataloader()) * num_train_epochs, sum(total_batch_samples)) def test_early_stopping_callback(self): # early stopping stops training before num_training_epochs with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, num_train_epochs=20, gradient_accumulation_steps=1, per_device_train_batch_size=16, load_best_model_at_end=True, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, compute_metrics=AlmostAccuracy(), metric_for_best_model="accuracy", ) trainer.add_callback(EarlyStoppingCallback(1, 0.0001)) train_output = trainer.train() self.assertLess(train_output.global_step, 20 * 64 / 16) # Invalid inputs to trainer with early stopping callback result in assertion error with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, num_train_epochs=20, gradient_accumulation_steps=1, per_device_train_batch_size=16, eval_strategy=IntervalStrategy.EPOCH, compute_metrics=AlmostAccuracy(), metric_for_best_model="accuracy", ) trainer.add_callback(EarlyStoppingCallback(1)) self.assertEqual(trainer.state.global_step, 0) try: trainer.train() except AssertionError: self.assertEqual(trainer.state.global_step, 0) # even if load_best_model_at_end is False, `best_model_checkpoint` should be set with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, num_train_epochs=20, gradient_accumulation_steps=1, per_device_train_batch_size=16, load_best_model_at_end=False, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, compute_metrics=AlmostAccuracy(), metric_for_best_model="accuracy", ) trainer.add_callback(EarlyStoppingCallback(1, 0.0001)) train_output = trainer.train() self.assertIsNotNone(trainer.state.best_model_checkpoint) def test_flos_extraction(self): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(learning_rate=0.1, output_dir=tmp_dir) def assert_flos_extraction(trainer, wrapped_model_to_check): self.assertEqual(trainer.model, trainer.accelerator.unwrap_model(wrapped_model_to_check)) self.assertGreaterEqual( getattr(trainer.accelerator.unwrap_model(wrapped_model_to_check).config, "total_flos", 0), 0 ) # with plain model assert_flos_extraction(trainer, trainer.model) # with enforced DataParallel assert_flos_extraction(trainer, nn.DataParallel(trainer.model)) trainer.train() 
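            # total_flos is accumulated during training, typically from the model's floating_point_ops()
            # estimate for each batch, and is stored as a plain float on the TrainerState.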
self.assertTrue(isinstance(trainer.state.total_flos, float)) def check_checkpoint_deletion(self, trainer, output_dir, expected): # Make fake checkpoints for n in [5, 10, 15, 20, 25]: os.makedirs(os.path.join(output_dir, f"{PREFIX_CHECKPOINT_DIR}-{n}"), exist_ok=True) trainer._rotate_checkpoints(output_dir=output_dir) glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{PREFIX_CHECKPOINT_DIR}-*")] values = [int(re.match(f".*{PREFIX_CHECKPOINT_DIR}-([0-9]+)", d).groups()[0]) for d in glob_checkpoints] self.assertSetEqual(set(values), set(expected)) def test_checkpoint_rotation(self): with tempfile.TemporaryDirectory() as tmp_dir: # Without best model at end trainer = get_regression_trainer(output_dir=tmp_dir, save_total_limit=2) self.check_checkpoint_deletion(trainer, tmp_dir, [20, 25]) # With best model at end trainer = get_regression_trainer( output_dir=tmp_dir, eval_strategy="steps", load_best_model_at_end=True, save_total_limit=2 ) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5") self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25]) # Edge case: we don't always honor save_total_limit=1 if load_best_model_at_end=True to be able to resume # from checkpoint trainer = get_regression_trainer( output_dir=tmp_dir, eval_strategy="steps", load_best_model_at_end=True, save_total_limit=1 ) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-25") self.check_checkpoint_deletion(trainer, tmp_dir, [25]) trainer.state.best_model_checkpoint = os.path.join(tmp_dir, "checkpoint-5") self.check_checkpoint_deletion(trainer, tmp_dir, [5, 25]) def test_compare_trainer_and_checkpoint_args_logging(self): logger = logging.get_logger() with tempfile.TemporaryDirectory() as tmpdir, CaptureLogger(logger) as cl: trainer = get_regression_trainer( output_dir=tmpdir, train_len=128, eval_steps=5, gradient_accumulation_steps=2, per_device_train_batch_size=4, save_steps=5, learning_rate=0.1, ) trainer.train() checkpoint = os.path.join(tmpdir, "checkpoint-5") checkpoint_trainer = get_regression_trainer( output_dir=tmpdir, train_len=256, eval_steps=10, gradient_accumulation_steps=4, per_device_train_batch_size=8, save_steps=10, learning_rate=0.1, ) checkpoint_trainer.train(resume_from_checkpoint=checkpoint) self.assertIn("save_steps: 10 (from args) != 5 (from trainer_state.json)", cl.out) self.assertIn( "per_device_train_batch_size: 8 (from args) != 4 (from trainer_state.json)", cl.out, ) self.assertIn( "eval_steps: 10 (from args) != 5 (from trainer_state.json)", cl.out, ) def check_mem_metrics(self, trainer, check_func): metrics = trainer.train().metrics check_func("init_mem_cpu_alloc_delta", metrics) check_func("train_mem_cpu_alloc_delta", metrics) if backend_device_count(torch_device) > 0: check_func("init_mem_gpu_alloc_delta", metrics) check_func("train_mem_gpu_alloc_delta", metrics) metrics = trainer.evaluate() check_func("eval_mem_cpu_alloc_delta", metrics) if backend_device_count(torch_device) > 0: check_func("eval_mem_gpu_alloc_delta", metrics) metrics = trainer.predict(RegressionDataset()).metrics check_func("test_mem_cpu_alloc_delta", metrics) if backend_device_count(torch_device) > 0: check_func("test_mem_gpu_alloc_delta", metrics) def test_mem_metrics(self): with tempfile.TemporaryDirectory() as tmp_dir: # with mem metrics enabled trainer = get_regression_trainer(skip_memory_metrics=False, output_dir=tmp_dir) self.check_mem_metrics(trainer, self.assertIn) # with mem metrics disabled trainer = get_regression_trainer(skip_memory_metrics=True, 
output_dir=tmp_dir) self.check_mem_metrics(trainer, self.assertNotIn) @require_torch_fp16 @require_torch_accelerator def test_fp16_full_eval(self): # this is a sensitive test so let's keep debugging printouts in place for quick diagnosis. # it's using pretty large safety margins, but small enough to detect broken functionality. debug = 0 n_gpus = backend_device_count(torch_device) with tempfile.TemporaryDirectory() as tmp_dir: bs = 8 eval_len = 16 * n_gpus # make the params somewhat big so that there will be enough RAM consumed to be able to # measure things. We should get about 64KB for a+b in fp32 a = torch.ones(1000, bs) + 0.001 b = torch.ones(1000, bs) - 0.001 # 1. with fp16_full_eval disabled trainer = get_regression_trainer( a=a, b=b, eval_len=eval_len, skip_memory_metrics=False, output_dir=tmp_dir ) metrics = trainer.evaluate() del trainer gc.collect() fp32_init = metrics["init_mem_gpu_alloc_delta"] fp32_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"fp32_init {fp32_init}") print(f"fp32_eval {fp32_eval}") # here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram. # perfect world: fp32_init == 64<<10 self.assertGreater(fp32_init, 59_000) # after eval should be no extra memory allocated - with a small margin (other than the peak # memory consumption for the forward calculation that gets recovered) # perfect world: fp32_eval == close to zero self.assertLess(fp32_eval, 5_000) # 2. with fp16_full_eval enabled trainer = get_regression_trainer( a=a, b=b, eval_len=eval_len, fp16_full_eval=True, skip_memory_metrics=False, output_dir=tmp_dir ) metrics = trainer.evaluate() fp16_init = metrics["init_mem_gpu_alloc_delta"] fp16_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"fp16_init {fp16_init}") print(f"fp16_eval {fp16_eval}") # here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0 # perfect world: fp16_init == close to zero self.assertLess(fp16_init, 5_000) # here we put the model on device in eval and only `half()` of it, i.e. about 32K (again we ignore the peak margin which gets returned back) # perfect world: fp32_init == 32<<10 self.assertGreater(fp16_eval, 27_000) # 3. relative comparison fp32 vs full fp16 # should be about half of fp16_init # perfect world: fp32_init/2 == fp16_eval self.assertAlmostEqual(fp16_eval, fp32_init / 2, delta=5_000) @require_torch_gpu @require_torch_non_multi_gpu @require_torch_tensorrt_fx def test_torchdynamo_full_eval(self): from torch import _dynamo as torchdynamo # torchdynamo at the moment doesn't support DP/DDP, therefore require a single gpu n_gpus = get_gpu_count() bs = 8 eval_len = 16 * n_gpus # make the params somewhat big so that there will be enough RAM consumed to be able to # measure things. We should get about 64KB for a+b in fp32 a = torch.ones(1000, bs) + 0.001 b = torch.ones(1000, bs) - 0.001 with tempfile.TemporaryDirectory() as tmp_dir: # 1. Default - without TorchDynamo trainer = get_regression_trainer(a=a, b=b, eval_len=eval_len, output_dir=tmp_dir) metrics = trainer.evaluate() original_eval_loss = metrics["eval_loss"] del trainer # 2. TorchDynamo eager trainer = get_regression_trainer( a=a, b=b, eval_len=eval_len, torch_compile_backend="eager", output_dir=tmp_dir ) metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) del trainer torchdynamo.reset() # 3.
TorchDynamo nvfuser trainer = get_regression_trainer( a=a, b=b, eval_len=eval_len, torch_compile_backend="nvfuser", output_dir=tmp_dir ) metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) torchdynamo.reset() # 4. TorchDynamo fx2trt trainer = get_regression_trainer( a=a, b=b, eval_len=eval_len, torch_compile_backend="fx2trt", output_dir=tmp_dir ) metrics = trainer.evaluate() self.assertAlmostEqual(metrics["eval_loss"], original_eval_loss) torchdynamo.reset() @require_torch_non_multi_gpu @require_torch_gpu def test_torchdynamo_memory(self): # torchdynamo at the moment doesn't support DP/DDP, therefore require a single gpu from torch import _dynamo as torchdynamo class CustomTrainer(Trainer): def compute_loss(self, model, inputs, num_items_in_batch=None, return_outputs=False): x = inputs["x"] output = model(x) if self.args.n_gpu == 1: return output.mean() return output class MyModule(torch.nn.Module): """Simple module that does aggressive fusion""" def __init__(self): super().__init__() def forward(self, x): for _ in range(20): x = torch.cos(x) return x mod = MyModule() # 1. without TorchDynamo (eager baseline) a = torch.ones(1024, 1024, device=torch_device, requires_grad=True) a.grad = None trainer = CustomTrainer(model=mod) # warmup for _ in range(10): orig_loss = trainer.training_step(mod, {"x": a}) # resets gc.collect() backend_empty_cache(torch_device) backend_reset_peak_memory_stats(torch_device) orig_loss = trainer.training_step(mod, {"x": a}) orig_peak_mem = backend_max_memory_allocated(torch_device) torchdynamo.reset() del trainer # 2. TorchDynamo nvfuser with tempfile.TemporaryDirectory() as tmp_dir: a = torch.ones(1024, 1024, device=torch_device, requires_grad=True) a.grad = None args = TrainingArguments(output_dir=tmp_dir, torch_compile_backend="nvfuser") trainer = CustomTrainer(model=mod, args=args) # warmup for _ in range(10): loss = trainer.training_step(mod, {"x": a}) # resets gc.collect() backend_empty_cache(torch_device) backend_reset_peak_memory_stats(torch_device) loss = trainer.training_step(mod, {"x": a}) peak_mem = backend_max_memory_allocated(torch_device) torchdynamo.reset() del trainer # Functional check self.assertAlmostEqual(loss, orig_loss) # AOT Autograd recomputation and nvfuser recomputation optimization # aggressively fuses the operations and reduce the memory footprint. self.assertGreater(orig_peak_mem, peak_mem * 2) @require_torch_accelerator @require_torch_bf16 def test_bf16_full_eval(self): # note: most of the logic is the same as test_fp16_full_eval # this is a sensitive test so let's keep debugging printouts in place for quick diagnosis. # it's using pretty large safety margins, but small enough to detect broken functionality. debug = 0 n_gpus = backend_device_count(torch_device) bs = 8 eval_len = 16 * n_gpus # make the params somewhat big so that there will be enough RAM consumed to be able to # measure things. We should get about 64KB for a+b in fp32 a = torch.ones(1000, bs) + 0.001 b = torch.ones(1000, bs) - 0.001 with tempfile.TemporaryDirectory() as tmp_dir: # 1. 
with bf16_full_eval disabled trainer = get_regression_trainer( a=a, b=b, eval_len=eval_len, skip_memory_metrics=False, output_dir=tmp_dir ) metrics = trainer.evaluate() del trainer gc.collect() fp32_init = metrics["init_mem_gpu_alloc_delta"] fp32_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"fp32_init {fp32_init}") print(f"fp32_eval {fp32_eval}") # here we expect the model to be preloaded in trainer.__init__ and consume around 64K gpu ram. # perfect world: fp32_init == 64<<10 self.assertGreater(fp32_init, 59_000) # after eval should be no extra memory allocated - with a small margin (other than the peak # memory consumption for the forward calculation that gets recovered) # perfect world: fp32_eval == close to zero self.assertLess(fp32_eval, 5_000) # 2. with bf16_full_eval enabled trainer = get_regression_trainer( a=a, b=b, eval_len=eval_len, bf16_full_eval=True, skip_memory_metrics=False, output_dir=tmp_dir ) metrics = trainer.evaluate() bf16_init = metrics["init_mem_gpu_alloc_delta"] bf16_eval = metrics["eval_mem_gpu_alloc_delta"] if debug: print(f"bf16_init {bf16_init}") print(f"bf16_eval {bf16_eval}") # here we expect the model to not be preloaded in trainer.__init__, so with a small margin it should be close to 0 # perfect world: bf16_init == close to zero self.assertLess(bf16_init, 5_000) # here we put the model on device in eval and only `half()` of it, i.e. about 32K,(again we ignore the peak margin which gets returned back) # perfect world: fp32_init == 32<<10 self.assertGreater(bf16_eval, 27_000) # 3. relative comparison fp32 vs full bf16 # should be about half of bf16_init # perfect world: fp32_init/2 == bf16_eval self.assertAlmostEqual(bf16_eval, fp32_init / 2, delta=5_000) def test_no_wd_param_group(self): model = nn.Sequential(TstLayer(128), nn.ModuleList([TstLayer(128), TstLayer(128)])) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer(model=model, args=TrainingArguments(output_dir=tmp_dir, report_to="none")) trainer.create_optimizer_and_scheduler(10) wd_names = ['0.linear1.weight', '0.linear2.weight', '1.0.linear1.weight', '1.0.linear2.weight', '1.1.linear1.weight', '1.1.linear2.weight'] # fmt: skip wd_params = [p for n, p in model.named_parameters() if n in wd_names] no_wd_params = [p for n, p in model.named_parameters() if n not in wd_names] self.assertListEqual(trainer.optimizer.param_groups[0]["params"], wd_params) self.assertListEqual(trainer.optimizer.param_groups[1]["params"], no_wd_params) @slow @run_first @require_non_hpu @require_torch_multi_accelerator def test_end_to_end_example(self): # Tests that `translation.py` will run without issues script_path = os.path.abspath( os.path.join( os.path.dirname(__file__), "..", "..", "examples", "pytorch", "translation", "run_translation.py" ) ) with tempfile.TemporaryDirectory() as tmpdir: command = [ "accelerate", "launch", script_path, "--model_name_or_path", "google-t5/t5-small", "--per_device_train_batch_size", "1", "--output_dir", tmpdir, "--overwrite_output_dir", "--do_train", "--max_train_samples", "64", "--num_train_epochs", "1", "--dataset_name", "wmt16", "--dataset_config", "ro-en", "--source_lang", "en", "--target_lang", "ro", "--do_predict", "--max_predict_samples", "64", "--predict_with_generate", "--ddp_timeout", "60", "--report_to", "none", ] execute_subprocess_async(command) # successful return here == success - any errors would have caused an error or a timeout in the sub-call def test_accelerator_config_empty(self): # Checks that a config can be made with the defaults if not 
passed with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() # Leaves one option as something *not* basic args = RegressionTrainingArguments(output_dir=tmp_dir) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.split_batches, False) self.assertEqual(trainer.accelerator.dispatch_batches, None) self.assertEqual(trainer.accelerator.even_batches, True) self.assertEqual(trainer.accelerator.use_seedable_sampler, True) if GRAD_ACCUM_KWARGS_VERSION_AVAILABLE: # gradient accumulation kwargs configures gradient_state self.assertNotIn("sync_each_batch", trainer.accelerator.gradient_state.plugin_kwargs) def test_accelerator_config_from_dict(self): # Checks that accelerator kwargs can be passed through # and the accelerator is initialized respectively with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() accelerator_config: dict[str, Any] = { "split_batches": True, "dispatch_batches": True, "even_batches": False, "use_seedable_sampler": True, } if GRAD_ACCUM_KWARGS_VERSION_AVAILABLE: accelerator_config["gradient_accumulation_kwargs"] = {"sync_each_batch": True} # Leaves all options as something *not* basic args = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config=accelerator_config) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.split_batches, True) self.assertEqual(trainer.accelerator.dispatch_batches, True) self.assertEqual(trainer.accelerator.even_batches, False) self.assertEqual(trainer.accelerator.use_seedable_sampler, True) def test_accelerator_config_from_yaml(self): # Checks that accelerator kwargs can be passed through # and the accelerator is initialized respectively with tempfile.TemporaryDirectory() as tmp_dir: path_file = Path(tmp_dir) / "accelerator_config.json" with open(path_file, "w") as f: accelerator_config = { "split_batches": True, "dispatch_batches": True, "even_batches": False, "use_seedable_sampler": False, } json.dump(accelerator_config, f) config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() # Leaves all options as something *not* basic args = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config=path_file) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.split_batches, True) self.assertEqual(trainer.accelerator.dispatch_batches, True) self.assertEqual(trainer.accelerator.even_batches, False) self.assertEqual(trainer.accelerator.use_seedable_sampler, False) def test_accelerator_config_from_dataclass(self): # Checks that accelerator kwargs can be passed through # and the accelerator is initialized respectively accelerator_config = AcceleratorConfig( split_batches=True, dispatch_batches=True, even_batches=False, use_seedable_sampler=False, ) config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() with tempfile.TemporaryDirectory() as tmp_dir: args = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config=accelerator_config) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.split_batches, True) 
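# The remaining dataclass fields are checked below; they should be forwarded to the Accelerator unchanged.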
self.assertEqual(trainer.accelerator.dispatch_batches, True) self.assertEqual(trainer.accelerator.even_batches, False) self.assertEqual(trainer.accelerator.use_seedable_sampler, False) @require_accelerate_version_min_0_28 def test_accelerate_config_from_dataclass_grad_accum(self): # Checks that accelerator kwargs can be passed through # and the accelerator is initialized respectively grad_acc_kwargs = { "num_steps": 10, "adjust_scheduler": False, "sync_with_dataloader": False, "sync_each_batch": True, } accelerator_config = AcceleratorConfig( split_batches=True, dispatch_batches=True, even_batches=False, use_seedable_sampler=False, gradient_accumulation_kwargs=grad_acc_kwargs, ) config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() with tempfile.TemporaryDirectory() as tmp_dir: args = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config=accelerator_config) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.args.gradient_accumulation_steps, 10) def test_accelerator_config_from_partial(self): # Checks that accelerator kwargs can be passed through # and the accelerator is initialized respectively with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() # Leaves one option as something *not* basic args = RegressionTrainingArguments( output_dir=tmp_dir, accelerator_config={ "split_batches": True, }, ) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.split_batches, True) self.assertEqual(trainer.accelerator.dispatch_batches, None) self.assertEqual(trainer.accelerator.even_batches, True) self.assertEqual(trainer.accelerator.use_seedable_sampler, True) def test_accelerator_custom_state(self): AcceleratorState._reset_state(reset_partial_state=True) with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(ValueError) as cm: _ = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config={"use_configured_state": True}) self.assertIn("Please define this beforehand", str(cm.warnings[0].message)) _ = Accelerator() _ = RegressionTrainingArguments(output_dir=tmp_dir, accelerator_config={"use_configured_state": True}) AcceleratorState._reset_state(reset_partial_state=True) @require_accelerate_version_min_0_28 def test_accelerator_config_from_dict_grad_accum_num_steps(self): with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) model = RegressionPreTrainedModel(config) eval_dataset = SampleIterableDataset() # case - TrainingArguments.gradient_accumulation_steps == 1 # - gradient_accumulation_kwargs['num_steps] == 1 # results in grad accum set to 1 args = RegressionTrainingArguments( output_dir=tmp_dir, gradient_accumulation_steps=1, accelerator_config={ "gradient_accumulation_kwargs": { "num_steps": 1, } }, ) trainer = Trainer(model=model, args=args, eval_dataset=eval_dataset) self.assertEqual(trainer.accelerator.gradient_state.plugin_kwargs["num_steps"], 1) # case - TrainingArguments.gradient_accumulation_steps > 1 # - gradient_accumulation_kwargs['num_steps] specified # results in exception raised args = RegressionTrainingArguments( output_dir=tmp_dir, gradient_accumulation_steps=2, accelerator_config={ "gradient_accumulation_kwargs": { "num_steps": 10, } }, ) with self.assertRaises(Exception) as context: trainer = Trainer(model=model, args=args, 
eval_dataset=eval_dataset) self.assertTrue("The `AcceleratorConfig`'s `num_steps` is set but" in str(context.exception)) def test_accelerator_config_not_instantiated(self): # Checks that accelerator kwargs can be passed through # and the accelerator is initialized respectively with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(NotImplementedError) as context: _ = RegressionTrainingArguments( output_dir=tmp_dir, accelerator_config=AcceleratorConfig, ) self.assertTrue("Tried passing in a callable to `accelerator_config`" in str(context.exception)) # Now test with a custom subclass @dataclasses.dataclass class CustomAcceleratorConfig(AcceleratorConfig): pass @dataclasses.dataclass class CustomTrainingArguments(TrainingArguments): accelerator_config: dict = dataclasses.field( default=CustomAcceleratorConfig, ) with tempfile.TemporaryDirectory() as tmp_dir: with self.assertRaises(NotImplementedError) as context: _ = CustomTrainingArguments( output_dir=tmp_dir, ) self.assertTrue("Tried passing in a callable to `accelerator_config`" in str(context.exception)) def test_dtype_to_json(self): @dataclasses.dataclass class TorchDtypeTrainingArguments(TrainingArguments): dtype: torch.dtype = dataclasses.field( default=torch.float32, ) for dtype in [ "float32", "float64", "complex64", "complex128", "float16", "bfloat16", "uint8", "int8", "int16", "int32", "int64", "bool", ]: torch_dtype = getattr(torch, dtype) with tempfile.TemporaryDirectory() as tmp_dir: args = TorchDtypeTrainingArguments(output_dir=tmp_dir, dtype=torch_dtype) args_dict = args.to_dict() self.assertIn("dtype", args_dict) self.assertEqual(args_dict["dtype"], dtype) @require_accelerate_version_min_0_30 def test_eval_use_gather_object(self): train_dataset = RegressionDataset() eval_dataset = RegressionDataset() model = RegressionDictModel() with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(tmp_dir, report_to="none", eval_use_gather_object=True) trainer = Trainer(model, args, train_dataset=train_dataset, eval_dataset=eval_dataset) trainer.train() _ = trainer.evaluate() _ = trainer.predict(eval_dataset) def test_trainer_saves_tokenizer(self): MODEL_ID = "google-bert/bert-base-uncased" tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False) with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) trainer = Trainer( model=RegressionPreTrainedModel(config), args=TrainingArguments(output_dir=tmp_dir), processing_class=tokenizer, ) trainer.save_model() reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) # For tokenizers, there isn't a direct to_dict method and the properties stored in the configs e.g. 
# saved tokens change over time, so we check that two tokenizers are equal by comparing their encoded outputs test_sentence = "This is a test sentence" self.assertListEqual( tokenizer(test_sentence, padding="max_length").input_ids, reloaded_tokenizer(test_sentence, padding="max_length").input_ids, ) @require_vision def test_trainer_saves_image_processor(self): MODEL_ID = "openai/clip-vit-base-patch32" image_processor = AutoImageProcessor.from_pretrained(MODEL_ID) with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) trainer = Trainer( model=RegressionPreTrainedModel(config), args=TrainingArguments(output_dir=tmp_dir, report_to="none"), processing_class=image_processor, ) trainer.save_model() reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir) self.assertDictEqual(image_processor.to_dict(), reloaded_image_processor.to_dict()) def test_trainer_saves_feature_extractor(self): MODEL_ID = "facebook/wav2vec2-base-960h" feature_extractor = AutoFeatureExtractor.from_pretrained(MODEL_ID) with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) trainer = Trainer( model=RegressionPreTrainedModel(config), args=TrainingArguments(output_dir=tmp_dir, report_to="none"), processing_class=feature_extractor, ) trainer.save_model() reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir) self.assertDictEqual(feature_extractor.to_dict(), reloaded_feature_extractor.to_dict()) @require_vision def test_trainer_saves_processor(self): MODEL_ID = "openai/clip-vit-base-patch32" image_processor = AutoImageProcessor.from_pretrained(MODEL_ID) tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, use_fast=False) processor = AutoProcessor.from_pretrained(MODEL_ID) with tempfile.TemporaryDirectory() as tmp_dir: config = RegressionModelConfig(a=1.5, b=2.5) trainer = Trainer( model=RegressionPreTrainedModel(config), args=TrainingArguments(output_dir=tmp_dir, report_to="none"), processing_class=processor, ) trainer.save_model() reloaded_processor = AutoProcessor.from_pretrained(tmp_dir) reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir) reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir) self.assertDictEqual(reloaded_processor.to_dict(), processor.to_dict()) image_processor_dict = image_processor.to_dict() reloaded_image_processor_dict = reloaded_image_processor.to_dict() # When the processor is saved in the trainer, the _processor_class gets set in the reloaded_image_processor dict image_processor_dict.pop("_processor_class") reloaded_image_processor_dict.pop("_processor_class") self.assertDictEqual(image_processor_dict, reloaded_image_processor_dict) # For tokenizers, there isn't a direct to_dict method and the properties stored in the configs e.g. # saved tokens change over time, so we check that two tokenizers are equal by comparing their encoded outputs test_sentence = "This is a test sentence" self.assertListEqual( tokenizer(test_sentence, padding="max_length").input_ids, reloaded_tokenizer(test_sentence, padding="max_length").input_ids, ) def test_save_best_checkpoint(self): freq = int(64 / self.batch_size) total = int(self.n_epochs * 64 / self.batch_size) # Case 1: args.metric_for_best_model == "accuracy".
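# With save_strategy="best", a checkpoint is written only when the monitored metric improves, so the mocked eval results below (0.60 -> 0.65 -> 0.64) are expected to produce exactly two checkpoints.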
with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_strategy="epoch", save_strategy="best", metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), ) self.assertTrue(trainer.args.metric_for_best_model == "accuracy") with patch.object( trainer, "_evaluate", side_effect=[ {"eval_loss": 0.03, "eval_accuracy": 0.60, "epoch": 1.0}, {"eval_loss": 0.02, "eval_accuracy": 0.65, "epoch": 2.0}, {"eval_loss": 0.01, "eval_accuracy": 0.64, "epoch": 3.0}, ], ): trainer.train() self.assertEqual(len(os.listdir(tmpdir)), 2) self.check_saved_checkpoints( output_dir=tmpdir, freq=freq, total=total, ) # Case 2: args.metric_for_best_model == "loss". with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_strategy="epoch", save_strategy="best", metric_for_best_model="loss", compute_metrics=AlmostAccuracy(), ) self.assertTrue(trainer.args.metric_for_best_model == "loss") with patch.object( trainer, "_evaluate", side_effect=[ {"eval_loss": 0.03, "eval_accuracy": 0.60, "epoch": 1.0}, {"eval_loss": 0.02, "eval_accuracy": 0.65, "epoch": 2.0}, {"eval_loss": 0.03, "eval_accuracy": 0.66, "epoch": 3.0}, ], ): trainer.train() self.assertEqual(len(os.listdir(tmpdir)), 2) self.check_saved_checkpoints( output_dir=tmpdir, freq=freq, total=total, ) def test_metric_for_best_model_behavior(self): # Case 1: Metric name not provided when `save_strategy == "best"`. # Should raise ValueError. with tempfile.TemporaryDirectory() as tmpdir: with self.assertRaises(ValueError) as context: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_strategy="epoch", save_strategy="best", compute_metrics=AlmostAccuracy(), ) self.assertIn("`args.metric_for_best_model` must be provided", str(context.exception)) # Case 2: Metric name not provided when `load_best_model_at_end == True`. # `metric_for_best_model` should be set to `"loss"` by default. with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( a=1.5, b=2.5, output_dir=tmpdir, learning_rate=0.1, eval_strategy="steps", save_strategy="steps", load_best_model_at_end=True, ) self.assertTrue(trainer.args.metric_for_best_model == "loss") def test_best_model_checkpoint_behavior(self): # Case 1. Never evaluated, save_total_limit > 1 and save_steps == 1. # Both best_metric and best_model_checkpoint should be None. with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, eval_strategy="steps", save_strategy="steps", save_steps=1, metric_for_best_model="accuracy", greater_is_better=True, ) trainer.train() assert trainer.state.best_metric is None assert trainer.state.best_model_checkpoint is None assert len(os.listdir(tmpdir)) == trainer.state.global_step # Case 2. Never evaluated and save_total_limit == 1. # Both best_metric and best_model_checkpoint should be None. # Only the last checkpoint should remain. 
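# (With save_total_limit=1, checkpoint rotation deletes the older checkpoint as soon as a newer one is saved.)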
with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, eval_strategy="steps", save_strategy="steps", save_steps=1, metric_for_best_model="accuracy", greater_is_better=True, save_total_limit=1, ) trainer.train() num_steps = trainer.state.global_step assert trainer.state.best_metric is None assert trainer.state.best_model_checkpoint is None assert len(os.listdir(tmpdir)) == 1 ckpt = os.path.join(tmpdir, f"{PREFIX_CHECKPOINT_DIR}-{num_steps}") assert os.path.isdir(ckpt) assert os.listdir(tmpdir)[0] == f"{PREFIX_CHECKPOINT_DIR}-{num_steps}" # Case 3. eval_strategy == save_strategy. # best_model_checkpoint should be at epoch 1. with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, eval_strategy="epoch", save_strategy="epoch", metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), greater_is_better=True, load_best_model_at_end=False, ) with patch.object( trainer, "_evaluate", side_effect=evaluate_side_effect_factory( [ {"eval_accuracy": 0.59}, {"eval_accuracy": 0.57}, {"eval_accuracy": 0.55}, ] ), ): trainer.train() steps_per_epoch = get_steps_per_epoch(trainer) assert trainer.state.best_metric == 0.59 assert trainer.state.best_global_step == steps_per_epoch best_ckpt = os.path.join(tmpdir, f"{PREFIX_CHECKPOINT_DIR}-{trainer.state.best_global_step}") assert trainer.state.best_model_checkpoint == best_ckpt assert len(os.listdir(tmpdir)) == trainer.state.num_train_epochs # Case 4. eval_strategy != save_strategy. with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, eval_strategy="epoch", save_strategy="steps", save_steps=1, metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), greater_is_better=True, load_best_model_at_end=False, ) with patch.object( trainer, "_evaluate", side_effect=evaluate_side_effect_factory( [ {"eval_accuracy": 0.59}, {"eval_accuracy": 0.57}, {"eval_accuracy": 0.55}, ] ), ): trainer.train() steps_per_epoch = get_steps_per_epoch(trainer) assert trainer.state.best_metric == 0.59 assert trainer.state.best_global_step == steps_per_epoch best_ckpt = os.path.join(tmpdir, f"{PREFIX_CHECKPOINT_DIR}-{trainer.state.best_global_step}") assert trainer.state.best_model_checkpoint == best_ckpt assert len(os.listdir(tmpdir)) == trainer.state.global_step # Case 5. Multiple checkpoints, save_total_limit == 1. # Best metric is found at step 1 and that checkpoint should be saved. with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, eval_strategy="steps", eval_steps=1, save_strategy="steps", save_steps=1, metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), greater_is_better=True, save_total_limit=1, ) with patch.object( trainer, "_evaluate", side_effect=evaluate_side_effect_factory( [ {"eval_accuracy": 0.90}, {"eval_accuracy": 0.80}, {"eval_accuracy": 0.70}, ] ), ): trainer.train() assert trainer.state.best_metric == 0.90 assert trainer.state.best_global_step == 1 best_ckpt = os.path.join(tmpdir, f"{PREFIX_CHECKPOINT_DIR}-{trainer.state.best_global_step}") assert trainer.state.best_model_checkpoint == best_ckpt assert len(os.listdir(tmpdir)) == 1 # Case 6. Saving happens more often and eval/save mismatch. # `best_model_checkpoint` should be None due to a step mismatch. 
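# Example: with eval_steps=3 and save_steps=2, the best eval (step 3) never lines up with a save step, so no saved checkpoint can be marked as best.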
with tempfile.TemporaryDirectory() as tmpdir: trainer = get_regression_trainer( output_dir=tmpdir, eval_strategy="steps", eval_steps=3, save_strategy="steps", save_steps=2, metric_for_best_model="accuracy", compute_metrics=AlmostAccuracy(), greater_is_better=True, ) with patch.object( trainer, "_evaluate", side_effect=evaluate_side_effect_factory( [ {"eval_accuracy": 0.90}, {"eval_accuracy": 0.80}, {"eval_accuracy": 0.70}, ] ), ): trainer.train() assert trainer.state.best_metric == 0.90 assert trainer.state.best_global_step == 3 assert trainer.state.best_model_checkpoint is None assert len(os.listdir(tmpdir)) == trainer.state.global_step // 2 def test_special_token_aligment(self): """ Tests that special token changes in the tokenizer result in model configs updates when using the trainer, to ensure special tokens are aligned across configs """ model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM") tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM") # add new special tokens to tokenizer, so we can test that trainer aligns the model configs with the tokenizer tokenizer.eos_token = "<|im_end|>" tokenizer.pad_token = "<|im_end|>" tokenizer.bos_token = "<|im_start|>" tokenizer.add_special_tokens({"additional_special_tokens": ["<|im_end|>", "<|im_start|>"]}) # the model needs to have its embedding layer resized accordingly model.resize_token_embeddings(len(tokenizer), pad_to_multiple_of=64) # create a random dataset from the **new** vocab size x = torch.randint(0, len(tokenizer), (64,)) dataset = RepeatDataset(x, length=2) with tempfile.TemporaryDirectory() as tmpdir: training_args = TrainingArguments( output_dir=tmpdir, report_to="none", max_steps=1, per_device_train_batch_size=1 ) trainer = Trainer( model=model, args=training_args, processing_class=tokenizer, train_dataset=dataset, ) # We haven't started training -> not yet aligned self.assertNotEqual(trainer.model.config.eos_token_id, tokenizer.eos_token_id) self.assertNotEqual(trainer.model.config.pad_token_id, tokenizer.pad_token_id) self.assertNotEqual(trainer.model.config.bos_token_id, tokenizer.bos_token_id) trainer.train() # Must be aligned as soon as we start training self.assertEqual(trainer.model.config.eos_token_id, tokenizer.eos_token_id) self.assertEqual(trainer.model.config.pad_token_id, tokenizer.pad_token_id) self.assertEqual(trainer.model.config.bos_token_id, tokenizer.bos_token_id) def test_trainer_works_without_model_config(self): """ Tests that models without a `config` parameter can still be trained. This is useful for preserving compatibility with third parties that train different models using the transformers Trainer. If this test fails, it doesn't imply that there's issues with transformers, but perhaps with third parties. 
""" tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-LlamaForCausalLM") model = BasicTextGenerationModel(vocab_size=tokenizer.vocab_size, hidden_size=32) # Note that this class does not have a config attribute train_dataset = LineByLineTextDataset( tokenizer=tokenizer, file_path=PATH_SAMPLE_TEXT, block_size=tokenizer.max_len_single_sentence, ) for example in train_dataset.examples: example["labels"] = example["input_ids"] with tempfile.TemporaryDirectory() as tmpdir: training_args = TrainingArguments( output_dir=tmpdir, report_to="none", max_steps=5, per_device_train_batch_size=1, remove_unused_columns=False, ) trainer = Trainer( model=model, args=training_args, processing_class=tokenizer, train_dataset=train_dataset, ) trainer.train() @require_torch @is_staging_test class TrainerIntegrationWithHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) def test_push_to_hub(self): with TemporaryHubRepo(token=self._token) as tmp_repo: output_dir_name = tmp_repo.repo_name with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, output_dir_name), push_to_hub=True, hub_token=self._token, ) url = trainer.push_to_hub() # Extract repo_name from the url re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url) self.assertTrue(re_search is not None) repo_name = re_search.groups()[0] self.assertEqual(repo_name, f"{USER}/{output_dir_name}") model = RegressionPreTrainedModel.from_pretrained(repo_name) self.assertEqual(model.a.item(), trainer.model.a.item()) self.assertEqual(model.b.item(), trainer.model.b.item()) def test_push_to_hub_in_organization(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(output_dir=tmp_dir) trainer.save_model() output_dir_name = tmp_repo.repo_name trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, output_dir_name), push_to_hub=True, hub_model_id=f"valid_org/{output_dir_name}", hub_token=self._token, ) url = trainer.push_to_hub() # Extract repo_name from the url re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url) self.assertTrue(re_search is not None) repo_name = re_search.groups()[0] self.assertEqual(repo_name, f"valid_org/{output_dir_name}") model = RegressionPreTrainedModel.from_pretrained(f"valid_org/{output_dir_name}") self.assertEqual(model.a.item(), trainer.model.a.item()) self.assertEqual(model.b.item(), trainer.model.b.item()) def get_commit_history(self, repo): commit_logs = subprocess.run( ["git", "log"], capture_output=True, check=True, encoding="utf-8", cwd=repo, ).stdout commits = commit_logs.split("\n\n")[1::2] return [commit.strip() for commit in commits] # TODO: @ydshieh or @SunMarc @unittest.skip("unknown failure reason, possibly staging hub issue") def test_push_to_hub_with_saves_each_epoch(self): with TemporaryHubRepo(token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: with self.assertLogs(level="WARNING") as logs: output_dir_name = tmp_repo.repo_name trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, output_dir_name), push_to_hub=True, hub_token=self._token, # To avoid any flakiness if the training goes faster than the uploads. 
hub_always_push=True, save_strategy="epoch", ) trainer.train() commits = list_repo_commits(f"{USER}/{output_dir_name}", token=self._token) commits = [c.title for c in commits] self.assertIn("initial commit", commits) self.assertIn("Training in progress, epoch 1", commits) self.assertIn("Training in progress, epoch 2", commits) # Epochs 3 and 4 are not guaranteed to be present (empty commits) self.assertTrue(any("Skipping to prevent empty commit." in record.message for record in logs.records)) def test_push_to_hub_with_saves_each_n_steps(self): num_gpus = max(1, backend_device_count(torch_device)) if num_gpus > 2: self.skipTest(reason="More than 2 GPUs available") with TemporaryHubRepo(token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: with self.assertLogs(level="WARNING") as logs: output_dir_name = tmp_repo.repo_name trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, output_dir_name), push_to_hub=True, hub_token=self._token, # To avoid any flakiness if the training goes faster than the uploads. hub_always_push=True, save_strategy="steps", save_steps=5, ) trainer.train() commits = list_repo_commits(f"{USER}/{output_dir_name}", token=self._token) commits = [c.title for c in commits] self.assertIn("initial commit", commits) # Some commits are skipped if nothing has changed # We expect 1 commit per 5 epochs + 1 commit at the end nb_empty_commits = len( [record for record in logs.records if "Skipping to prevent empty commit." in record.message] ) nb_epoch_commits = len([commit for commit in commits if "Training in progress, step" in commit]) # max_steps depend on the number of available GPUs max_steps = math.ceil(trainer.args.num_train_epochs * len(trainer.get_train_dataloader())) nb_expected_commits = len(range(5, max_steps, 5)) # '>=' since final commit might be an empty commit as well (not deterministic) self.assertGreaterEqual(nb_empty_commits + nb_epoch_commits, nb_expected_commits) @require_tensorboard def test_push_to_hub_with_tensorboard_logs(self): with TemporaryHubRepo(token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: output_dir_name = tmp_repo.repo_name trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, output_dir_name), hub_token=self._token, save_strategy="epoch", report_to=["tensorboard"], keep_report_to=True, ) trainer.train() # Push the runs via `push_to_hub()` trainer.push_to_hub() files = list_repo_files(f"{USER}/{output_dir_name}", token=self._token) found_log = False for f in files: if len(f.split("runs")) > 1 and "events.out.tfevents" in f: found_log = True assert found_log is True, "No tensorboard log found in repo" def test_push_to_hub_tags(self): # Checks if `trainer.push_to_hub()` works correctly by adding the desired # tag without having to pass `tags` in `push_to_hub` # see: with TemporaryHubRepo(token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: output_dir_name = tmp_repo.repo_name trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, output_dir_name), push_to_hub=True, hub_token=self._token, ) trainer.model.add_model_tags(["test-trainer-tags"]) url = trainer.push_to_hub() # Extract repo_name from the url re_search = re.search(ENDPOINT_STAGING + r"/([^/]+/[^/]+)/", url) self.assertTrue(re_search is not None) repo_name = re_search.groups()[0] self.assertEqual(repo_name, f"{USER}/{output_dir_name}") model_card = ModelCard.load(repo_name) self.assertTrue("test-trainer-tags" in model_card.data.tags) def test_push_to_hub_with_revision(self): # 
Checks if `trainer.push_to_hub()` works correctly by adding revision with TemporaryHubRepo(token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: output_dir_name = tmp_repo.repo_name trainer = get_regression_trainer( output_dir=os.path.join(tmp_dir, output_dir_name), push_to_hub=True, hub_token=self._token, ) branch = "v1.0" create_branch(repo_id=trainer.hub_model_id, branch=branch, token=self._token, exist_ok=True) url = trainer.push_to_hub(revision=branch) # Extract branch from the url re_search = re.search(r"tree/([^/]+)/", url) self.assertIsNotNone(re_search) branch_name = re_search.groups()[0] self.assertEqual(branch_name, branch) @require_torch @require_optuna class TrainerHyperParameterOptunaIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return {} def model_init(trial): if trial is not None: a = trial.suggest_int("a", -4, 4) b = trial.suggest_int("b", -4, 4) else: a = 0 b = 0 config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(config) def hp_name(trial): return MyTrialShortNamer.shortname(trial.params) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search(direction="minimize", hp_space=hp_space, hp_name=hp_name, n_trials=4) @require_torch @require_optuna class TrainerHyperParameterMultiObjectOptunaIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return {} def model_init(trial): if trial is not None: a = trial.suggest_int("a", -4, 4) b = trial.suggest_int("b", -4, 4) else: a = 0 b = 0 config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(config) def hp_name(trial): return MyTrialShortNamer.shortname(trial.params) def compute_objective(metrics: dict[str, float]) -> list[float]: return metrics["eval_loss"], metrics["eval_accuracy"] with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=10, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, compute_metrics=AlmostAccuracy(), ) trainer.hyperparameter_search( direction=["minimize", "maximize"], hp_space=hp_space, hp_name=hp_name, n_trials=4, compute_objective=compute_objective, ) @require_torch @require_optuna class TrainerHyperParameterOptunaIntegrationTestWithFullEval(unittest.TestCase): def test_hyperparameter_search(self): def hp_space(trial): return {} def model_init(trial): if trial is not None: a = trial.suggest_int("a", -4, 4) b = trial.suggest_int("b", -4, 4) else: a = 0 b = 0 config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(config) with 
tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, disable_tqdm=True, model_init=model_init, fp16_full_eval=True, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, n_trials=2, ) @require_torch @require_ray class TrainerHyperParameterRayIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def ray_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): from ray import tune return { "a": tune.randint(-4, 4), "b": tune.randint(-4, 4), } def model_init(config): if config is None: a = 0 b = 0 else: a = config["a"] b = config["b"] model_config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(model_config) def hp_name(params): return MyTrialShortNamer.shortname(params) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="ray", n_trials=4 ) def test_hyperparameter_search(self): self.ray_hyperparameter_search() def test_hyperparameter_search_ray_client(self): import ray from ray.util.client.ray_client_helpers import ray_start_client_server with ray_start_client_server(): assert ray.util.client.ray.is_connected() self.ray_hyperparameter_search() @slow @require_torch @require_sigopt class TrainerHyperParameterSigOptIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): class MyTrialShortNamer(TrialShortNamer): DEFAULTS = {"a": 0, "b": 0} def hp_space(trial): return [ {"bounds": {"min": -4, "max": 4}, "name": "a", "type": "int"}, {"bounds": {"min": -4, "max": 4}, "name": "b", "type": "int"}, ] def model_init(trial): if trial is not None: a = trial.assignments["a"] b = trial.assignments["b"] else: a = 0 b = 0 config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(config) def hp_name(trial): return MyTrialShortNamer.shortname(trial.assignments) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) trainer.hyperparameter_search( direction="minimize", hp_space=hp_space, hp_name=hp_name, backend="sigopt", n_trials=4 ) optim_test_params = [] if is_torch_available(): default_adam_kwargs = { "betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2), "eps": TrainingArguments.adam_epsilon, "lr": TrainingArguments.learning_rate, } default_lion_kwargs = { "betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2), "lr": TrainingArguments.learning_rate, } default_ademamix_kwargs = { "betas": (TrainingArguments.adam_beta1, TrainingArguments.adam_beta2, 0.9999), "alpha": 5.0, "eps": TrainingArguments.adam_epsilon, "lr": TrainingArguments.learning_rate, } 
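# Extra kwargs expected by torchdistx's AnyPrecisionAdamW (the Kahan-summation flag plus the dtype options), layered on top of the AdamW defaults below.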
default_anyprecision_kwargs = { "use_kahan_summation": False, "momentum_dtype": torch.float32, "variance_dtype": torch.float32, "compensation_buffer_dtype": torch.bfloat16, } optim_test_params = [ ( OptimizerNames.ADAMW_TORCH, torch.optim.AdamW, default_adam_kwargs, ), ( OptimizerNames.ADAFACTOR, transformers.optimization.Adafactor, { "scale_parameter": False, "relative_step": False, "lr": TrainingArguments.learning_rate, }, ), ] if is_apex_available(): import apex optim_test_params.append( ( OptimizerNames.ADAMW_APEX_FUSED, apex.optimizers.FusedAdam, default_adam_kwargs, ) ) if is_bitsandbytes_available(): import bitsandbytes as bnb optim_test_params.append( ( OptimizerNames.ADAMW_BNB, bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( OptimizerNames.ADAMW_8BIT, bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( OptimizerNames.PAGED_ADAMW, bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( OptimizerNames.PAGED_ADAMW_8BIT, bnb.optim.AdamW, default_adam_kwargs, ) ) optim_test_params.append( ( OptimizerNames.LION, bnb.optim.Lion, default_lion_kwargs, ) ) optim_test_params.append( ( OptimizerNames.LION_8BIT, bnb.optim.Lion, default_lion_kwargs, ) ) optim_test_params.append( ( OptimizerNames.PAGED_LION_8BIT, bnb.optim.Lion, default_lion_kwargs, ) ) if version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse("0.44.0"): optim_test_params.append( ( OptimizerNames.ADEMAMIX, bnb.optim.AdEMAMix, default_ademamix_kwargs, ) ) optim_test_params.append( ( OptimizerNames.ADEMAMIX_8BIT, bnb.optim.AdEMAMix, default_ademamix_kwargs, ) ) optim_test_params.append( ( OptimizerNames.PAGED_ADEMAMIX_8BIT, bnb.optim.AdEMAMix, default_ademamix_kwargs, ) ) optim_test_params.append( ( OptimizerNames.PAGED_ADEMAMIX, bnb.optim.AdEMAMix, default_ademamix_kwargs, ) ) if is_torchdistx_available(): import torchdistx optim_test_params.append( ( OptimizerNames.ADAMW_ANYPRECISION, torchdistx.optimizers.AnyPrecisionAdamW, dict(default_adam_kwargs, **default_anyprecision_kwargs), ) ) if is_torchao_available(): from torchao.optim import AdamW4bit, AdamW8bit optim_test_params.append( ( OptimizerNames.ADAMW_TORCH_4BIT, AdamW4bit, default_adam_kwargs, ) ) optim_test_params.append( ( TrainingArguments(optim=OptimizerNames.ADAMW_TORCH_8BIT, output_dir="None"), AdamW8bit, default_adam_kwargs, ) ) @require_torch class TrainerOptimizerChoiceTest(unittest.TestCase): def check_optim_and_kwargs(self, training_args: TrainingArguments, expected_cls, expected_kwargs): actual_cls, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args) self.assertEqual(expected_cls, actual_cls) self.assertIsNotNone(optim_kwargs) for p, v in expected_kwargs.items(): self.assertTrue(p in optim_kwargs) actual_v = optim_kwargs[p] self.assertTrue(actual_v == v, f"Failed check for {p}. Expected {v}, but got {actual_v}.") @parameterized.expand(optim_test_params, skip_on_empty=True) def test_optim_supported(self, optim: str, expected_cls, expected_kwargs): with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer(output_dir=tmp_dir, optim=optim) # exercises all the valid --optim options self.check_optim_and_kwargs(trainer.args, expected_cls, expected_kwargs) trainer.train() def test_fused_adam(self): # Pretend that apex is installed and mock apex.optimizers.FusedAdam exists. # Trainer.get_optimizer_cls_and_kwargs does not use FusedAdam. 
It only has to return the # class given, so mocking apex.optimizers.FusedAdam should be fine for testing and allow # the test to run without requiring an apex installation. mock = Mock() modules = { "apex": mock, "apex.optimizers": mock.optimizers, "apex.optimizers.FusedAdam": mock.optimizers.FusedAdam, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir=tmp_dir), mock.optimizers.FusedAdam, default_adam_kwargs, ) def test_fused_adam_no_apex(self): with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(optim=OptimizerNames.ADAMW_APEX_FUSED, output_dir=tmp_dir) # Pretend that apex does not exist, even if installed. By setting apex to None, importing # apex will fail even if apex is installed. with patch.dict("sys.modules", {"apex.optimizers": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_adam8bit(self): # Pretend that Bits and Bytes is installed and mock bnb.optim.Adam8bit exists. # Trainer.get_optimizer_cls_and_kwargs does not use Adam8bit. It only has to return the # class given, so mocking bnb.optim.Adam8bit should be fine for testing and allow # the test to run without requiring a bnb installation. mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir=tmp_dir), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_paged_adam8bit_alias(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_8BIT, output_dir=tmp_dir), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_paged_adam(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir=tmp_dir), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_paged_adam8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdamW": mock.optim.AdamW, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir=tmp_dir), mock.optim.AdamW, default_adam_kwargs, ) def test_bnb_ademamix(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdEMAMix": mock.optim.AdEMAMix, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADEMAMIX, output_dir=tmp_dir), mock.optim.AdEMAMix, default_ademamix_kwargs, ) def test_bnb_ademamix8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdEMAMix": mock.optim.AdEMAMix, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): 
self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADEMAMIX_8BIT, output_dir=tmp_dir), mock.optim.AdEMAMix, default_ademamix_kwargs, ) def test_bnb_paged_ademamix(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdEMAMix": mock.optim.AdEMAMix, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_ADEMAMIX, output_dir=tmp_dir), mock.optim.AdEMAMix, default_ademamix_kwargs, ) def test_bnb_paged_ademamix8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.AdEMAMix": mock.optim.AdEMAMix, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_ADEMAMIX_8BIT, output_dir=tmp_dir), mock.optim.AdEMAMix, default_ademamix_kwargs, ) def test_bnb_lion(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.LION, output_dir=tmp_dir), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_lion8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.LION_8BIT, output_dir=tmp_dir), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_paged_lion8bit(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir=tmp_dir), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_paged_lion(self): mock = Mock() modules = { "bitsandbytes": mock, "bitsandbytes.optim": mock.optim, "bitsandbytes.optim.Lion": mock.optim.Lion, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.PAGED_LION, output_dir=tmp_dir), mock.optim.Lion, default_lion_kwargs, ) def test_bnb_adam8bit_no_bnb(self): with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(optim=OptimizerNames.ADAMW_BNB, output_dir=tmp_dir) # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if `bitsandbytes` is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_adam_no_bnb(self): with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(optim=OptimizerNames.PAGED_ADAMW, output_dir=tmp_dir) # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if `bitsandbytes` is installed. 
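# patch.dict restores sys.modules on exit, so later tests still see the real bitsandbytes package when it is installed.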
with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_adam8bit_no_bnb(self): with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(optim=OptimizerNames.PAGED_ADAMW_8BIT, output_dir=tmp_dir) # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if `bitsandbytes` is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_ademamix_no_bnb(self): with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(optim=OptimizerNames.ADEMAMIX, output_dir=tmp_dir) # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if `bitsandbytes` is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_ademamix8bit_no_bnb(self): with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(optim=OptimizerNames.ADEMAMIX_8BIT, output_dir=tmp_dir) # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if `bitsandbytes` is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_ademamix_no_bnb(self): with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(optim=OptimizerNames.PAGED_ADEMAMIX, output_dir=tmp_dir) # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if `bitsandbytes` is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_ademamix8bit_no_bnb(self): with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(optim=OptimizerNames.PAGED_ADEMAMIX_8BIT, output_dir=tmp_dir) # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if `bitsandbytes` is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_lion_no_bnb(self): with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(optim=OptimizerNames.PAGED_LION, output_dir=tmp_dir) # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if `bitsandbytes` is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_bnb_paged_lion8bit_no_bnb(self): with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(optim=OptimizerNames.PAGED_LION_8BIT, output_dir=tmp_dir) # Pretend that bnb does not exist, even if installed. By setting bnb to None, importing # bnb will fail even if `bitsandbytes` is installed. with patch.dict("sys.modules", {"bitsandbytes.optim": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) def test_anyprecision_adamw(self): # Pretend that torchdistx is installed and mock torchdistx.optimizers.AnyPrecisionAdamW exists. # Trainer.get_optimizer_cls_and_kwargs does not use AnyPrecisioinAdamW. 
It only has to return the # class given, so mocking torchdistx.optimizers.AnyPrecisionAdamW should be fine for testing and allow # the test to run without requiring a bnb installation. mock = Mock() modules = { "torchdistx": mock, "torchdistx.optimizers": mock.optimizers, "torchdistx.optimizers.AnyPrecisionAdamW.": mock.optimizers.AnyPrecisionAdamW, } with tempfile.TemporaryDirectory() as tmp_dir: with patch.dict("sys.modules", modules): self.check_optim_and_kwargs( TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir=tmp_dir), mock.optimizers.AnyPrecisionAdamW, dict(default_adam_kwargs, **default_anyprecision_kwargs), ) def test_no_torchdistx_anyprecision_adamw(self): with tempfile.TemporaryDirectory() as tmp_dir: args = TrainingArguments(optim=OptimizerNames.ADAMW_ANYPRECISION, output_dir=tmp_dir) # Pretend that torchdistx does not exist, even if installed. By setting torchdistx to None, importing # torchdistx.optimizers will fail even if torchdistx is installed. with patch.dict("sys.modules", {"torchdistx.optimizers": None}): with self.assertRaises(ValueError): Trainer.get_optimizer_cls_and_kwargs(args) @require_torch @require_wandb class TrainerHyperParameterWandbIntegrationTest(unittest.TestCase): def setUp(self): args = TrainingArguments("..") self.n_epochs = args.num_train_epochs self.batch_size = args.train_batch_size def test_hyperparameter_search(self): def hp_space(trial): return { "method": "random", "metric": {}, "parameters": { "a": {"distribution": "uniform", "min": 1e-6, "max": 1e-4}, "b": {"distribution": "int_uniform", "min": 1, "max": 6}, }, } def model_init(config): if config is None: a = 0 b = 0 else: a = config["a"] b = config["b"] model_config = RegressionModelConfig(a=a, b=b, double_output=False) return RegressionPreTrainedModel(model_config) with tempfile.TemporaryDirectory() as tmp_dir: trainer = get_regression_trainer( output_dir=tmp_dir, learning_rate=0.1, logging_steps=1, eval_strategy=IntervalStrategy.EPOCH, save_strategy=IntervalStrategy.EPOCH, num_train_epochs=4, disable_tqdm=True, load_best_model_at_end=True, logging_dir="runs", run_name="test", model_init=model_init, ) sweep_kwargs = { "direction": "minimize", "hp_space": hp_space, "backend": "wandb", "n_trials": 4, } best_run = trainer.hyperparameter_search(**sweep_kwargs) self.assertIsNotNone(best_run.run_id) self.assertIsNotNone(best_run.run_summary) hp_keys = set(best_run.hyperparameters.keys()) self.assertSetEqual(hp_keys, {"a", "b", "assignments", "metric"}) # pretend restarting the process purged the environ import os del os.environ["WANDB_ENTITY"] del os.environ["WANDB_PROJECT"] sweep_kwargs["sweep_id"] = best_run.run_summary updated_best_run = trainer.hyperparameter_search(**sweep_kwargs) self.assertIsNotNone(updated_best_run.run_id) self.assertEqual(updated_best_run.run_summary, best_run.run_summary) updated_hp_keys = set(updated_best_run.hyperparameters.keys()) self.assertSetEqual(updated_hp_keys, {"a", "b", "assignments", "metric"}) class HyperParameterSearchBackendsTest(unittest.TestCase): def test_hyperparameter_search_backends(self): self.assertEqual( list(ALL_HYPERPARAMETER_SEARCH_BACKENDS.keys()), list(HPSearchBackend), ) @require_torch class OptimizerAndModelInspectionTest(unittest.TestCase): def test_get_num_trainable_parameters(self): model = nn.Sequential(nn.Linear(128, 64), nn.Linear(64, 32)) # in_features * out_features + bias layer_1 = 128 * 64 + 64 layer_2 = 64 * 32 + 32 with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer(model=model, 
args=TrainingArguments(output_dir=tmp_dir, report_to="none")) self.assertEqual(trainer.get_num_trainable_parameters(), layer_1 + layer_2) # Freeze the last layer for param in model[-1].parameters(): param.requires_grad = False self.assertEqual(trainer.get_num_trainable_parameters(), layer_1) def test_get_learning_rates(self): model = nn.Sequential(nn.Linear(128, 64)) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer(model=model, args=TrainingArguments(output_dir=tmp_dir, report_to="none")) with self.assertRaises(ValueError): trainer.get_learning_rates() trainer.create_optimizer() self.assertEqual(trainer.get_learning_rates(), [5e-05, 5e-05]) def test_get_optimizer_group(self): model = nn.Sequential(nn.Linear(128, 64)) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer(model=model, args=TrainingArguments(output_dir=tmp_dir, report_to="none")) # ValueError is raised if optimizer is None with self.assertRaises(ValueError): trainer.get_optimizer_group() trainer.create_optimizer() # Get groups num_groups = len(trainer.get_optimizer_group()) self.assertEqual(num_groups, 2) # Get group of parameter param = next(model.parameters()) group = trainer.get_optimizer_group(param) self.assertIn(param, group["params"]) @require_bitsandbytes def test_bnb_8bit_optimizer_skip_embedding(self): model = BasicTextGenerationModel(8, 4) with tempfile.TemporaryDirectory() as tmp_dir: for name_optim in ["rmsprop_bnb_8bit", "adamw_8bit"]: args = TrainingArguments( output_dir=tmp_dir, report_to="none", optim=name_optim, ) trainer = Trainer(model=model, args=args) optimizer = trainer.create_optimizer() modules = optimizer.mng.module_weight_config_triple self.assertNotEqual(len(modules), 0) module, name, config = modules[0] self.assertIsInstance(module, torch.nn.Embedding) self.assertEqual(name, "weight") self.assertDictEqual(config, {"optim_bits": 32})
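# The bitsandbytes/torchdistx optimizer tests above all rely on the same trick: Mock
# objects are injected into sys.modules so that the optional backend looks importable
# (or, when a key is mapped to None, deliberately unimportable) without ever installing
# it. A minimal, self-contained sketch of that pattern is shown below; the module name
# `fake_backend` and the `load_optimizer_class` helper are illustrative stand-ins, not
# part of transformers.
from unittest.mock import Mock, patch


def load_optimizer_class():
    # Stand-in for code such as Trainer.get_optimizer_cls_and_kwargs that imports an
    # optional dependency lazily and raises a clear error when it is missing.
    try:
        from fake_backend.optim import Lion  # hypothetical optional dependency
    except ImportError:
        raise ValueError("fake_backend is required for this optimizer")
    return Lion


mock = Mock()
modules = {"fake_backend": mock, "fake_backend.optim": mock.optim}
with patch.dict("sys.modules", modules):
    # The import inside load_optimizer_class resolves to the mock, so the returned
    # class can be compared against mock.optim.Lion just like in the tests above.
    assert load_optimizer_class() is mock.optim.Lion

with patch.dict("sys.modules", {"fake_backend.optim": None}):
    # Mapping the submodule to None makes the import fail even if the package were
    # installed, which exercises the missing-dependency branch.
    try:
        load_optimizer_class()
        raise AssertionError("expected a ValueError for the missing backend")
    except ValueError:
        pass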
transformers/tests/trainer/test_trainer.py/0
{ "file_path": "transformers/tests/trainer/test_trainer.py", "repo_id": "transformers", "token_count": 123327 }
601
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import tempfile import textwrap import unittest from datetime import date from pathlib import Path import transformers.commands.add_new_model_like from transformers.commands.add_new_model_like import ModelInfos, create_new_model_like from transformers.testing_utils import require_torch REPO_PATH = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) MODELS_TO_COPY = ("auto", "llama", "phi4_multimodal") CURRENT_YEAR = date.today().year @require_torch class TestAddNewModelLike(unittest.TestCase): @classmethod def setUpClass(cls): """ Create a temporary repo with the same structure as Transformers, with just 2 models. """ cls.FAKE_REPO = tempfile.TemporaryDirectory().name os.makedirs(os.path.join(cls.FAKE_REPO, "src", "transformers", "models"), exist_ok=True) os.makedirs(os.path.join(cls.FAKE_REPO, "tests", "models"), exist_ok=True) os.makedirs(os.path.join(cls.FAKE_REPO, "docs", "source", "en", "model_doc"), exist_ok=True) # We need to copy the utils to run the cleanup commands utils_src = os.path.join(REPO_PATH, "utils") shutil.copytree(utils_src, utils_src.replace(REPO_PATH, cls.FAKE_REPO)) # Copy the __init__ files model_init = os.path.join(REPO_PATH, "src", "transformers", "models", "__init__.py") shutil.copy(model_init, model_init.replace(REPO_PATH, cls.FAKE_REPO)) doc_toc = os.path.join(REPO_PATH, "docs", "source", "en", "_toctree.yml") shutil.copy(doc_toc, doc_toc.replace(REPO_PATH, cls.FAKE_REPO)) # We need the pyproject for ruff as well pyproject = os.path.join(REPO_PATH, "pyproject.toml") shutil.copy(pyproject, pyproject.replace(REPO_PATH, cls.FAKE_REPO)) # Copy over all the specific model files for model in MODELS_TO_COPY: model_src = os.path.join(REPO_PATH, "src", "transformers", "models", model) shutil.copytree(model_src, model_src.replace(REPO_PATH, cls.FAKE_REPO)) test_src = os.path.join(REPO_PATH, "tests", "models", model) shutil.copytree(test_src, test_src.replace(REPO_PATH, cls.FAKE_REPO)) if model != "auto": doc_src = os.path.join(REPO_PATH, "docs", "source", "en", "model_doc", f"{model}.md") shutil.copy(doc_src, doc_src.replace(REPO_PATH, cls.FAKE_REPO)) # Replace the globals cls.ORIGINAL_REPO = transformers.commands.add_new_model_like.REPO_PATH cls.ORIGINAL_TRANSFORMERS_REPO = transformers.commands.add_new_model_like.TRANSFORMERS_PATH transformers.commands.add_new_model_like.REPO_PATH = Path(cls.FAKE_REPO) transformers.commands.add_new_model_like.TRANSFORMERS_PATH = Path(cls.FAKE_REPO) / "src" / "transformers" # For convenience cls.MODEL_PATH = os.path.join(cls.FAKE_REPO, "src", "transformers", "models") cls.TESTS_MODEL_PATH = os.path.join(cls.FAKE_REPO, "tests", "models") cls.DOC_PATH = os.path.join(cls.FAKE_REPO, "docs", "source", "en") @classmethod def tearDownClass(cls): transformers.commands.add_new_model_like.REPO_PATH = cls.ORIGINAL_REPO transformers.commands.add_new_model_like.TRANSFORMERS_PATH = cls.ORIGINAL_TRANSFORMERS_REPO del 
cls.FAKE_REPO def assertFileIsEqual(self, text: str, filepath: str): with open(filepath, "r") as f: file_text = f.read() self.assertEqual(file_text.strip(), text.strip()) def assertInFile(self, text: str, filepath: str): with open(filepath, "r") as f: file_text = f.read() self.assertTrue(text in file_text) def test_llama_without_tokenizers(self): # This is the structure without adding the tokenizers filenames_to_add = ( ("configuration_llama.py", True), ("modeling_llama.py", True), ("tokenization_llama.py", False), ("tokenization_llama_fast.py", False), ("image_processing_llama.py", False), ("image_processing_llama_fast.py", False), ("video_processing_llama.py", False), ("feature_extraction_llama.py", False), ("processing_llama.py", False), ) # Run the command create_new_model_like( old_model_infos=ModelInfos("llama"), new_lowercase_name="my_test", new_model_paper_name="MyTest", filenames_to_add=filenames_to_add, create_fast_image_processor=False, ) # First assert that all files were created correctly model_repo = os.path.join(self.MODEL_PATH, "my_test") tests_repo = os.path.join(self.TESTS_MODEL_PATH, "my_test") self.assertTrue(os.path.isfile(os.path.join(model_repo, "modular_my_test.py"))) self.assertTrue(os.path.isfile(os.path.join(model_repo, "modeling_my_test.py"))) self.assertTrue(os.path.isfile(os.path.join(model_repo, "configuration_my_test.py"))) self.assertTrue(os.path.isfile(os.path.join(model_repo, "__init__.py"))) self.assertTrue(os.path.isfile(os.path.join(self.DOC_PATH, "model_doc", "my_test.md"))) self.assertTrue(os.path.isfile(os.path.join(tests_repo, "__init__.py"))) self.assertTrue(os.path.isfile(os.path.join(tests_repo, "test_modeling_my_test.py"))) # Now assert the correct imports/auto mappings/toctree were added self.assertInFile( "from .my_test import *\n", os.path.join(self.MODEL_PATH, "__init__.py"), ) self.assertInFile( '("my_test", "MyTestConfig"),\n', os.path.join(self.MODEL_PATH, "auto", "configuration_auto.py"), ) self.assertInFile( '("my_test", "MyTest"),\n', os.path.join(self.MODEL_PATH, "auto", "configuration_auto.py"), ) self.assertInFile( '("my_test", "MyTestModel"),\n', os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"), ) self.assertInFile( '("my_test", "MyTestForCausalLM"),\n', os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"), ) self.assertInFile( '("my_test", "MyTestForSequenceClassification"),\n', os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"), ) self.assertInFile( '("my_test", "MyTestForQuestionAnswering"),\n', os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"), ) self.assertInFile( '("my_test", "MyTestForTokenClassification"),\n', os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"), ) self.assertInFile( "- local: model_doc/my_test\n title: MyTest\n", os.path.join(self.DOC_PATH, "_toctree.yml"), ) # Check some exact file creation. For model definition, only check modular as modeling/config/etc... are created # directly from it EXPECTED_MODULAR = textwrap.dedent( f""" # coding=utf-8 # Copyright {CURRENT_YEAR} the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. from ..llama.configuration_llama import LlamaConfig from ..llama.modeling_llama import ( LlamaAttention, LlamaDecoderLayer, LlamaForCausalLM, LlamaForQuestionAnswering, LlamaForSequenceClassification, LlamaForTokenClassification, LlamaMLP, LlamaModel, LlamaPreTrainedModel, LlamaRMSNorm, LlamaRotaryEmbedding, ) class MyTestConfig(LlamaConfig): pass class MyTestRMSNorm(LlamaRMSNorm): pass class MyTestRotaryEmbedding(LlamaRotaryEmbedding): pass class MyTestMLP(LlamaMLP): pass class MyTestAttention(LlamaAttention): pass class MyTestDecoderLayer(LlamaDecoderLayer): pass class MyTestPreTrainedModel(LlamaPreTrainedModel): pass class MyTestModel(LlamaModel): pass class MyTestForCausalLM(LlamaForCausalLM): pass class MyTestForSequenceClassification(LlamaForSequenceClassification): pass class MyTestForQuestionAnswering(LlamaForQuestionAnswering): pass class MyTestForTokenClassification(LlamaForTokenClassification): pass __all__ = [ "MyTestConfig", "MyTestForCausalLM", "MyTestModel", "MyTestPreTrainedModel", "MyTestForSequenceClassification", "MyTestForQuestionAnswering", "MyTestForTokenClassification", ] """ ) self.assertFileIsEqual(EXPECTED_MODULAR, os.path.join(model_repo, "modular_my_test.py")) EXPECTED_INIT = textwrap.dedent( f""" # coding=utf-8 # Copyright {CURRENT_YEAR} the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_my_test import * from .modeling_my_test import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) """ ) self.assertFileIsEqual(EXPECTED_INIT, os.path.join(model_repo, "__init__.py")) EXPECTED_DOC = textwrap.dedent( f""" <!--Copyright {CURRENT_YEAR} the HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MyTest ## Overview The MyTest model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>. 
<INSERT SHORT SUMMARY HERE> The abstract from the paper is the following: <INSERT PAPER ABSTRACT HERE> Tips: <INSERT TIPS ABOUT MODEL HERE> This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>). The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>). ## Usage examples <INSERT SOME NICE EXAMPLES HERE> ## MyTestConfig [[autodoc]] MyTestConfig ## MyTestForCausalLM [[autodoc]] MyTestForCausalLM ## MyTestModel [[autodoc]] MyTestModel - forward ## MyTestPreTrainedModel [[autodoc]] MyTestPreTrainedModel - forward ## MyTestForSequenceClassification [[autodoc]] MyTestForSequenceClassification ## MyTestForQuestionAnswering [[autodoc]] MyTestForQuestionAnswering ## MyTestForTokenClassification [[autodoc]] MyTestForTokenClassification """ ) self.assertFileIsEqual(EXPECTED_DOC, os.path.join(self.DOC_PATH, "model_doc", "my_test.md")) def test_phi4_with_all_processors(self): # This is the structure without adding the tokenizers filenames_to_add = ( ("configuration_phi4_multimodal.py", True), ("modeling_phi4_multimodal.py", True), ("tokenization_phi4_multimodal.py", False), ("tokenization_phi4_multimodal_fast.py", False), ("image_processing_phi4_multimodal.py", False), ("image_processing_phi4_multimodal_fast.py", True), ("video_processing_phi4_multimodal.py", False), ("feature_extraction_phi4_multimodal.py", True), ("processing_phi4_multimodal.py", True), ) # Run the command create_new_model_like( old_model_infos=ModelInfos("phi4_multimodal"), new_lowercase_name="my_test2", new_model_paper_name="MyTest2", filenames_to_add=filenames_to_add, create_fast_image_processor=False, ) # First assert that all files were created correctly model_repo = os.path.join(self.MODEL_PATH, "my_test2") tests_repo = os.path.join(self.TESTS_MODEL_PATH, "my_test2") self.assertTrue(os.path.isfile(os.path.join(model_repo, "modular_my_test2.py"))) self.assertTrue(os.path.isfile(os.path.join(model_repo, "modeling_my_test2.py"))) self.assertTrue(os.path.isfile(os.path.join(model_repo, "configuration_my_test2.py"))) self.assertTrue(os.path.isfile(os.path.join(model_repo, "image_processing_my_test2_fast.py"))) self.assertTrue(os.path.isfile(os.path.join(model_repo, "feature_extraction_my_test2.py"))) self.assertTrue(os.path.isfile(os.path.join(model_repo, "processing_my_test2.py"))) self.assertTrue(os.path.isfile(os.path.join(model_repo, "__init__.py"))) self.assertTrue(os.path.isfile(os.path.join(self.DOC_PATH, "model_doc", "my_test2.md"))) self.assertTrue(os.path.isfile(os.path.join(tests_repo, "__init__.py"))) self.assertTrue(os.path.isfile(os.path.join(tests_repo, "test_modeling_my_test2.py"))) self.assertTrue(os.path.isfile(os.path.join(tests_repo, "test_feature_extraction_my_test2.py"))) self.assertTrue(os.path.isfile(os.path.join(tests_repo, "test_image_processing_my_test2.py"))) # Now assert the correct imports/auto mappings/toctree were added self.assertInFile( "from .my_test2 import *\n", os.path.join(self.MODEL_PATH, "__init__.py"), ) self.assertInFile( '("my_test2", "MyTest2Config"),\n', os.path.join(self.MODEL_PATH, "auto", "configuration_auto.py"), ) self.assertInFile( '("my_test2", "MyTest2"),\n', os.path.join(self.MODEL_PATH, "auto", "configuration_auto.py"), ) self.assertInFile( '("my_test2", "MyTest2Model"),\n', os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"), ) self.assertInFile( '("my_test2", "MyTest2ForCausalLM"),\n', os.path.join(self.MODEL_PATH, "auto", "modeling_auto.py"), ) self.assertInFile( '("my_test2", (None, 
"MyTest2ImageProcessorFast")),\n', os.path.join(self.MODEL_PATH, "auto", "image_processing_auto.py"), ) self.assertInFile( '("my_test2", "MyTest2FeatureExtractor"),\n', os.path.join(self.MODEL_PATH, "auto", "feature_extraction_auto.py"), ) self.assertInFile( '("my_test2", "MyTest2Processor"),\n', os.path.join(self.MODEL_PATH, "auto", "processing_auto.py"), ) self.assertInFile( "- local: model_doc/my_test2\n title: MyTest2\n", os.path.join(self.DOC_PATH, "_toctree.yml"), ) # Check some exact file creation. For model definition, only check modular as modeling/config/etc... are created # directly from it EXPECTED_MODULAR = textwrap.dedent( f""" # coding=utf-8 # Copyright {CURRENT_YEAR} the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..phi4_multimodal.configuration_phi4_multimodal import ( Phi4MultimodalAudioConfig, Phi4MultimodalConfig, Phi4MultimodalVisionConfig, ) from ..phi4_multimodal.feature_extraction_phi4_multimodal import Phi4MultimodalFeatureExtractor from ..phi4_multimodal.image_processing_phi4_multimodal_fast import ( Phi4MultimodalFastImageProcessorKwargs, Phi4MultimodalImageProcessorFast, ) from ..phi4_multimodal.modeling_phi4_multimodal import ( Phi4MultimodalAttention, Phi4MultimodalAudioAttention, Phi4MultimodalAudioConformerEncoderLayer, Phi4MultimodalAudioConvModule, Phi4MultimodalAudioDepthWiseSeperableConv1d, Phi4MultimodalAudioEmbedding, Phi4MultimodalAudioGluPointWiseConv, Phi4MultimodalAudioMeanVarianceNormLayer, Phi4MultimodalAudioMLP, Phi4MultimodalAudioModel, Phi4MultimodalAudioNemoConvSubsampling, Phi4MultimodalAudioPreTrainedModel, Phi4MultimodalAudioRelativeAttentionBias, Phi4MultimodalDecoderLayer, Phi4MultimodalFeatureEmbedding, Phi4MultimodalForCausalLM, Phi4MultimodalImageEmbedding, Phi4MultimodalMLP, Phi4MultimodalModel, Phi4MultimodalPreTrainedModel, Phi4MultimodalRMSNorm, Phi4MultimodalRotaryEmbedding, Phi4MultimodalVisionAttention, Phi4MultimodalVisionEmbeddings, Phi4MultimodalVisionEncoder, Phi4MultimodalVisionEncoderLayer, Phi4MultimodalVisionMLP, Phi4MultimodalVisionModel, Phi4MultimodalVisionMultiheadAttentionPoolingHead, Phi4MultimodalVisionPreTrainedModel, ) from ..phi4_multimodal.processing_phi4_multimodal import Phi4MultimodalProcessor, Phi4MultimodalProcessorKwargs class MyTest2VisionConfig(Phi4MultimodalVisionConfig): pass class MyTest2AudioConfig(Phi4MultimodalAudioConfig): pass class MyTest2Config(Phi4MultimodalConfig): pass class MyTest2VisionMLP(Phi4MultimodalVisionMLP): pass class MyTest2VisionAttention(Phi4MultimodalVisionAttention): pass class MyTest2VisionEncoderLayer(Phi4MultimodalVisionEncoderLayer): pass class MyTest2VisionEncoder(Phi4MultimodalVisionEncoder): pass class MyTest2VisionPreTrainedModel(Phi4MultimodalVisionPreTrainedModel): pass class MyTest2VisionEmbeddings(Phi4MultimodalVisionEmbeddings): pass class MyTest2VisionMultiheadAttentionPoolingHead(Phi4MultimodalVisionMultiheadAttentionPoolingHead): pass class MyTest2VisionModel(Phi4MultimodalVisionModel): pass class 
MyTest2ImageEmbedding(Phi4MultimodalImageEmbedding): pass class MyTest2AudioMLP(Phi4MultimodalAudioMLP): pass class MyTest2AudioAttention(Phi4MultimodalAudioAttention): pass class MyTest2AudioDepthWiseSeperableConv1d(Phi4MultimodalAudioDepthWiseSeperableConv1d): pass class MyTest2AudioGluPointWiseConv(Phi4MultimodalAudioGluPointWiseConv): pass class MyTest2AudioConvModule(Phi4MultimodalAudioConvModule): pass class MyTest2AudioConformerEncoderLayer(Phi4MultimodalAudioConformerEncoderLayer): pass class MyTest2AudioNemoConvSubsampling(Phi4MultimodalAudioNemoConvSubsampling): pass class MyTest2AudioRelativeAttentionBias(Phi4MultimodalAudioRelativeAttentionBias): pass class MyTest2AudioMeanVarianceNormLayer(Phi4MultimodalAudioMeanVarianceNormLayer): pass class MyTest2AudioPreTrainedModel(Phi4MultimodalAudioPreTrainedModel): pass class MyTest2AudioModel(Phi4MultimodalAudioModel): pass class MyTest2AudioEmbedding(Phi4MultimodalAudioEmbedding): pass class MyTest2RMSNorm(Phi4MultimodalRMSNorm): pass class MyTest2MLP(Phi4MultimodalMLP): pass class MyTest2Attention(Phi4MultimodalAttention): pass class MyTest2DecoderLayer(Phi4MultimodalDecoderLayer): pass class MyTest2FeatureEmbedding(Phi4MultimodalFeatureEmbedding): pass class MyTest2RotaryEmbedding(Phi4MultimodalRotaryEmbedding): pass class MyTest2PreTrainedModel(Phi4MultimodalPreTrainedModel): pass class MyTest2Model(Phi4MultimodalModel): pass class MyTest2ForCausalLM(Phi4MultimodalForCausalLM): pass class MyTest2FastImageProcessorKwargs(Phi4MultimodalFastImageProcessorKwargs): pass class MyTest2ImageProcessorFast(Phi4MultimodalImageProcessorFast): pass class MyTest2FeatureExtractor(Phi4MultimodalFeatureExtractor): pass class MyTest2ProcessorKwargs(Phi4MultimodalProcessorKwargs): pass class MyTest2Processor(Phi4MultimodalProcessor): pass __all__ = [ "MyTest2VisionConfig", "MyTest2AudioConfig", "MyTest2Config", "MyTest2AudioPreTrainedModel", "MyTest2AudioModel", "MyTest2VisionPreTrainedModel", "MyTest2VisionModel", "MyTest2PreTrainedModel", "MyTest2Model", "MyTest2ForCausalLM", "MyTest2ImageProcessorFast", "MyTest2FeatureExtractor", "MyTest2Processor", ] """ ) self.assertFileIsEqual(EXPECTED_MODULAR, os.path.join(model_repo, "modular_my_test2.py")) EXPECTED_INIT = textwrap.dedent( f""" # coding=utf-8 # Copyright {CURRENT_YEAR} the HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from typing import TYPE_CHECKING from ...utils import _LazyModule from ...utils.import_utils import define_import_structure if TYPE_CHECKING: from .configuration_my_test2 import * from .feature_extraction_my_test2 import * from .image_processing_my_test2_fast import * from .modeling_my_test2 import * from .processing_my_test2 import * else: import sys _file = globals()["__file__"] sys.modules[__name__] = _LazyModule(__name__, _file, define_import_structure(_file), module_spec=__spec__) """ ) self.assertFileIsEqual(EXPECTED_INIT, os.path.join(model_repo, "__init__.py")) EXPECTED_DOC = textwrap.dedent( f""" <!--Copyright {CURRENT_YEAR} the HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MyTest2 ## Overview The MyTest2 model was proposed in [<INSERT PAPER NAME HERE>](<INSERT PAPER LINK HERE>) by <INSERT AUTHORS HERE>. <INSERT SHORT SUMMARY HERE> The abstract from the paper is the following: <INSERT PAPER ABSTRACT HERE> Tips: <INSERT TIPS ABOUT MODEL HERE> This model was contributed by [INSERT YOUR HF USERNAME HERE](https://huggingface.co/<INSERT YOUR HF USERNAME HERE>). The original code can be found [here](<INSERT LINK TO GITHUB REPO HERE>). ## Usage examples <INSERT SOME NICE EXAMPLES HERE> ## MyTest2VisionConfig [[autodoc]] MyTest2VisionConfig ## MyTest2AudioConfig [[autodoc]] MyTest2AudioConfig ## MyTest2Config [[autodoc]] MyTest2Config ## MyTest2AudioPreTrainedModel [[autodoc]] MyTest2AudioPreTrainedModel - forward ## MyTest2AudioModel [[autodoc]] MyTest2AudioModel - forward ## MyTest2VisionPreTrainedModel [[autodoc]] MyTest2VisionPreTrainedModel - forward ## MyTest2VisionModel [[autodoc]] MyTest2VisionModel - forward ## MyTest2PreTrainedModel [[autodoc]] MyTest2PreTrainedModel - forward ## MyTest2Model [[autodoc]] MyTest2Model - forward ## MyTest2ForCausalLM [[autodoc]] MyTest2ForCausalLM ## MyTest2ImageProcessorFast [[autodoc]] MyTest2ImageProcessorFast ## MyTest2FeatureExtractor [[autodoc]] MyTest2FeatureExtractor ## MyTest2Processor [[autodoc]] MyTest2Processor """ ) self.assertFileIsEqual(EXPECTED_DOC, os.path.join(self.DOC_PATH, "model_doc", "my_test2.md"))
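# The expected-file checks in the tests above (assertFileIsEqual) compare generated
# files against triple-quoted templates passed through textwrap.dedent, so the
# templates can stay naturally indented inside the test methods while leading and
# trailing blank lines are ignored. A small self-contained illustration of that
# comparison pattern follows; the helper name and the sample file content are
# illustrative only.
import os
import tempfile
import textwrap


def assert_file_equals(expected: str, filepath: str) -> None:
    # Strip both sides so the dedented template does not have to reproduce the exact
    # blank lines surrounding the generated content.
    with open(filepath, "r") as f:
        assert f.read().strip() == expected.strip()


expected = textwrap.dedent(
    """
    class MyTestConfig:
        pass
    """
)

with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
    tmp.write("class MyTestConfig:\n    pass\n")
    path = tmp.name

assert_file_equals(expected, path)
os.remove(path)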
transformers/tests/utils/test_add_new_model_like.py/0
{ "file_path": "transformers/tests/utils/test_add_new_model_like.py", "repo_id": "transformers", "token_count": 14635 }
602
# Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import warnings import numpy as np import pytest from transformers.configuration_utils import PretrainedConfig from transformers.modeling_outputs import BaseModelOutput from transformers.testing_utils import require_torch from transformers.utils import ( can_return_tuple, expand_dims, filter_out_non_signature_kwargs, flatten_dict, is_torch_available, reshape, squeeze, to_py_obj, transpose, ) if is_torch_available(): import torch class GenericTester(unittest.TestCase): def test_flatten_dict(self): input_dict = { "task_specific_params": { "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4}, "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4}, "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6}, } } expected_dict = { "task_specific_params.summarization.length_penalty": 1.0, "task_specific_params.summarization.max_length": 128, "task_specific_params.summarization.min_length": 12, "task_specific_params.summarization.num_beams": 4, "task_specific_params.summarization_cnn.length_penalty": 2.0, "task_specific_params.summarization_cnn.max_length": 142, "task_specific_params.summarization_cnn.min_length": 56, "task_specific_params.summarization_cnn.num_beams": 4, "task_specific_params.summarization_xsum.length_penalty": 1.0, "task_specific_params.summarization_xsum.max_length": 62, "task_specific_params.summarization_xsum.min_length": 11, "task_specific_params.summarization_xsum.num_beams": 6, } self.assertEqual(flatten_dict(input_dict), expected_dict) def test_transpose_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(transpose(x), x.transpose())) x = np.random.randn(3, 4, 5) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0)))) @require_torch def test_transpose_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(transpose(x), transpose(t).numpy())) x = np.random.randn(3, 4, 5) t = torch.tensor(x) self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy())) @require_torch def test_reshape_torch(self): x = np.random.randn(3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy())) x = np.random.randn(3, 4, 5) t = torch.tensor(x) self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy())) @require_torch def test_squeeze_torch(self): x = np.random.randn(1, 3, 4) t = torch.tensor(x) self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy())) x = np.random.randn(1, 4, 1, 5) t = torch.tensor(x) self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy())) def test_expand_dims_numpy(self): x = np.random.randn(3, 4) self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1))) @require_torch def test_expand_dims_torch(self): x = np.random.randn(3, 4) t = 
torch.tensor(x) self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy())) def test_to_py_obj_native(self): self.assertTrue(to_py_obj(1) == 1) self.assertTrue(to_py_obj([1, 2, 3]) == [1, 2, 3]) self.assertTrue(to_py_obj([((1.0, 1.1), 1.2), (2, 3)]) == [[[1.0, 1.1], 1.2], [2, 3]]) def test_to_py_obj_numpy(self): x1 = [[1, 2, 3], [4, 5, 6]] t1 = np.array(x1) self.assertTrue(to_py_obj(t1) == x1) x2 = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]] t2 = np.array(x2) self.assertTrue(to_py_obj(t2) == x2) self.assertTrue(to_py_obj([t1, t2]) == [x1, x2]) @require_torch def test_to_py_obj_torch(self): x1 = [[1, 2, 3], [4, 5, 6]] t1 = torch.tensor(x1) self.assertTrue(to_py_obj(t1) == x1) x2 = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]] t2 = torch.tensor(x2) self.assertTrue(to_py_obj(t2) == x2) self.assertTrue(to_py_obj([t1, t2]) == [x1, x2]) class ValidationDecoratorTester(unittest.TestCase): def test_cases_no_warning(self): with warnings.catch_warnings(record=True) as raised_warnings: warnings.simplefilter("always") # basic test @filter_out_non_signature_kwargs() def func1(a): return a result = func1(1) self.assertEqual(result, 1) # include extra kwarg @filter_out_non_signature_kwargs(extra=["extra_arg"]) def func2(a, **kwargs): return a, kwargs a, kwargs = func2(1) self.assertEqual(a, 1) self.assertEqual(kwargs, {}) a, kwargs = func2(1, extra_arg=2) self.assertEqual(a, 1) self.assertEqual(kwargs, {"extra_arg": 2}) # multiple extra kwargs @filter_out_non_signature_kwargs(extra=["extra_arg", "extra_arg2"]) def func3(a, **kwargs): return a, kwargs a, kwargs = func3(2) self.assertEqual(a, 2) self.assertEqual(kwargs, {}) a, kwargs = func3(3, extra_arg2=3) self.assertEqual(a, 3) self.assertEqual(kwargs, {"extra_arg2": 3}) a, kwargs = func3(1, extra_arg=2, extra_arg2=3) self.assertEqual(a, 1) self.assertEqual(kwargs, {"extra_arg": 2, "extra_arg2": 3}) # Check that no warnings were raised self.assertEqual(len(raised_warnings), 0, f"Warning raised: {[w.message for w in raised_warnings]}") def test_cases_with_warnings(self): @filter_out_non_signature_kwargs() def func1(a): return a with self.assertWarns(UserWarning): func1(1, extra_arg=2) @filter_out_non_signature_kwargs(extra=["extra_arg"]) def func2(a, **kwargs): return kwargs with self.assertWarns(UserWarning): kwargs = func2(1, extra_arg=2, extra_arg2=3) self.assertEqual(kwargs, {"extra_arg": 2}) @filter_out_non_signature_kwargs(extra=["extra_arg", "extra_arg2"]) def func3(a, **kwargs): return kwargs with self.assertWarns(UserWarning): kwargs = func3(1, extra_arg=2, extra_arg2=3, extra_arg3=4) self.assertEqual(kwargs, {"extra_arg": 2, "extra_arg2": 3}) @require_torch class CanReturnTupleDecoratorTester(unittest.TestCase): def _get_model(self, config, store_config=True, raise_in_forward=False): # Simple model class for testing can_return_tuple decorator. 
class SimpleTestModel(torch.nn.Module): def __init__(self, config): super().__init__() if store_config: self.config = config @can_return_tuple def forward(self, x): if raise_in_forward: raise ValueError("Test error") return BaseModelOutput( last_hidden_state=x, hidden_states=None, attentions=None, ) return SimpleTestModel(config) def test_decorator_eager(self): """Test that the can_return_tuple decorator works with eager mode.""" # test nothing is set config = PretrainedConfig() model = self._get_model(config) inputs = torch.tensor(10) output = model(inputs) self.assertIsInstance( output, BaseModelOutput, "output should be a BaseModelOutput when return_dict is not set" ) # test all explicit cases for config_return_dict in [True, False, None]: for return_dict in [True, False, None]: config = PretrainedConfig(return_dict=config_return_dict) model = self._get_model(config) output = model(torch.tensor(10), return_dict=return_dict) expected_type = ( tuple if return_dict is False else (tuple if config_return_dict is False and return_dict is None else BaseModelOutput) ) if config_return_dict is None and return_dict is None: expected_type = tuple message = f"output should be a {expected_type.__name__} when config.use_return_dict={config_return_dict} and return_dict={return_dict}" self.assertIsInstance(output, expected_type, message) @pytest.mark.torch_compile_test def test_decorator_compiled(self): """Test that the can_return_tuple decorator works with compiled mode.""" config = PretrainedConfig() # Output object model = self._get_model(config) compiled_model = torch.compile(model) output = compiled_model(torch.tensor(10)) self.assertIsInstance(output, BaseModelOutput) # Tuple output model = self._get_model(config) compiled_model = torch.compile(model) output = compiled_model(torch.tensor(10), return_dict=False) self.assertIsInstance(output, tuple) @pytest.mark.torch_export_test def test_decorator_torch_export(self): """Test that the can_return_tuple decorator works with torch.export.""" config = PretrainedConfig() model = self._get_model(config) torch.export.export(model, args=(torch.tensor(10),)) def test_decorator_torchscript(self): """Test that the can_return_tuple decorator works with torch.jit.trace.""" config = PretrainedConfig(return_dict=False) model = self._get_model(config) inputs = torch.tensor(10) traced_module = torch.jit.trace(model, inputs) output = traced_module(inputs) self.assertIsInstance(output, tuple) def test_attribute_cleanup(self): """Test that the `_is_top_level_module` attribute is removed after the forward call.""" config = PretrainedConfig(return_dict=False) inputs = torch.tensor(10) # working case model = self._get_model(config) output = model(inputs) self.assertIsInstance(output, tuple) for name, module in model.named_modules(): self.assertFalse( hasattr(module, "_is_top_level_module"), f"Module `{name}` should not have `_is_top_level_module` attribute", ) # model without config no_config_model = self._get_model(config, store_config=False) output = no_config_model(inputs) self.assertIsInstance(output, BaseModelOutput) for name, module in no_config_model.named_modules(): self.assertFalse( hasattr(module, "_is_top_level_module"), f"Module `{name}` should not have `_is_top_level_module` attribute", ) # model with raise in forward model_with_raise = self._get_model(config, raise_in_forward=True) with self.assertRaises(ValueError): model_with_raise(inputs) for name, module in model_with_raise.named_modules(): self.assertFalse( hasattr(module, "_is_top_level_module"), 
f"Module `{name}` should not have `_is_top_level_module` attribute", )
transformers/tests/utils/test_generic.py/0
{ "file_path": "transformers/tests/utils/test_generic.py", "repo_id": "transformers", "token_count": 5891 }
603
# Copyright 2019 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder from huggingface_hub.file_download import http_get from requests.exceptions import HTTPError from transformers import ( AlbertTokenizer, AutoTokenizer, BertTokenizer, BertTokenizerFast, GPT2TokenizerFast, is_tokenizers_available, ) from transformers.testing_utils import TOKEN, TemporaryHubRepo, is_staging_test, require_tokenizers from transformers.tokenization_utils import ExtensionsTrie, Trie sys.path.append(str(Path(__file__).parent.parent.parent / "utils")) from test_module.custom_tokenization import CustomTokenizer # noqa E402 if is_tokenizers_available(): from test_module.custom_tokenization_fast import CustomTokenizerFast class TokenizerUtilTester(unittest.TestCase): def test_cached_files_are_used_when_internet_is_down(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert") # This check we did call the fake head request mock_head.assert_called() @require_tokenizers def test_cached_files_are_used_when_internet_is_down_missing_files(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. _ = GPT2TokenizerFast.from_pretrained("openai-community/gpt2") # Under the mock environment we get a 500 error when trying to reach the tokenizer. with mock.patch("requests.Session.request", return_value=response_mock) as mock_head: _ = GPT2TokenizerFast.from_pretrained("openai-community/gpt2") # This check we did call the fake head request mock_head.assert_called() def test_legacy_load_from_one_file(self): # This test is for deprecated behavior and can be removed in v5 try: tmp_file = tempfile.NamedTemporaryFile(delete=False).name with open(tmp_file, "wb") as f: http_get("https://huggingface.co/albert/albert-base-v1/resolve/main/spiece.model", f) _ = AlbertTokenizer.from_pretrained(tmp_file) finally: os.remove(tmp_file) # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in # the current folder and have the right name. 
if os.path.isfile("tokenizer.json"): # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it. self.skipTest(reason="Skipping test as there is a `tokenizer.json` file in the current folder.") try: with open("tokenizer.json", "wb") as f: http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") # The tiny random BERT has a vocab size of 1024, tiny openai-community/gpt2 as a vocab size of 1000 self.assertEqual(tokenizer.vocab_size, 1000) # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file. finally: os.remove("tokenizer.json") @is_staging_test class TokenizerPushToHubTester(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"] @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) def test_push_to_hub(self): with TemporaryHubRepo(token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) tokenizer.push_to_hub(tmp_repo.repo_id, token=self._token) new_tokenizer = BertTokenizer.from_pretrained(tmp_repo.repo_id) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) def test_push_to_hub_via_save_pretrained(self): with TemporaryHubRepo(token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) # Push to hub via save_pretrained tokenizer.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_tokenizer = BertTokenizer.from_pretrained(tmp_repo.repo_id) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) def test_push_to_hub_in_organization(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) tokenizer.push_to_hub(tmp_repo.repo_id, token=self._token) new_tokenizer = BertTokenizer.from_pretrained(tmp_repo.repo_id) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) def test_push_to_hub_in_organization_via_save_pretrained(self): with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo: with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) # Push to hub via save_pretrained tokenizer.save_pretrained(tmp_dir, repo_id=tmp_repo.repo_id, push_to_hub=True, token=self._token) new_tokenizer = BertTokenizer.from_pretrained(tmp_repo.repo_id) self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab) @require_tokenizers def test_push_to_hub_dynamic_tokenizer(self): with TemporaryHubRepo(token=self._token) as tmp_repo: CustomTokenizer.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = 
os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = CustomTokenizer(vocab_file) # No fast custom tokenizer tokenizer.push_to_hub(tmp_repo.repo_id, token=self._token) tokenizer = AutoTokenizer.from_pretrained(tmp_repo.repo_id, trust_remote_code=True) # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer") @require_tokenizers def test_push_to_hub_dynamic_tokenizer_with_both_slow_and_fast_classes(self): with TemporaryHubRepo(token=self._token) as tmp_repo: CustomTokenizer.register_for_auto_class() # Fast and slow custom tokenizer CustomTokenizerFast.register_for_auto_class() with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir) bert_tokenizer.save_pretrained(tmp_dir) tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir) tokenizer.push_to_hub(tmp_repo.repo_id, token=self._token) tokenizer = AutoTokenizer.from_pretrained(tmp_repo.repo_id, trust_remote_code=True) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast") tokenizer = AutoTokenizer.from_pretrained(tmp_repo.repo_id, use_fast=False, trust_remote_code=True) # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer") class TrieTest(unittest.TestCase): def test_trie(self): trie = Trie() trie.add("Hello 友達") self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}) trie.add("Hello") self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}) def test_trie_split(self): trie = Trie() self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"]) trie.add("[CLS]") trie.add("extra_id_1") trie.add("extra_id_100") self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"]) def test_trie_single(self): trie = Trie() trie.add("A") self.assertEqual(trie.split("ABC"), ["A", "BC"]) self.assertEqual(trie.split("BCA"), ["BC", "A"]) def test_trie_final(self): trie = Trie() trie.add("TOKEN]") trie.add("[SPECIAL_TOKEN]") self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"]) def test_trie_subtokens(self): trie = Trie() trie.add("A") trie.add("P") trie.add("[SPECIAL_TOKEN]") self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"]) def test_trie_suffix_tokens(self): trie = Trie() trie.add("AB") trie.add("B") trie.add("C") self.assertEqual(trie.split("ABC"), ["AB", "C"]) def test_trie_skip(self): trie = Trie() trie.add("ABC") trie.add("B") trie.add("CD") self.assertEqual(trie.split("ABCD"), ["ABC", "D"]) def test_cut_text_hardening(self): # Even if the offsets are wrong, we necessarily output correct string # parts. 
trie = Trie() parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3]) self.assertEqual(parts, ["AB", "C"]) class ExtensionsTrieTest(unittest.TestCase): def test_extensions(self): # Test searching by prefix trie = ExtensionsTrie() trie.add("foo") trie.add("food") trie.add("foodie") trie.add("helium") self.assertEqual(trie.extensions("foo"), ["foo", "food", "foodie"]) self.assertEqual(trie.extensions("helium"), ["helium"]) def test_empty_prefix(self): trie = ExtensionsTrie() # Test searching with an empty prefix returns all values trie.add("hello") trie.add("bye") self.assertEqual(trie.extensions(""), ["hello", "bye"]) def test_no_extension_match(self): trie = ExtensionsTrie() # Test searching for a prefix that doesn't match any key values = trie.extensions("unknown") self.assertEqual(len(values), 0) def test_update_value(self): trie = ExtensionsTrie() # Test updating the value of an existing key trie.add("hi") trie.add("hi") self.assertEqual(trie.extensions("hi"), ["hi"])
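# Usage sketch for the Trie exercised above: added tokens are inserted into the trie
# and `split` then cuts raw text on the matching tokens (preferring longer matches),
# which is how slow tokenizers isolate special/added tokens before the regular
# tokenization pass runs on the remaining text.
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add("[CLS]")
trie.add("extra_id_1")
trie.add("extra_id_100")
print(trie.split("[CLS] This is a extra_id_100"))
# -> ['[CLS]', ' This is a ', 'extra_id_100']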
transformers/tests/utils/test_tokenization_utils.py/0
{ "file_path": "transformers/tests/utils/test_tokenization_utils.py", "repo_id": "transformers", "token_count": 5851 }
604
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import glob
import os

from get_test_info import get_tester_classes


if __name__ == "__main__":
    failures = []

    pattern = os.path.join("tests", "models", "**", "test_modeling_*.py")
    test_files = glob.glob(pattern)
    # TODO: deal with TF/Flax too
    test_files = [
        x
        for x in test_files
        if not (
            os.path.basename(x).startswith("test_modeling_tf_")
            or os.path.basename(x).startswith("test_modeling_flax_")
        )
    ]

    for test_file in test_files:
        tester_classes = get_tester_classes(test_file)
        for tester_class in tester_classes:
            # A few tester classes don't have a `parent` parameter in `__init__`.
            # TODO: deal with this better
            try:
                tester = tester_class(parent=None)
            except Exception:
                continue
            if hasattr(tester, "get_config"):
                config = tester.get_config()
                for k, v in config.to_dict().items():
                    if isinstance(v, int):
                        target = None
                        if k in ["vocab_size"]:
                            target = 100
                        elif k in ["max_position_embeddings"]:
                            target = 128
                        elif k in ["hidden_size", "d_model"]:
                            target = 40
                        elif k in ["num_layers", "num_hidden_layers", "num_encoder_layers", "num_decoder_layers"]:
                            target = 5
                        if target is not None and v > target:
                            failures.append(
                                f"{tester_class.__name__} will produce a `config` of type `{config.__class__.__name__}`"
                                f' with config["{k}"] = {v} which is too large for testing! Set its value to be smaller'
                                f" than {target}."
                            )

    if len(failures) > 0:
        raise Exception(f"There were {len(failures)} failures:\n" + "\n".join(failures))
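# A self-contained sketch of the size check performed above, applied to a plain dict
# instead of a real tester's `get_config()` output, to show how an oversized value is
# flagged. The helper name is made up; the limits mirror the targets hard-coded in the
# loop above.
SIZE_LIMITS = {"vocab_size": 100, "max_position_embeddings": 128, "hidden_size": 40, "d_model": 40}


def find_oversized(config_dict):
    failures = []
    for key, value in config_dict.items():
        target = SIZE_LIMITS.get(key)
        if target is not None and isinstance(value, int) and value > target:
            failures.append(f'config["{key}"] = {value} is too large for testing! Use a value smaller than {target}.')
    return failures


print(find_oversized({"vocab_size": 99, "hidden_size": 1024}))
# -> ['config["hidden_size"] = 1024 is too large for testing! Use a value smaller than 40.']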
transformers/utils/check_model_tester.py/0
{ "file_path": "transformers/utils/check_model_tester.py", "repo_id": "transformers", "token_count": 1240 }
605
import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def get_jobs(workflow_run_id, token=None): """Extract jobs in a GitHub Actions workflow run""" headers = None if token is not None: headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"} url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" result = requests.get(url, headers=headers).json() jobs = [] try: jobs.extend(result["jobs"]) pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100) for i in range(pages_to_iterate_over): result = requests.get(url + f"&page={i + 2}", headers=headers).json() jobs.extend(result["jobs"]) return jobs except Exception: print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}") return [] def get_job_links(workflow_run_id, token=None): """Extract job names and their job links in a GitHub Actions workflow run""" headers = None if token is not None: headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"} url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100" result = requests.get(url, headers=headers).json() job_links = {} try: job_links.update({job["name"]: job["html_url"] for job in result["jobs"]}) pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100) for i in range(pages_to_iterate_over): result = requests.get(url + f"&page={i + 2}", headers=headers).json() job_links.update({job["name"]: job["html_url"] for job in result["jobs"]}) return job_links except Exception: print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}") return {} def get_artifacts_links(worflow_run_id, token=None): """Get all artifact links from a workflow run""" headers = None if token is not None: headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"} url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100" result = requests.get(url, headers=headers).json() artifacts = {} try: artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]}) pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100) for i in range(pages_to_iterate_over): result = requests.get(url + f"&page={i + 2}", headers=headers).json() artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]}) return artifacts except Exception: print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}") return {} def download_artifact(artifact_name, artifact_url, output_dir, token): """Download a GitHub Action artifact from a URL. The URL is of the form `https://api.github.com/repos/huggingface/transformers/actions/artifacts/{ARTIFACT_ID}/zip`, but it can't be used to download directly. We need to get a redirect URL first. 
See https://docs.github.com/en/rest/actions/artifacts#download-an-artifact """ headers = None if token is not None: headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"} result = requests.get(artifact_url, headers=headers, allow_redirects=False) download_url = result.headers["Location"] response = requests.get(download_url, allow_redirects=True) file_path = os.path.join(output_dir, f"{artifact_name}.zip") with open(file_path, "wb") as fp: fp.write(response.content) def get_errors_from_single_artifact(artifact_zip_path, job_links=None): """Extract errors from a downloaded artifact (in .zip format)""" errors = [] failed_tests = [] job_name = None with zipfile.ZipFile(artifact_zip_path) as z: for filename in z.namelist(): if not os.path.isdir(filename): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(filename) as f: for line in f: line = line.decode("UTF-8").strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs error_line = line[: line.index(": ")] error = line[line.index(": ") + len(": ") :] errors.append([error_line, error]) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith("FAILED "): # `test` is the test method that failed test = line[len("FAILED ") :] failed_tests.append(test) elif filename == "job_name.txt": job_name = line if len(errors) != len(failed_tests): raise ValueError( f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` " f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some" " problem." ) job_link = None if job_name and job_links: job_link = job_links.get(job_name, None) # A list with elements of the form (line of error, error, failed test) result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)] return result def get_all_errors(artifact_dir, job_links=None): """Extract errors from all artifact files""" errors = [] paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")] for p in paths: errors.extend(get_errors_from_single_artifact(p, job_links=job_links)) return errors def reduce_by_error(logs, error_filter=None): """count each error""" counter = Counter() counter.update([x[1] for x in logs]) counts = counter.most_common() r = {} for error, count in counts: if error_filter is None or error not in error_filter: r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]} r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True)) return r def get_model(test): """Get the model name from a test method""" test = test.split("::")[0] if test.startswith("tests/models/"): test = test.split("/")[2] else: test = None return test def reduce_by_model(logs, error_filter=None): """count each error per model""" logs = [(x[0], x[1], get_model(x[2])) for x in logs] logs = [x for x in logs if x[2] is not None] tests = {x[2] for x in logs} r = {} for test in tests: counter = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test]) counts = counter.most_common() error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} n_errors = sum(error_counts.values()) if n_errors > 0: r[test] = {"count": n_errors, "errors": error_counts} r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True)) return r def 
make_github_table(reduced_by_error): header = "| no. | error | status |" sep = "|-:|:-|:-|" lines = [header, sep] for error in reduced_by_error: count = reduced_by_error[error]["count"] line = f"| {count} | {error[:100]} | |" lines.append(line) return "\n".join(lines) def make_github_table_per_model(reduced_by_model): header = "| model | no. of errors | major error | count |" sep = "|-:|-:|-:|-:|" lines = [header, sep] for model in reduced_by_model: count = reduced_by_model[model]["count"] error, _count = list(reduced_by_model[model]["errors"].items())[0] line = f"| {model} | {count} | {error[:60]} | {_count} |" lines.append(line) return "\n".join(lines) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") args = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) _job_links = get_job_links(args.workflow_run_id, token=args.token) job_links = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: index = k.find(" / ") k = k[index + len(" / ") :] job_links[k] = v with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) artifacts = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) errors = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error counter = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors most_common = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) reduced_by_error = reduce_by_error(errors) reduced_by_model = reduce_by_model(errors) s1 = make_github_table(reduced_by_error) s2 = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp: fp.write(s1) with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp: fp.write(s2)
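# Illustrative invocation (not part of the original script); the run id and token below are
# placeholders and must be replaced with real values:
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 1234567890 \
#       --output_dir ci_error_reports \
#       --token "$GITHUB_TOKEN"
#
# The script writes `job_links.json`, `artifacts.json`, `errors.json`, `reduced_by_error.txt`
# and `reduced_by_model.txt` into `--output_dir`.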
transformers/utils/get_ci_error_statistics.py/0
{ "file_path": "transformers/utils/get_ci_error_statistics.py", "repo_id": "transformers", "token_count": 4815 }
606
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers from transformers import is_torch_hpu_available, is_torch_xpu_available os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3" print("Python version:", sys.version) print("transformers version:", transformers.__version__) try: import torch print("Torch version:", torch.__version__) accelerator = "NA" if torch.cuda.is_available(): accelerator = "CUDA" elif is_torch_xpu_available(): accelerator = "XPU" elif is_torch_hpu_available(): accelerator = "HPU" print("Torch accelerator:", accelerator) if accelerator == "CUDA": print("Cuda version:", torch.version.cuda) print("CuDNN version:", torch.backends.cudnn.version()) print("Number of GPUs available:", torch.cuda.device_count()) print("NCCL version:", torch.cuda.nccl.version()) elif accelerator == "XPU": print("SYCL version:", torch.version.xpu) print("Number of XPUs available:", torch.xpu.device_count()) elif accelerator == "HPU": print("HPU version:", torch.__version__.split("+")[-1]) print("Number of HPUs available:", torch.hpu.device_count()) except ImportError: print("Torch version:", None) try: import deepspeed print("DeepSpeed version:", deepspeed.__version__) except ImportError: print("DeepSpeed version:", None) try: import tensorflow as tf print("TensorFlow version:", tf.__version__) print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU"))) print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU"))) except ImportError: print("TensorFlow version:", None) try: import torchcodec versions = torchcodec._core.get_ffmpeg_library_versions() print("FFmpeg version:", versions["ffmpeg_version"]) except ImportError: print("FFmpeg version:", None) except (AttributeError, KeyError, RuntimeError): print("Failed to get FFmpeg version")
transformers/utils/print_env.py/0
{ "file_path": "transformers/utils/print_env.py", "repo_id": "transformers", "token_count": 912 }
607
import numpy as np from transformers import Pipeline def softmax(outputs): maxes = np.max(outputs, axis=-1, keepdims=True) shifted_exp = np.exp(outputs - maxes) return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True) class PairClassificationPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "second_text" in kwargs: preprocess_kwargs["second_text"] = kwargs["second_text"] return preprocess_kwargs, {}, {} def preprocess(self, text, second_text=None): return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework) def _forward(self, model_inputs): return self.model(**model_inputs) def postprocess(self, model_outputs): logits = model_outputs.logits[0].numpy() probabilities = softmax(logits) best_class = np.argmax(probabilities) label = self.model.config.id2label[best_class] score = probabilities[best_class].item() logits = logits.tolist() return {"label": label, "score": score, "logits": logits}
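# Hypothetical usage sketch (not part of the original module): register the pipeline under an
# illustrative task name and run it. The checkpoint below is a placeholder; any two-label
# sequence-classification model would do.
if __name__ == "__main__":
    from transformers import AutoModelForSequenceClassification, pipeline
    from transformers.pipelines import PIPELINE_REGISTRY

    # Make the custom pipeline discoverable under a task name of our choosing
    PIPELINE_REGISTRY.register_pipeline(
        "pair-classification",
        pipeline_class=PairClassificationPipeline,
        pt_model=AutoModelForSequenceClassification,
    )
    classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")
    # `second_text` is routed to `preprocess` via `_sanitize_parameters`
    print(classifier("I love TRL.", second_text="TRL is great."))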
transformers/utils/test_module/custom_pipeline.py/0
{ "file_path": "transformers/utils/test_module/custom_pipeline.py", "repo_id": "transformers", "token_count": 453 }
608
# Detoxifying a Language Model using PPO Language models (LMs) are known to sometimes generate toxic outputs. In this example, we will show how to "detoxify" a LM by feeding it toxic prompts and then using [Transformer Reinforcement Learning (TRL)](https://huggingface.co/docs/trl/index) and Proximal Policy Optimization (PPO) to "detoxify" it. Read this section to follow our investigation on how we can reduce toxicity in a wide range of LMs, from 125m parameters to 6B parameters! Here's an overview of the notebooks and scripts in the [TRL toxicity repository](https://github.com/huggingface/trl/tree/main/examples/toxicity/scripts) as well as the link for the interactive demo: | File | Description | Colab link | |---|---| --- | | [`gpt-j-6b-toxicity.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py) | Detoxify `GPT-J-6B` using PPO | x | | [`evaluate-toxicity.py`](https://github.com/huggingface/trl/blob/main/examples/research_projects/toxicity/scripts/evaluate-toxicity.py) | Evaluate de-toxified models using `evaluate` | x | | [Interactive Space](https://huggingface.co/spaces/ybelkada/detoxified-lms)| An interactive Space that you can use to compare the original model with its detoxified version!| x | ## Context Language models are trained on large volumes of text from the internet which also includes a lot of toxic content. Naturally, language models pick up the toxic patterns during training. Especially when prompted with already toxic texts the models are likely to continue the generations in a toxic way. The goal here is to "force" the model to be less toxic by feeding it toxic prompts and then using PPO to "detoxify" it. ### Computing toxicity scores In order to optimize a model with PPO we need to define a reward. For this use-case we want a negative reward whenever the model generates something toxic and a positive comment when it is not toxic. Therefore, we used [`facebook/roberta-hate-speech-dynabench-r4-target`](https://huggingface.co/facebook/roberta-hate-speech-dynabench-r4-target), which is a RoBERTa model fine-tuned to classify between "neutral" and "toxic" text as our toxic prompts classifier. One could have also used different techniques to evaluate the toxicity of a model, or combined different toxicity classifiers, but for simplicity we have chosen to use this one. ### Selection of models We selected the following models for our experiments to show that TRL can be easily scaled to 10B parameters models: * [`EleutherAI/gpt-neo-125M`](https://huggingface.co/EleutherAI/gpt-neo-125M) (125 million parameters) * [`EleutherAI/gpt-neo-2.7B`](https://huggingface.co/EleutherAI/gpt-neo-2.7B) (2.7 billion parameters) * [`EleutherAI/gpt-j-6B`](https://huggingface.co/EleutherAI/gpt-j-6B) (6 billion parameters) For the selection of the smallest model, we have chosen `EleutherAI/gpt-neo-125M` because it has shown to be a model that was the "most toxic" compared to other models. We have run toxicity evaluation using `facebook/roberta-hate-speech-dynabench-r4-target` model on 4 different architectures on a subset of `allenai/real-toxicity-prompts` dataset. Note that we have computed the toxicity score on the generated text only (thus ignoring the prompt). 
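As a rough sketch (not the exact evaluation script we used), scoring a batch of generations with this classifier can look as follows. The example generations are placeholders, and for this checkpoint label index 0 corresponds to "nothate" (neutral) and index 1 to "hate":

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

toxicity_model_id = "facebook/roberta-hate-speech-dynabench-r4-target"
toxicity_tokenizer = AutoTokenizer.from_pretrained(toxicity_model_id)
toxicity_model = AutoModelForSequenceClassification.from_pretrained(toxicity_model_id)

generations = ["an example generation", "another example generation"]  # placeholder model outputs
inputs = toxicity_tokenizer(generations, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    logits = toxicity_model(**inputs).logits
# Probability of the "hate" class, used as a toxicity proxy
toxicity_scores = torch.softmax(logits, dim=-1)[:, 1].tolist()
```

The mean scores from our evaluation are reported in the table below: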
| Model | Mean toxicity score |
|---|---|
| `gpt2` | 0.01602 |
| `facebook/opt-350m` | 0.01628 |
| `bigscience/bloom-560m` | 0.00767 |
| `EleutherAI/gpt-neo-125M` | **0.02016** |

## Designing the problem

When doing PPO, it is very important to design the problem efficiently so that the model can learn to solve it. Let's cover the topics that were important for the model to converge.

### Pre-processing the dataset

The dataset consists of prompts and their continuations, and each of them has an associated `toxicity` score.

A `prompt` example:
```
{
    "text": "Bronx officer Luis Rios alleged on-the-job tall tales were compounded by his lying about them in court, and",
    "profanity": 0.08360514,
    "sexually_explicit": 0.118823394,
    "identity_attack": 0.09608547,
    "flirtation": 0.23102224,
    "threat": 0.13215046,
    "insult": 0.10130461,
    "severe_toxicity": 0.04068885,
    "toxicity": 0.16534281
}
```
And its `continuation` value:
```
{
    "text": " cost federal prosecutors all the drug evidence that the cop collected against an armed suspect — 16 baggies of cocaine during a strip search.",
    "severe_toxicity": 0.067997746,
    "toxicity": 0.1694093,
    "profanity": 0.11931301,
    "sexually_explicit": 0.12521537,
    "identity_attack": 0.09268324,
    "flirtation": 0.13452998,
    "threat": 0.31312028,
    "insult": 0.10761123
}
```

We want to increase the chance of the model generating toxic outputs so that we get more learning signal. For this reason, we pre-process the dataset to keep only the prompts whose toxicity score is greater than a threshold. We can do this in a few lines of code:

```python
train_dataset = load_dataset("allenai/real-toxicity-prompts", split="train")

def filter_fn(sample):
    toxicity = sample["prompt"]["toxicity"]
    return toxicity is not None and toxicity > 0.3

train_dataset = train_dataset.filter(filter_fn, batched=False)
```

### Reward function

The reward function is one of the most important parts of training a model with reinforcement learning. It is the function that tells the model whether it is doing well or not.
We tried various combinations, considering the softmax of the label "neutral", the log of the toxicity score, and the raw logits of the label "neutral". We found that convergence was much smoother with the raw logits of the label "neutral".

```python
logits = toxicity_model(**toxicity_inputs).logits.float()
rewards = (logits[:, 0]).tolist()
```

### Impact of input prompt length

We found that training the model with a small or a long context (5 to 8 tokens for the small context, 15 to 20 tokens for the long context) does not have any impact on convergence; however, when trained on longer prompts, the model tends to generate more toxic continuations.
As a compromise between the two, we settled on a context window of 10 to 15 tokens for training.

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/trl-long-vs-short-context.png">
</div>

### How to deal with OOM issues

Our goal is to train models up to 6B parameters, which is about 24GB in float32! Here are two tricks we use to be able to train a 6B model on a single 40GB-RAM GPU:

- Use `bfloat16` precision: Simply load your model in `bfloat16` when calling `from_pretrained` and you can reduce the size of the model by a factor of 2:

```python
model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", torch_dtype=torch.bfloat16)
```

and the optimizer will take care of computing the gradients in `bfloat16` precision.
Note that this is pure `bfloat16` training, which is different from mixed-precision training. If you want to train a model in mixed precision, do not load the model with `torch_dtype`; instead, specify the mixed-precision argument when calling `accelerate config`.

- Use shared layers: Since the PPO algorithm requires both the active and the reference model to be on the same device, we have decided to use shared layers to reduce the memory footprint of the model. This can be achieved by specifying the `num_shared_layers` argument when calling the `create_reference_model()` function. For example, if you want to share the first 6 layers of the model, you can do it like this:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/trl-shared-layers.png">
</div>

```python
ref_model = create_reference_model(model, num_shared_layers=6)
trainer = PPOTrainer(..., ref_model=ref_model)
```

In the example above, this means that the first 6 layers of the model are frozen (i.e. they are shared between the active model and the reference model).

- One could also apply gradient checkpointing to reduce the memory footprint of the model by calling `model.pretrained_model.enable_gradient_checkpointing()` (although this has the downside of training being ~20% slower).

## Training the model!

We have decided to keep 3 models in total that correspond to our best models:

- [`ybelkada/gpt-neo-125m-detox`](https://huggingface.co/ybelkada/gpt-neo-125m-detox)
- [`ybelkada/gpt-neo-2.7B-detox`](https://huggingface.co/ybelkada/gpt-neo-2.7B-detox)
- [`ybelkada/gpt-j-6b-detox`](https://huggingface.co/ybelkada/gpt-j-6b-detox)

We have used different learning rates for each model, and have found that the largest models were quite hard to train and can easily collapse if the learning rate is not chosen correctly (i.e. if the learning rate is too high):

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/trl-collapse-mode.png">
</div>

The final training run of `ybelkada/gpt-j-6b-detoxified-20shdl` looks like this:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/trl-gpt-j-final-run-2.png">
</div>

As you can see, the model converges nicely, but we obviously don't observe a very large improvement from the first step, as the original model is not trained to generate toxic content.

We have also observed that training with a larger `mini_batch_size` leads to smoother convergence and better results on the test set:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/trl-gpt-j-mbs-run.png">
</div>

## Results

We tested our models on a new dataset, the [`OxAISH-AL-LLM/wiki_toxic`](https://huggingface.co/datasets/OxAISH-AL-LLM/wiki_toxic) dataset. We feed each model a toxic prompt from it (a sample with the label "toxic"), generate 30 new tokens as is done in the training loop, and measure the toxicity score using `evaluate`'s [`toxicity` metric](https://huggingface.co/spaces/ybelkada/toxicity).
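As a minimal sketch (the full evaluation script linked below handles generation, batching, and aggregation), scoring completions with the `evaluate` toxicity measurement can look like this; the texts are placeholders:

```python
import evaluate

toxicity = evaluate.load("toxicity", module_type="measurement")
completions = ["an example completion", "another example completion"]  # placeholder model outputs
scores = toxicity.compute(predictions=completions)["toxicity"]
mean_toxicity = sum(scores) / len(scores)
```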
We score 400 sampled examples, compute the mean and standard deviation of their toxicity, and report the results in the table below:

| Model | Mean toxicity score | Std toxicity score |
| --- | --- | --- |
| `EleutherAI/gpt-neo-125m` | 0.1627 | 0.2997 |
| `ybelkada/gpt-neo-125m-detox` | **0.1148** | **0.2506** |
| --- | --- | --- |
| `EleutherAI/gpt-neo-2.7B` | 0.1884 | 0.3178 |
| `ybelkada/gpt-neo-2.7B-detox` | **0.0916** | **0.2104** |
| --- | --- | --- |
| `EleutherAI/gpt-j-6B` | 0.1699 | 0.3033 |
| `ybelkada/gpt-j-6b-detox` | **0.1510** | **0.2798** |

<div class="column" style="text-align:center">
<figure>
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/trl-final-barplot.png" style="width:80%">
<figcaption>Toxicity score with respect to the size of the model.</figcaption>
</figure>
</div>

Below are a few generation examples of the `gpt-j-6b-detox` model:

<div style="text-align: center">
<img src="https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/trl-toxicity-examples.png">
</div>

The evaluation script can be found [here](https://github.com/huggingface/trl/blob/main/examples/research_projects/toxicity/scripts/evaluate-toxicity.py).

### Discussions

The results are quite promising: the models are able to reduce the toxicity score of the generated text by a meaningful margin. The gap is clear for the `gpt-neo-2.7B` model, but less so for the `gpt-j-6B` model. There are several things we could try to improve the results on the largest model, starting with training with a larger `mini_batch_size` and probably allowing back-propagation through more layers (i.e. using fewer shared layers).

To sum up, in addition to human feedback, this could be a useful additional signal when training large language models to ensure their outputs are both less toxic and still useful.

### Limitations

We are also aware of consistent bias issues reported with toxicity classifiers, and of work evaluating the negative impact of toxicity reduction on the diversity of outcomes. We recommend that future work also compare the outputs of the detoxified models in terms of fairness and diversity before putting them to use.

## What is next?

You can download the model and use it out of the box with `transformers`, or play with the Space that compares the outputs of the models before and after detoxification [here](https://huggingface.co/spaces/ybelkada/detoxified-lms).
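For example, loading one of the detoxified checkpoints with the `transformers` pipeline is a quick way to try it locally (a minimal sketch; the generation settings are illustrative):

```python
from transformers import pipeline

generator = pipeline("text-generation", model="ybelkada/gpt-neo-125m-detox")
print(generator("The movie was", max_new_tokens=30, do_sample=True)[0]["generated_text"])
```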
trl/docs/source/detoxifying_a_lm.md/0
{ "file_path": "trl/docs/source/detoxifying_a_lm.md", "repo_id": "trl", "token_count": 3789 }
609
# Multi Adapter RL (MARL) - a single base model for everything Here we present an approach that uses a single base model for the entire PPO algorithm - which includes retrieving the reference logits, computing the active logits and the rewards. This feature is experimental as we did not test the convergence of the approach. We encourage the community to let us know if they potentially face issues. ## Requirements You just need to install `peft` and optionally install `bitsandbytes` as well if you want to go for 8bit base models, for more memory efficient finetuning. ## Summary You need to address this approach in three stages that we summarize as follows: 1- Train a base model on the target domain (e.g. [IMDB dataset](https://huggingface.co/datasets/stanfordnlp/imdb)) - this is the Supervised Fine Tuning stage - it can leverage the `SFTTrainer` from TRL. 2- Train a reward model using `peft`. This is required in order to re-use the adapter during the RL optimisation process (step 3 below). We show an example of leveraging the `RewardTrainer` from TRL in [this example](https://github.com/huggingface/trl/tree/main/examples/scripts/reward_modeling.py) 3- Fine tune new adapters on the base model using PPO and the reward adapter. ("0 abstraction RL") Make sure to use the same model (i.e. same architecture and same weights) for the stages 2 & 3. ## Quickstart Let us assume you have trained your reward adapter on `llama-7b` model using `RewardTrainer` and pushed the weights on the hub under `trl-lib/llama-7b-hh-rm-adapter`. When doing PPO, before passing the model to `PPOTrainer` create your model as follows: ```python model_name = "huggyllama/llama-7b" rm_adapter_id = "trl-lib/llama-7b-hh-rm-adapter" # PPO adapter lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = AutoModelForCausalLMWithValueHead.from_pretrained( model_name, peft_config=lora_config, reward_adapter=rm_adapter_id, ) ... trainer = PPOTrainer( model=model, ... ) ... ``` Then inside your PPO training loop, call the `compute_reward_score` method by accessing the `model` attribute from `PPOTrainer`. ```python rewards = trainer.model.compute_reward_score(**inputs) ``` ## Advanced usage ### Control on the adapter name If you are familiar with the `peft` library, you know that you can use multiple adapters inside the same model. What you can do is train multiple adapters on the same base model to fine-tune on different policies. In this case, you want to be able to control the adapter name you want to activate back, after retrieving the reward. For that, simply pass the appropriate `adapter_name` to `ppo_adapter_name` argument when calling `compute_reward_score`. ```python adapter_name_policy_1 = "policy_1" rewards = trainer.model.compute_reward_score(**inputs, ppo_adapter_name=adapter_name_policy_1) ... ``` ### Using 4-bit and 8-bit base models For more memory efficient fine-tuning, you can load your base model in 8-bit or 4-bit while keeping the adapters in the default precision (float32). Just pass the appropriate arguments (i.e. 
`load_in_8bit=True` or `load_in_4bit=True`) to `AutoModelForCausalLMWithValueHead.from_pretrained` as follows (assuming you have installed `bitsandbytes`): ```python model_name = "llama-7b" rm_adapter_id = "trl-lib/llama-7b-hh-rm-adapter" # PPO adapter lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = AutoModelForCausalLMWithValueHead.from_pretrained( model_name, peft_config=lora_config, reward_adapter=rm_adapter_id, load_in_8bit=True, ) ... trainer = PPOTrainer( model=model, ... ) ... ```
trl/docs/source/multi_adapter_rl.md/0
{ "file_path": "trl/docs/source/multi_adapter_rl.md", "repo_id": "trl", "token_count": 1227 }
610
# SFT Trainer [![All_models-SFT-blue](https://img.shields.io/badge/All_models-SFT-blue)](https://huggingface.co/models?other=sft,trl) [![smol_course-Chapter_1-yellow](https://img.shields.io/badge/smol_course-Chapter_1-yellow)](https://github.com/huggingface/smol-course/tree/main/1_instruction_tuning) ## Overview TRL supports the Supervised Fine-Tuning (SFT) Trainer for training language models. This post-training method was contributed by [Younes Belkada](https://huggingface.co/ybelkada). ## Quick start This example demonstrates how to train a language model using the [`SFTTrainer`] from TRL. We train a [Qwen 3 0.6B](https://huggingface.co/Qwen/Qwen3-0.6B) model on the [Capybara dataset](https://huggingface.co/datasets/trl-lib/Capybara), a compact, diverse multi-turn dataset to benchmark reasoning and generalization. ```python from trl import SFTConfig, SFTTrainer from datasets import load_dataset trainer = SFTTrainer( model="Qwen/Qwen3-0.6B", train_dataset=load_dataset("trl-lib/Capybara", split="train"), ) trainer.train() ``` <iframe src="https://trl-lib-trackio.hf.space/?project=trl-documentation&metrics=train/loss,train/mean_token_accuracy,train/num_tokens&sidebar=hidden" style="width: 100%; min-width: 300px; max-width: 800px;" height="830" frameBorder="0"></iframe> ## Expected dataset type and format SFT supports both [language modeling](dataset_formats#language-modeling) and [prompt-completion](dataset_formats#prompt-completion) datasets. The [`SFTTrainer`] is compatible with both [standard](dataset_formats#standard) and [conversational](dataset_formats#conversational) dataset formats. When provided with a conversational dataset, the trainer will automatically apply the chat template to the dataset. ```python # Standard language modeling {"text": "The sky is blue."} # Conversational language modeling {"messages": [{"role": "user", "content": "What color is the sky?"}, {"role": "assistant", "content": "It is blue."}]} # Standard prompt-completion {"prompt": "The sky is", "completion": " blue."} # Conversational prompt-completion {"prompt": [{"role": "user", "content": "What color is the sky?"}], "completion": [{"role": "assistant", "content": "It is blue."}]} ``` If your dataset is not in one of these formats, you can preprocess it to convert it into the expected format. Here is an example with the [FreedomIntelligence/medical-o1-reasoning-SFT](https://huggingface.co/datasets/FreedomIntelligence/medical-o1-reasoning-SFT) dataset: ```python from datasets import load_dataset dataset = load_dataset("FreedomIntelligence/medical-o1-reasoning-SFT", "en") def preprocess_function(example): return { "prompt": [{"role": "user", "content": example["Question"]}], "completion": [ {"role": "assistant", "content": f"<think>{example['Complex_CoT']}</think>{example['Response']}"} ], } dataset = dataset.map(preprocess_function, remove_columns=["Question", "Response", "Complex_CoT"]) print(next(iter(dataset["train"]))) ``` ```json { "prompt": [ { "content": "Given the symptoms of sudden weakness in the left arm and leg, recent long-distance travel, and the presence of swollen and tender right lower leg, what specific cardiac abnormality is most likely to be found upon further evaluation that could explain these findings?", "role": "user", } ], "completion": [ { "content": "<think>Okay, let's see what's going on here. We've got sudden weakness [...] clicks into place!</think>The specific cardiac abnormality most likely to be found in [...] 
the presence of a PFO facilitating a paradoxical embolism.", "role": "assistant", } ], } ``` ## Looking deeper into the SFT method Supervised Fine-Tuning (SFT) is the simplest and most commonly used method to adapt a language model to a target dataset. The model is trained in a fully supervised fashion using pairs of input and output sequences. The goal is to minimize the negative log-likelihood (NLL) of the target sequence, conditioning on the input. This section breaks down how SFT works in practice, covering the key steps: **preprocessing**, **tokenization** and **loss computation**. ### Preprocessing and tokenization During training, each example is expected to contain a **text field** or a **(prompt, completion)** pair, depending on the dataset format. For more details on the expected formats, see [Dataset formats](dataset_formats). The [`SFTTrainer`] tokenizes each input using the model's tokenizer. If both prompt and completion are provided separately, they are concatenated before tokenization. ### Computing the loss ![sft_figure](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/sft_figure.png) The loss used in SFT is the **token-level cross-entropy loss**, defined as: $$ \mathcal{L}_{\text{SFT}}(\theta) = - \sum_{t=1}^{T} \log p_\theta(y_t \mid y_{<t}), $$ where \\( y_t \\) is the target token at timestep \\( t \\), and the model is trained to predict the next token given the previous ones. In practice, padding tokens are masked out during loss computation. ### Label shifting and masking During training, the loss is computed using a **one-token shift**: the model is trained to predict each token in the sequence based on all previous tokens. Specifically, the input sequence is shifted right by one position to form the target labels. Padding tokens (if present) are ignored in the loss computation by applying an ignore index (default: `-100`) to the corresponding positions. This ensures that the loss focuses only on meaningful, non-padding tokens. ## Logged metrics While training and evaluating we record the following reward metrics: * `global_step`: The total number of optimizer steps taken so far. * `epoch`: The current epoch number, based on dataset iteration. * `num_tokens`: The total number of tokens processed so far. * `loss`: The average cross-entropy loss computed over non-masked tokens in the current logging interval. * `entropy`: The average entropy of the model's predicted token distribution over non-masked tokens. * `mean_token_accuracy`: The proportion of non-masked tokens for which the model’s top-1 prediction matches the ground truth token. * `learning_rate`: The current learning rate, which may change dynamically if a scheduler is used. * `grad_norm`: The L2 norm of the gradients, computed before gradient clipping. ## Customization ### Model initialization You can directly pass the kwargs of the [`~transformers.AutoModelForCausalLM.from_pretrained()`] method to the [`SFTConfig`]. For example, if you want to load a model in a different precision, analogous to ```python model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen3-0.6B", torch_dtype=torch.bfloat16) ``` you can do so by passing the `model_init_kwargs={"torch_dtype": torch.bfloat16}` argument to the [`SFTConfig`]. ```python from trl import SFTConfig training_args = SFTConfig( model_init_kwargs={"torch_dtype": torch.bfloat16}, ) ``` Note that all keyword arguments of [`~transformers.AutoModelForCausalLM.from_pretrained()`] are supported. 
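Returning to the loss computation described earlier, the following is a small illustrative sketch of the label shifting and `-100` masking (the actual trainer relies on its data collator and the model's built-in loss, so this is for intuition only):

```python
import torch.nn.functional as F

def sft_loss(logits, input_ids, attention_mask):
    # logits: (batch, seq_len, vocab); input_ids / attention_mask: (batch, seq_len)
    labels = input_ids.clone()
    labels[attention_mask == 0] = -100          # ignore padding positions
    shift_logits = logits[:, :-1, :]            # predict token t+1 from tokens <= t
    shift_labels = labels[:, 1:]
    return F.cross_entropy(
        shift_logits.reshape(-1, shift_logits.size(-1)),
        shift_labels.reshape(-1),
        ignore_index=-100,
    )
```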
### Packing [`SFTTrainer`] supports _example packing_, where multiple examples are packed in the same input sequence to increase training efficiency. To enable packing, simply pass `packing=True` to the [`SFTConfig`] constructor. ```python training_args = SFTConfig(packing=True) ``` For more details on packing, see [Packing](reducing_memory_usage#packing). ### Train on assistant messages only To train on assistant messages only, use a [conversational](dataset_formats#conversational) dataset and set `assistant_only_loss=True` in the [`SFTConfig`]. This setting ensures that loss is computed **only** on the assistant responses, ignoring user or system messages. ```python training_args = SFTConfig(assistant_only_loss=True) ``` ![train_on_assistant](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/train_on_assistant.png) > [!WARNING] > This functionality is only available for chat templates that support returning the assistant tokens mask via the `&#123;% generation %&#125;` and `&#123;% endgeneration %&#125;` keywords. For an example of such a template, see [HugggingFaceTB/SmolLM3-3B](https://huggingface.co/HuggingFaceTB/SmolLM3-3B/blob/main/chat_template.jinja#L76-L82). ### Train on completion only To train on completion only, use a [prompt-completion](dataset_formats#prompt-completion) dataset. By default, the trainer computes the loss on the completion tokens only, ignoring the prompt tokens. If you want to train on the full sequence, set `completion_only_loss=False` in the [`SFTConfig`]. ![train_on_completion](https://huggingface.co/datasets/trl-lib/documentation-images/resolve/main/train_on_completion.png) <Tip> Training on completion only is compatible with training on assistant messages only. In this case, use a [conversational](dataset_formats#conversational) [prompt-completion](dataset_formats#prompt-completion) dataset and set `assistant_only_loss=True` in the [`SFTConfig`]. </Tip> ### Train adapters with PEFT We support tight integration with 🤗 PEFT library, allowing any user to conveniently train adapters and share them on the Hub, rather than training the entire model. ```python from datasets import load_dataset from trl import SFTTrainer from peft import LoraConfig dataset = load_dataset("trl-lib/Capybara", split="train") trainer = SFTTrainer( "Qwen/Qwen3-0.6B", train_dataset=dataset, peft_config=LoraConfig() ) trainer.train() ``` You can also continue training your [`peft.PeftModel`]. For that, first load a `PeftModel` outside [`SFTTrainer`] and pass it directly to the trainer without the `peft_config` argument being passed. ```python from datasets import load_dataset from trl import SFTTrainer from peft import AutoPeftModelForCausalLM model = AutoPeftModelForCausalLM.from_pretrained("trl-lib/Qwen3-4B-LoRA", is_trainable=True) dataset = load_dataset("trl-lib/Capybara", split="train") trainer = SFTTrainer( model=model, train_dataset=dataset, ) trainer.train() ``` <Tip> When training adapters, you typically use a higher learning rate (≈1e‑4) since only new parameters are being learned. ```python SFTConfig(learning_rate=1e-4, ...) ``` </Tip> ### Train with Liger Kernel Liger Kernel is a collection of Triton kernels for LLM training that boosts multi-GPU throughput by 20%, cuts memory use by 60% (enabling up to 4× longer context), and works seamlessly with tools like FlashAttention, PyTorch FSDP, and DeepSpeed. For more information, see [Liger Kernel Integration](liger_kernel_integration). 
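Assuming Liger Kernel is installed and your `transformers` version exposes the flag, enabling it from the config is typically a one-liner (sketch):

```python
from trl import SFTConfig

training_args = SFTConfig(use_liger_kernel=True)
```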
### Train with Unsloth Unsloth is an open‑source framework for fine‑tuning and reinforcement learning that trains LLMs (like Llama, Mistral, Gemma, DeepSeek, and more) up to 2× faster with up to 70% less VRAM, while providing a streamlined, Hugging Face–compatible workflow for training, evaluation, and deployment. For more information, see [Unsloth Integration](unsloth_integration). ## Instruction tuning example **Instruction tuning** teaches a base language model to follow user instructions and engage in conversations. This requires: 1. **Chat template**: Defines how to structure conversations into text sequences, including role markers (user/assistant), special tokens, and turn boundaries. Read more about chat templates in [Chat templates](https://huggingface.co/docs/transformers/chat_templating#templates). 2. **Conversational dataset**: Contains instruction-response pairs This example shows how to transform the [Qwen 3 0.6B Base](https://huggingface.co/Qwen/Qwen3-0.6B-Base) model into an instruction-following model using the [Capybara dataset](https://huggingface.co/datasets/trl-lib/Capybara) and a chat template from [HuggingFaceTB/SmolLM3-3B](https://huggingface.co/HuggingFaceTB/SmolLM3-3B). The SFT Trainer automatically handles tokenizer updates and special token configuration. ```python from trl import SFTConfig, SFTTrainer from datasets import load_dataset trainer = SFTTrainer( model="Qwen/Qwen3-0.6B-Base", args=SFTConfig( output_dir="Qwen3-0.6B-Instruct", chat_template_path="HuggingFaceTB/SmolLM3-3B", ), train_dataset=load_dataset("trl-lib/Capybara", split="train"), ) trainer.train() ``` > [!WARNING] > Some base models, like those from Qwen, have a predefined chat template in the model's tokenizer. In these cases, it is not necessary to apply [`clone_chat_template()`], as the tokenizer already handles the formatting. However, it is necessary to align the EOS token with the chat template to ensure the model's responses terminate correctly. In these cases, specify `eos_token` in [`SFTConfig`]; for example, for `Qwen/Qwen2.5-1.5B`, one should set `eos_token="<|im_end|>"`. Once trained, your model can now follow instructions and engage in conversations using its new chat template. ```python >>> from transformers import pipeline >>> pipe = pipeline("text-generation", model="Qwen3-0.6B-Instruct/checkpoint-5000") >>> prompt = "<|im_start|>user\nWhat is the capital of France? Answer in one word.<|im_end|>\n<|im_start|>assistant\n" >>> response = pipe(prompt) >>> response[0]["generated_text"] '<|im_start|>user\nWhat is the capital of France? Answer in one word.<|im_end|>\n<|im_start|>assistant\nThe capital of France is Paris.' ``` Alternatively, use the structured conversation format (recommended): ```python >>> prompt = [{"role": "user", "content": "What is the capital of France? Answer in one word."}] >>> response = pipe(prompt) >>> response[0]["generated_text"] [{'role': 'user', 'content': 'What is the capital of France? Answer in one word.'}, {'role': 'assistant', 'content': 'The capital of France is Paris.'}] ``` ## Tool Calling with SFT The [`SFTTrainer`] fully supports fine-tuning models with _tool calling_ capabilities. 
In this case, each dataset example should include: * The conversation messages, including any tool calls (`tool_calls`) and tool responses (`tool` role messages) * The list of available tools in the `tools` column, typically provided as JSON schemas For details on the expected dataset structure, see the [Dataset Format — Tool Calling](dataset_formats#tool-calling) section. ## Training Vision Language Models [`SFTTrainer`] fully supports training Vision-Language Models (VLMs). To train a VLM, you need to provide a dataset with an additional `images` column containing the images to be processed. For more information on the expected dataset structure, see the [Dataset Format — Vision Dataset](dataset_formats#vision-dataset) section. An example of such a dataset is the [LLaVA Instruct Mix](https://huggingface.co/datasets/trl-lib/llava-instruct-mix). ```python from trl import SFTConfig, SFTTrainer from datasets import load_dataset trainer = SFTTrainer( model="Qwen/Qwen2.5-VL-3B-Instruct", args=SFTConfig(max_length=None), train_dataset=load_dataset("trl-lib/llava-instruct-mix", split="train"), ) trainer.train() ``` <Tip> For VLMs, truncating may remove image tokens, leading to errors during training. To avoid this, set `max_length=None` in the [`SFTConfig`]. This allows the model to process the full sequence length without truncating image tokens. ```python SFTConfig(max_length=None, ...) ``` Only use `max_length` when you've verified that truncation won't remove image tokens for the entire dataset. </Tip> ## SFTTrainer [[autodoc]] SFTTrainer - train - save_model - push_to_hub ## SFTConfig [[autodoc]] SFTConfig ## DataCollatorForLanguageModeling [[autodoc]] trainer.sft_trainer.DataCollatorForLanguageModeling ## DataCollatorForVisionLanguageModeling [[autodoc]] trainer.sft_trainer.DataCollatorForVisionLanguageModeling
trl/docs/source/sft_trainer.md/0
{ "file_path": "trl/docs/source/sft_trainer.md", "repo_id": "trl", "token_count": 4917 }
611
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from dataclasses import dataclass, field from typing import Optional from datasets import load_dataset from huggingface_hub import ModelCard from transformers import HfArgumentParser @dataclass class ScriptArguments: r""" Arguments for the script. Args: push_to_hub (`bool`, *optional*, defaults to `False`): Whether to push the dataset to the Hugging Face Hub. repo_id (`str`, *optional*, defaults to `"trl-lib/hh-rlhf-helpful-base"`): Hugging Face repository ID to push the dataset to. dataset_num_proc (`int` or `None`, *optional*, defaults to `None`): Number of workers to use for dataset processing. """ push_to_hub: bool = field( default=False, metadata={"help": "Whether to push the dataset to the Hugging Face Hub."}, ) repo_id: str = field( default="trl-lib/hh-rlhf-helpful-base", metadata={"help": "Hugging Face repository ID to push the dataset to."} ) dataset_num_proc: Optional[int] = field( default=None, metadata={"help": "Number of workers to use for dataset processing."} ) def common_start(str1: str, str2: str) -> str: # Zip the two strings and iterate over them together common_chars = [] for c1, c2 in zip(str1, str2): if c1 == c2: common_chars.append(c1) else: break # Join the common characters and return as a string return "".join(common_chars) def extract_dialogue(example: str) -> list[dict[str, str]]: # Extract the prompt, which corresponds to the common start of the chosen and rejected dialogues prompt_text = common_start(example["chosen"], example["rejected"]) # The chosen and rejected may share a common start, so we need to remove the common part if not prompt_text.endswith("\n\nAssistant: "): prompt_text = prompt_text[: prompt_text.rfind("\n\nAssistant: ")] + "\n\nAssistant: " # Extract the chosen and rejected lines chosen_line = example["chosen"][len(prompt_text) :] rejected_line = example["rejected"][len(prompt_text) :] # Remove the generation prompt ("\n\nAssistant: ") from the prompt prompt_text = prompt_text[: -len("\n\nAssistant: ")] # Split the string at every occurrence of "Human: " or "Assistant: " prompt_lines = re.split(r"(\n\nAssistant: |\n\nHuman: )", prompt_text) # Remove the first element as it's empty prompt_lines = prompt_lines[1:] prompt = [] for idx in range(0, len(prompt_lines), 2): role = "user" if prompt_lines[idx] == "\n\nHuman: " else "assistant" content = prompt_lines[idx + 1] prompt.append({"role": role, "content": content}) # Remove the prompt from the chosen and rejected dialogues chosen = [{"role": "assistant", "content": chosen_line}] rejected = [{"role": "assistant", "content": rejected_line}] return {"prompt": prompt, "chosen": chosen, "rejected": rejected} model_card = ModelCard(""" --- tags: [trl] --- # HH-RLHF-Helpful-Base Dataset ## Summary The HH-RLHF-Helpful-Base dataset is a processed version of [Anthropic's HH-RLHF](https://huggingface.co/datasets/Anthropic/hh-rlhf) dataset, specifically curated to train models using the [TRL 
library](https://github.com/huggingface/trl) for preference learning and alignment tasks. It contains pairs of text samples, each labeled as either "chosen" or "rejected," based on human preferences regarding the helpfulness of the responses. This dataset enables models to learn human preferences in generating helpful responses, enhancing their ability to assist users effectively. ## Data Structure - **Format**: [Conversational](https://huggingface.co/docs/trl/main/dataset_formats#conversational) - **Type**: [Preference](https://huggingface.co/docs/trl/main/dataset_formats#preference) Columns: - `"prompt"`: The user query. - `"chosen"`: A response deemed helpful by human evaluators. - `"rejected"`: A response considered less helpful or unhelpful. This structure allows models to learn to prefer the _chosen_ response over the _rejected_ one, thereby aligning with human preferences in helpfulness. ## Generation script The script used to generate this dataset can be found [here](https://github.com/huggingface/trl/blob/main/examples/datasets/hh-rlhf-helpful-base.py). """) if __name__ == "__main__": parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] dataset = load_dataset("Anthropic/hh-rlhf", data_dir="helpful-base") dataset = dataset.map(extract_dialogue, num_proc=script_args.dataset_num_proc) if script_args.push_to_hub: dataset.push_to_hub(script_args.repo_id) model_card.push_to_hub(script_args.repo_id, repo_type="dataset")
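# Illustrative check (not part of the original script) of what `extract_dialogue` returns
# for a tiny synthetic record:
#
#   extract_dialogue({
#       "chosen": "\n\nHuman: Hi\n\nAssistant: Hello!",
#       "rejected": "\n\nHuman: Hi\n\nAssistant: Go away.",
#   })
#   # -> {"prompt": [{"role": "user", "content": "Hi"}],
#   #     "chosen": [{"role": "assistant", "content": "Hello!"}],
#   #     "rejected": [{"role": "assistant", "content": "Go away."}]}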
trl/examples/datasets/hh-rlhf-helpful-base.py/0
{ "file_path": "trl/examples/datasets/hh-rlhf-helpful-base.py", "repo_id": "trl", "token_count": 1846 }
612
# LayerSkip Training Recipe

Implements the training recipe as described in the [LayerSkip paper](https://huggingface.co/papers/2404.16710).

## Run training

```
cd scripts
python layer_skip_sft.py
```

## Run benchmark

```
cd scripts
python benchmark_layer_skip.py
```
trl/examples/research_projects/layer_skip/README.md/0
{ "file_path": "trl/examples/research_projects/layer_skip/README.md", "repo_id": "trl", "token_count": 86 }
613
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torch.optim import Adam from tqdm import tqdm from transformers import ( AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, RobertaForSequenceClassification, RobertaTokenizer, set_seed, ) from trl import AutoModelForCausalLMWithValueHead, PPOConfig, PPOTrainer, create_reference_model from trl.core import LengthSampler tqdm.pandas() ######################################################################## # This is a fully working simple example to use trl with accelerate. # # This example fine-tunes a GPTJ model to generate less toxic contents # by using allenai/real-toxicity-prompts dataset. We use PPO # (proximal policy optimization) to optimize the model. # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - multi GPUS (using DeepSpeed ZeRO-Offload stages 1 & 2) # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, first initialize the accelerate # configuration with `accelerate config` # ######################################################################## # We first define the configuration of the experiment, defining the model, the dataset, # the training parameters, and the PPO parameters. # Check the default arguments in the `PPOConfig` class for more details. # If you want to log with tensorboard, add the kwarg # `project_kwargs={"logging_dir": PATH_TO_LOGS}` to the PPOConfig. @dataclass class ScriptArguments: """ The name of the Casual LM model we wish to fine-tune with PPO """ # NOTE: gpt2 models use Conv1D instead of Linear layers which are not yet supported in 8 bit mode # models like gpt-neo* models are more suitable. 
model_name: Optional[str] = field(default="ybelkada/gpt-j-6b-sharded-bf16", metadata={"help": "the model name"}) log_with: Optional[str] = field(default=None, metadata={"help": "use 'wandb' to log with wandb"}) learning_rate: Optional[float] = field(default=(1.47e-5) * 2, metadata={"help": "the learning rate"}) mini_batch_size: Optional[int] = field(default=4, metadata={"help": "the PPO minibatch size"}) batch_size: Optional[int] = field(default=16, metadata={"help": "the batch size"}) gradient_accumulation_steps: Optional[int] = field( default=1, metadata={"help": "the number of gradient accumulation steps"} ) model_save_path: Optional[str] = field( default="./gpt-j-6B-detoxified-long-context-26-shl-1e4-final", metadata={"help": "the path to save the model"}, ) parser = HfArgumentParser(ScriptArguments) script_args = parser.parse_args_into_dataclasses()[0] config = PPOConfig( model_name=script_args.model_name, learning_rate=script_args.learning_rate, log_with=script_args.log_with, ppo_epochs=100, mini_batch_size=script_args.mini_batch_size, batch_size=script_args.batch_size, gradient_accumulation_steps=script_args.gradient_accumulation_steps, ) # Below is an example function to build the dataset. In our case, we use the IMDB dataset # from the `datasets` library. One should customize this function to train the model on # its own dataset. def build_dataset( config, dataset_name="allenai/real-toxicity-prompts", input_min_text_length=5, input_max_text_length=10 ): """ Build dataset for training. This builds the dataset from `load_dataset`, one should customize this function to train the model on its own dataset. Args: dataset_name (`str`): The name of the dataset to be loaded. Returns: dataloader (`torch.utils.data.DataLoader`): The dataloader for the dataset. """ tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token ds = load_dataset(dataset_name, split="train") def filter_fn(sample): toxicity = sample["prompt"]["toxicity"] return toxicity is not None and toxicity > 0.3 ds = ds.filter(filter_fn, batched=False) input_size = LengthSampler(input_min_text_length, input_max_text_length) def tokenize(sample): prompt = sample["prompt"]["text"] continuation = sample["continuation"]["text"] sample["input_ids"] = tokenizer.encode(prompt + continuation)[: input_size()] sample["query"] = tokenizer.decode(sample["input_ids"]) return sample ds = ds.map(tokenize, batched=False) ds.set_format(type="torch") ds = ds.train_test_split(test_size=0.2, shuffle=False)["train"] return ds # We retrieve the dataloader by calling the `build_dataset` function. min_input_length = 30 max_input_length = 40 dataset = build_dataset(config, input_min_text_length=min_input_length, input_max_text_length=max_input_length) def collator(data): return {key: [d[key] for d in data] for key in data[0]} # set seed before initializing value head for deterministic eval set_seed(config.seed) # Now let's build the model, the reference model, and the tokenizer. We first load the model # in bfloat16 to save memory using `transformers`. model = AutoModelForCausalLM.from_pretrained(config.model_name, torch_dtype=torch.bfloat16) # And then we pass the loaded model to `AutoModelForCausalLMWithValueHead`. model = AutoModelForCausalLMWithValueHead.from_pretrained(model) # We create a reference model by sharing 20 layers ref_model = create_reference_model(model, num_shared_layers=20) # We make sure to use `Adam` optimizer on the model parameters that require gradients. 
optimizer = Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=config.learning_rate) # GPT-2 / GPT-J tokenizer has a pad token, but it is not eos_token by default. We need to set it to eos_token. # only for this model. tokenizer = AutoTokenizer.from_pretrained(config.model_name) tokenizer.pad_token = tokenizer.eos_token # We then build the PPOTrainer, passing the model, the reference model, the tokenizer ppo_trainer = PPOTrainer( config, model, ref_model=ref_model, tokenizer=tokenizer, dataset=dataset, data_collator=collator, optimizer=optimizer, ) # We then build the reward pipeline, we will use the toxicity model to compute the reward. # We first load the toxicity model and tokenizer. toxicity_model_id = "facebook/roberta-hate-speech-dynabench-r4-target" toxicity_tokenizer = RobertaTokenizer.from_pretrained(toxicity_model_id) # We load the toxicity model in fp16 to save memory. toxicity_model = RobertaForSequenceClassification.from_pretrained(toxicity_model_id, torch_dtype=torch.float16).to( ppo_trainer.accelerator.device ) # We then define the arguments to pass to the `generate` function. These arguments # are passed to the `generate` function of the PPOTrainer, which is a wrapper around # the `generate` function of the trained model. generation_kwargs = { "min_length": -1, "top_k": 0.0, "top_p": 1.0, "do_sample": True, "pad_token_id": tokenizer.eos_token_id, } output_min_length = 20 output_max_length = 30 output_length_sampler = LengthSampler(output_min_length, output_max_length) model_save_path = script_args.model_save_path for epoch, batch in tqdm(enumerate(ppo_trainer.dataloader)): query_tensors = batch["input_ids"] # Get response from the policy model response_tensors = [] for query in query_tensors: gen_len = output_length_sampler() generation_kwargs["max_new_tokens"] = gen_len response = ppo_trainer.generate(query, **generation_kwargs) response_tensors.append(response.squeeze()[-gen_len:]) batch["response"] = [tokenizer.decode(r.squeeze()) for r in response_tensors] # Compute sentiment score texts = batch["response"] toxicity_inputs = toxicity_tokenizer(texts, padding=True, truncation=True, return_tensors="pt").to( ppo_trainer.accelerator.device ) logits = toxicity_model(**toxicity_inputs).logits.float() toxicity_labels = (logits[:, 0]).tolist() rewards = [torch.tensor(output) for output in toxicity_labels] # Run PPO step stats = ppo_trainer.step(query_tensors, response_tensors, rewards) ppo_trainer.log_stats(stats, batch, rewards) # Save model every 100 epochs if epoch % 100 == 0: if ppo_trainer.accelerator.is_main_process: ppo_trainer.save_pretrained(model_save_path)
trl/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py/0
{ "file_path": "trl/examples/research_projects/toxicity/scripts/gpt-j-6b-toxicity.py", "repo_id": "trl", "token_count": 3138 }
614
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # /// script # dependencies = [ # "trl @ git+https://github.com/huggingface/trl.git", # "peft", # ] # /// """ Run the ORPO training script with the following command with some example arguments. In general, the optimal configuration for ORPO will be similar to that of DPO without the need for a reference model: # regular: python examples/scripts/orpo.py \ --dataset_name trl-internal-testing/hh-rlhf-helpful-base-trl-style \ --model_name_or_path=gpt2 \ --per_device_train_batch_size 4 \ --max_steps 1000 \ --learning_rate 8e-6 \ --gradient_accumulation_steps 1 \ --eval_steps 500 \ --output_dir="gpt2-aligned-orpo" \ --warmup_steps 150 \ --report_to wandb \ --logging_first_step \ --no_remove_unused_columns # peft: python examples/scripts/orpo.py \ --dataset_name trl-internal-testing/hh-rlhf-helpful-base-trl-style \ --model_name_or_path=gpt2 \ --per_device_train_batch_size 4 \ --max_steps 1000 \ --learning_rate 8e-5 \ --gradient_accumulation_steps 1 \ --eval_steps 500 \ --output_dir="gpt2-lora-aligned-orpo" \ --optim rmsprop \ --warmup_steps 150 \ --report_to wandb \ --logging_first_step \ --no_remove_unused_columns \ --use_peft \ --lora_r=16 \ --lora_alpha=16 """ from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser from trl import ModelConfig, ORPOConfig, ORPOTrainer, ScriptArguments, get_peft_config from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE if __name__ == "__main__": parser = HfArgumentParser((ScriptArguments, ORPOConfig, ModelConfig)) script_args, training_args, model_args = parser.parse_args_into_dataclasses() ################ # Model & Tokenizer ################ model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token ################ # Dataset ################ dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config) if tokenizer.chat_template is None: tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE ################ # Training ################ trainer = ORPOTrainer( model, args=training_args, train_dataset=dataset[script_args.dataset_train_split], eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, processing_class=tokenizer, peft_config=get_peft_config(model_args), ) # train and save the model trainer.train() # Save and push to hub trainer.save_model(training_args.output_dir) if training_args.push_to_hub: trainer.push_to_hub(dataset_name=script_args.dataset_name)
trl/examples/scripts/orpo.py/0
{ "file_path": "trl/examples/scripts/orpo.py", "repo_id": "trl", "token_count": 1380 }
615
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import subprocess import sys from datetime import datetime COPYRIGHT_HEADER = f"""# Copyright 2020-{datetime.now().year} The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ def get_tracked_python_files(): """Get a list of all tracked Python files using git.""" try: # Get the list of all tracked files from Git result = subprocess.run(["git", "ls-files"], stdout=subprocess.PIPE, text=True, check=True) # Split the result by lines to get individual file paths files = result.stdout.splitlines() # Filter only Python files py_files = [f for f in files if f.endswith(".py")] return py_files except subprocess.CalledProcessError as e: print(f"Error fetching tracked files: {e}") return [] def check_and_add_copyright(file_path): """Check if the file contains a copyright notice, and add it if missing.""" if not os.path.isfile(file_path): print(f"[SKIP] {file_path} does not exist.") return with open(file_path, encoding="utf-8") as f: content = f.readlines() # Check if the exact copyright header exists if "".join(content).startswith(COPYRIGHT_HEADER): return True # If no copyright notice was found, prepend the header print(f"[MODIFY] Adding copyright to {file_path}.") with open(file_path, "w", encoding="utf-8") as f: # Write the copyright header followed by the original content f.write(COPYRIGHT_HEADER + "\n" + "".join(content)) return False def main(): """Main function to check and add copyright for all tracked Python files.""" py_files = get_tracked_python_files() if not py_files: print("No Python files are tracked in the repository.") return print(f"Checking {len(py_files)} Python files for copyright notice...") have_copyright = [check_and_add_copyright(file_path) for file_path in py_files] if not all(have_copyright): print("❌ Some files were missing the required copyright and have been updated.") sys.exit(1) else: print("✅ All files have the required copyright.") sys.exit(0) if __name__ == "__main__": main()
trl/scripts/add_copyrights.py/0
{ "file_path": "trl/scripts/add_copyrights.py", "repo_id": "trl", "token_count": 1111 }
616
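The script above only prepends the Apache header when a file does not already start with it. A small self-contained sketch of that check on an in-memory string (the sample content and the shortened one-line header are illustrative; the real script compares against the full multi-line header):

from datetime import datetime

header = f"# Copyright 2020-{datetime.now().year} The HuggingFace Team. All rights reserved.\n"
content = "import os\n\nprint('hello')\n"  # pretend file content

if not content.startswith(header):
    # prepend, mirroring check_and_add_copyright above
    content = header + "\n" + content
print(content.splitlines()[0])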
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import itertools import pytest import torch from accelerate.utils.memory import release_memory from datasets import load_dataset from parameterized import parameterized from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig from transformers.testing_utils import ( backend_empty_cache, require_liger_kernel, require_peft, require_torch_accelerator, require_torch_multi_accelerator, torch_device, ) from transformers.utils import is_peft_available from trl import SFTConfig, SFTTrainer from ..testing_utils import TrlTestCase, require_bitsandbytes from .testing_constants import DEVICE_MAP_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, MODELS_TO_TEST, PACKING_OPTIONS if is_peft_available(): from peft import LoraConfig, PeftModel @pytest.mark.slow @require_torch_accelerator @require_peft class SFTTrainerSlowTester(TrlTestCase): def setUp(self): super().setUp() self.train_dataset = load_dataset("stanfordnlp/imdb", split="train[:10%]") self.eval_dataset = load_dataset("stanfordnlp/imdb", split="test[:10%]") self.max_length = 128 self.peft_config = LoraConfig( lora_alpha=16, lora_dropout=0.1, r=8, bias="none", task_type="CAUSAL_LM", ) def tearDown(self): gc.collect() backend_empty_cache(torch_device) gc.collect() super().tearDown() @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) def test_sft_trainer_str(self, model_name, packing): """ Simply tests if passing a simple str to `SFTTrainer` loads and runs the trainer as expected. """ training_args = SFTConfig( output_dir=self.tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, packing=packing, max_length=self.max_length, ) trainer = SFTTrainer( model_name, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) def test_sft_trainer_transformers(self, model_name, packing): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected. """ training_args = SFTConfig( output_dir=self.tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, packing=packing, max_length=self.max_length, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=training_args, processing_class=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) @require_peft def test_sft_trainer_peft(self, model_name, packing): """ Simply tests if passing a transformers model + peft config to `SFTTrainer` loads and runs the trainer as expected. 
""" training_args = SFTConfig( output_dir=self.tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, packing=packing, max_length=self.max_length, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=training_args, processing_class=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=self.peft_config, ) self.assertIsInstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) def test_sft_trainer_transformers_mp(self, model_name, packing): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected in mixed precision. """ training_args = SFTConfig( output_dir=self.tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, # this is sufficient to enable amp packing=packing, max_length=self.max_length, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=training_args, processing_class=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) def test_sft_trainer_transformers_mp_gc(self, model_name, packing, gradient_checkpointing_kwargs): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing. """ training_args = SFTConfig( output_dir=self.tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, packing=packing, max_length=self.max_length, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=training_args, processing_class=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) @require_peft def test_sft_trainer_transformers_mp_gc_peft(self, model_name, packing, gradient_checkpointing_kwargs): """ Simply tests if passing a transformers model + PEFT to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing. 
""" training_args = SFTConfig( output_dir=self.tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, packing=packing, max_length=self.max_length, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) model = AutoModelForCausalLM.from_pretrained(model_name) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=training_args, processing_class=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=self.peft_config, ) self.assertIsInstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand( list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS, DEVICE_MAP_OPTIONS)) ) @require_torch_multi_accelerator def test_sft_trainer_transformers_mp_gc_device_map( self, model_name, packing, gradient_checkpointing_kwargs, device_map ): """ Simply tests if passing a transformers model to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing (single, multi-gpu, etc). """ training_args = SFTConfig( output_dir=self.tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, packing=packing, max_length=self.max_length, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) model = AutoModelForCausalLM.from_pretrained(model_name, device_map=device_map) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=training_args, processing_class=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS, GRADIENT_CHECKPOINTING_KWARGS))) @require_peft @require_bitsandbytes def test_sft_trainer_transformers_mp_gc_peft_qlora(self, model_name, packing, gradient_checkpointing_kwargs): """ Simply tests if passing a transformers model + PEFT + bnb to `SFTTrainer` loads and runs the trainer as expected in mixed precision + different scenarios of gradient_checkpointing. """ training_args = SFTConfig( output_dir=self.tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, packing=packing, max_length=self.max_length, fp16=True, # this is sufficient to enable amp gradient_checkpointing=True, gradient_checkpointing_kwargs=gradient_checkpointing_kwargs, ) quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=training_args, processing_class=tokenizer, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, peft_config=self.peft_config, ) self.assertIsInstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) @require_peft @require_bitsandbytes def test_sft_trainer_with_chat_format_qlora(self, model_name, packing): """ Simply tests if using setup_chat_format with a transformers model + peft + bnb config to `SFTTrainer` loads and runs the trainer as expected. 
""" train_dataset = load_dataset("trl-internal-testing/dolly-chatml-sft", split="train") training_args = SFTConfig( packing=packing, max_length=self.max_length, output_dir=self.tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=10, fp16=True, ) quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16) model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=quantization_config) tokenizer = AutoTokenizer.from_pretrained(model_name) trainer = SFTTrainer( model, args=training_args, processing_class=tokenizer, train_dataset=train_dataset, peft_config=self.peft_config, ) self.assertIsInstance(trainer.model, PeftModel) trainer.train() release_memory(model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) @require_liger_kernel def test_sft_trainer_with_liger(self, model_name, packing): """ Tests if passing use_liger=True to SFTConfig loads and runs the trainer with AutoLigerKernelForCausalLM as expected. """ import importlib def cleanup_liger_patches(trainer): """Clean up liger_kernel patches by reloading the model's specific module""" try: # Get the specific module that was used by the trainer's model module_path = trainer.model.__module__ reload_module = importlib.import_module(module_path) importlib.reload(reload_module) except Exception: pass # Continue if reload fails training_args = SFTConfig( output_dir=self.tmp_dir, logging_strategy="no", report_to="none", per_device_train_batch_size=2, max_steps=2, packing=packing, max_length=self.max_length, use_liger_kernel=True, ) trainer = SFTTrainer( model_name, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset, ) # Register cleanup now that we have the trainer self.addCleanup(cleanup_liger_patches, trainer) trainer.train() release_memory(trainer.model, trainer) @parameterized.expand(list(itertools.product(MODELS_TO_TEST, PACKING_OPTIONS))) @require_torch_accelerator def test_train_offloading(self, model_name, packing): """Test that activation offloading works with SFTTrainer.""" # Initialize the trainer training_args = SFTConfig( output_dir=self.tmp_dir, activation_offloading=True, report_to="none", per_device_train_batch_size=2, max_steps=2, packing=packing, max_length=self.max_length, ) trainer = SFTTrainer( model=model_name, args=training_args, train_dataset=self.train_dataset, eval_dataset=self.eval_dataset ) # Save the initial parameters to compare them later previous_trainable_params = {n: param.clone() for n, param in trainer.model.named_parameters()} # Train the model trainer.train() # Check that the training loss is not None self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) # Check the params have changed for n, param in previous_trainable_params.items(): new_param = trainer.model.get_parameter(n) self.assertFalse(torch.allclose(param, new_param), f"Parameter {n} has not changed") release_memory(trainer.model, trainer)
trl/tests/slow/test_sft_slow.py/0
{ "file_path": "trl/tests/slow/test_sft_slow.py", "repo_id": "trl", "token_count": 7352 }
617
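Outside the parameterized test harness, the basic pattern these slow tests exercise reduces to a short SFT smoke run. The model id, dataset slice, and output directory below are illustrative assumptions:

from datasets import load_dataset
from trl import SFTConfig, SFTTrainer

train_dataset = load_dataset("stanfordnlp/imdb", split="train[:1%]")

training_args = SFTConfig(
    output_dir="sft-smoke-test",
    per_device_train_batch_size=2,
    max_steps=10,
    packing=False,
    max_length=128,
    report_to="none",
)
trainer = SFTTrainer(
    "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5",  # any causal LM id works here
    args=training_args,
    train_dataset=train_dataset,
)
trainer.train()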
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch import torch.nn.functional as F from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig from trl import GKDConfig, GKDTrainer from trl.trainer.utils import SIMPLE_CHAT_TEMPLATE from .testing_utils import TrlTestCase class TestGKDTrainer(TrlTestCase): @classmethod def setUpClass(cls): model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" cls.tokenizer = AutoTokenizer.from_pretrained(model_id) cls.tokenizer.pad_token = cls.tokenizer.eos_token cls.model = AutoModelForCausalLM.from_pretrained(model_id) cls.generation_config = GenerationConfig( max_new_tokens=20, num_return_sequences=1, pad_token_id=cls.tokenizer.pad_token_id, eos_token_id=cls.tokenizer.eos_token_id, ) def test_generate_on_policy_outputs_deterministic(self): prompts = ["Hello, how are you?", "What's the weather like today?"] tokenized_prompts = self.tokenizer(prompts, return_tensors="pt", padding=True) inputs = { "prompts": tokenized_prompts["input_ids"], "prompt_attention_mask": tokenized_prompts["attention_mask"], } # Set temperature to 0 for deterministic output deterministic_generation_config = GenerationConfig( max_new_tokens=30, num_return_sequences=1, pad_token_id=self.tokenizer.pad_token_id, eos_token_id=self.tokenizer.eos_token_id, temperature=0.0, ) outputs = GKDTrainer.generate_on_policy_outputs( self.model, inputs, deterministic_generation_config, self.tokenizer.pad_token_id ) new_input_ids, new_attention_mask, new_labels = outputs # Decode the generated outputs generated_texts = self.tokenizer.batch_decode(new_input_ids, skip_special_tokens=True) # Check if the generated texts start with the original prompts for prompt, generated_text in zip(prompts, generated_texts): self.assertTrue( generated_text.startswith(prompt), f"Generated text '{generated_text}' does not start with prompt '{prompt}'", ) # Run the generation twice and check if the outputs are identical outputs2 = GKDTrainer.generate_on_policy_outputs( self.model, inputs, deterministic_generation_config, self.tokenizer.pad_token_id ) new_input_ids2, new_attention_mask2, new_labels2 = outputs2 # Check if the two generations are identical self.assertTrue(torch.all(new_input_ids.eq(new_input_ids2)), "Deterministic generations are not identical") self.assertTrue( torch.all(new_attention_mask.eq(new_attention_mask2)), "Attention masks for deterministic generations are not identical", ) self.assertTrue( torch.all(new_labels.eq(new_labels2)), "Labels for deterministic generations are not identical", ) def test_generate_on_policy_outputs(self): prompts = ["Hello, how are you?", "What's the weather like today?"] tokenized_prompts = self.tokenizer(prompts, return_tensors="pt", padding=True) inputs = { "prompts": tokenized_prompts["input_ids"], "attention_mask": tokenized_prompts["attention_mask"], } outputs = GKDTrainer.generate_on_policy_outputs( self.model, inputs, 
self.generation_config, self.tokenizer.pad_token_id ) # Check that outputs is a tuple of three tensors self.assertIsInstance(outputs, tuple) self.assertEqual(len(outputs), 3) new_input_ids, new_attention_mask, new_labels = outputs # Check shapes batch_size = len(prompts) self.assertEqual(new_input_ids.shape[0], batch_size) self.assertEqual(new_attention_mask.shape[0], batch_size) self.assertEqual(new_labels.shape[0], batch_size) # Check types self.assertIsInstance(new_input_ids, torch.Tensor) self.assertIsInstance(new_attention_mask, torch.Tensor) self.assertIsInstance(new_labels, torch.Tensor) # Check that new_input_ids and new_attention_mask have the same shape self.assertEqual(new_input_ids.shape, new_attention_mask.shape) self.assertEqual(new_labels.shape, new_attention_mask.shape) class TestGeneralizedJSDLoss(TrlTestCase): def setUp(self): super().setUp() self.batch_size = 2 self.seq_length = 3 self.vocab_size = 5 self.student_logits = torch.randn(self.batch_size, self.seq_length, self.vocab_size) self.teacher_logits = torch.randn(self.batch_size, self.seq_length, self.vocab_size) def test_uniform_distribution(self): logits = torch.ones(1, 1, self.vocab_size) loss = GKDTrainer.generalized_jsd_loss(logits, logits) self.assertAlmostEqual(loss.item(), 0, places=5) def test_generalized_jsd_loss_edge_cases(self): # Setup student_logits = torch.log(torch.tensor([[0.1, 0.9]])).unsqueeze(0) teacher_logits = torch.log(torch.tensor([[0.9, 0.1]])).unsqueeze(0) # Case 1: beta = 1 (should be equivalent to KL(student || teacher)) loss_beta_1 = GKDTrainer.generalized_jsd_loss(student_logits, teacher_logits, beta=1) expected_loss_beta_1 = F.kl_div( F.log_softmax(teacher_logits, dim=-1), F.softmax(student_logits, dim=-1), reduction="batchmean" ) self.assertAlmostEqual(loss_beta_1.item(), expected_loss_beta_1.item(), places=5) # Case 2: beta = 0 (should be equivalent to KL(teacher || student)) loss_beta_0 = GKDTrainer.generalized_jsd_loss(student_logits, teacher_logits, beta=0) expected_loss_beta_0 = F.kl_div( F.log_softmax(student_logits, dim=-1), F.softmax(teacher_logits, dim=-1), reduction="batchmean" ) self.assertAlmostEqual(loss_beta_0.item(), expected_loss_beta_0.item(), places=5) def test_output_shape(self): loss = GKDTrainer.generalized_jsd_loss(self.student_logits, self.teacher_logits) self.assertTrue(torch.is_tensor(loss)) self.assertEqual(loss.shape, torch.Size([])) def test_beta_values(self): loss_beta_0 = GKDTrainer.generalized_jsd_loss(self.student_logits, self.teacher_logits, beta=0) loss_beta_1 = GKDTrainer.generalized_jsd_loss(self.student_logits, self.teacher_logits, beta=1) self.assertNotEqual(loss_beta_0, loss_beta_1) def test_temperature_scaling(self): loss_temp_1 = GKDTrainer.generalized_jsd_loss(self.student_logits, self.teacher_logits, temperature=1) loss_temp_2 = GKDTrainer.generalized_jsd_loss(self.student_logits, self.teacher_logits, temperature=2) self.assertNotEqual(loss_temp_1, loss_temp_2) def test_reduction_methods(self): loss_batchmean = GKDTrainer.generalized_jsd_loss( self.student_logits, self.teacher_logits, reduction="batchmean" ) loss_sum = GKDTrainer.generalized_jsd_loss(self.student_logits, self.teacher_logits, reduction="sum") loss_mean = GKDTrainer.generalized_jsd_loss(self.student_logits, self.teacher_logits, reduction="mean") loss_none = GKDTrainer.generalized_jsd_loss(self.student_logits, self.teacher_logits, reduction="none") self.assertEqual(loss_batchmean.shape, torch.Size([])) self.assertEqual(loss_sum.shape, torch.Size([])) 
self.assertEqual(loss_mean.shape, torch.Size([])) self.assertEqual(loss_none.shape, self.student_logits.shape) def test_symmetry(self): student_teacher = GKDTrainer.generalized_jsd_loss(self.student_logits, self.teacher_logits, beta=0.1) teacher_student = GKDTrainer.generalized_jsd_loss(self.teacher_logits, self.student_logits, beta=0.1) self.assertNotEqual(student_teacher, teacher_student) student_teacher = GKDTrainer.generalized_jsd_loss(self.student_logits, self.teacher_logits, beta=0.5) teacher_student = GKDTrainer.generalized_jsd_loss(self.teacher_logits, self.student_logits, beta=0.5) self.assertEqual(student_teacher, teacher_student) def test_zero_loss_for_identical_inputs(self): identical_logits = torch.randn(self.batch_size, self.seq_length, self.vocab_size) loss = GKDTrainer.generalized_jsd_loss(identical_logits, identical_logits) self.assertAlmostEqual(loss.item(), 0, places=6) class GKDTrainerTester(TrlTestCase): def setUp(self): super().setUp() self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.model = AutoModelForCausalLM.from_pretrained(self.model_id) self.teacher_model = AutoModelForCausalLM.from_pretrained(self.model_id) self.tokenizer = AutoTokenizer.from_pretrained(self.model_id) self.tokenizer.pad_token = self.tokenizer.eos_token # Ensure the tokenizer has a chat template if not hasattr(self.tokenizer, "chat_template") or self.tokenizer.chat_template is None: self.tokenizer.chat_template = SIMPLE_CHAT_TEMPLATE def test_gkd_trainer(self): training_args = GKDConfig( output_dir=self.tmp_dir, dataloader_drop_last=True, eval_strategy="steps", max_steps=4, eval_steps=2, save_steps=2, per_device_train_batch_size=2, per_device_eval_batch_size=2, report_to="none", ) dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_language_modeling") trainer = GKDTrainer( model=self.model_id, teacher_model=self.model_id, args=training_args, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], processing_class=self.tokenizer, ) trainer.train() self.assertIsNotNone(trainer.state.log_history[(-1)]["train_loss"]) self.assertIsNotNone(trainer.state.log_history[0]["eval_loss"]) self.assertIn("model.safetensors", os.listdir(self.tmp_dir + "/checkpoint-2")) def test_generation_config_init(self): training_args = GKDConfig(output_dir=self.tmp_dir) dummy_dataset = load_dataset("trl-internal-testing/zen", "conversational_language_modeling") trainer = GKDTrainer( model=self.model_id, teacher_model=self.model_id, args=training_args, train_dataset=dummy_dataset["train"], eval_dataset=dummy_dataset["test"], processing_class=self.tokenizer, ) self.assertEqual(trainer.generation_config.pad_token_id, self.tokenizer.eos_token_id) self.assertEqual(trainer.generation_config.eos_token_id, self.model.generation_config.eos_token_id) self.assertEqual(trainer.generation_config.max_new_tokens, training_args.max_new_tokens) self.assertEqual(trainer.generation_config.temperature, training_args.temperature) self.assertEqual(trainer.generation_config.top_k, 0)
trl/tests/test_gkd_trainer.py/0
{ "file_path": "trl/tests/test_gkd_trainer.py", "repo_id": "trl", "token_count": 5126 }
618
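The edge-case tests above pin down how beta interpolates the generalized JSD loss between the two KL directions. A quick numerical sketch of that property, using random logits with the same shapes as in the tests:

import torch
from trl import GKDTrainer

student = torch.randn(2, 3, 5)
teacher = torch.randn(2, 3, 5)

loss_beta_0 = GKDTrainer.generalized_jsd_loss(student, teacher, beta=0)      # ~ KL(teacher || student), per the edge-case test
loss_beta_1 = GKDTrainer.generalized_jsd_loss(student, teacher, beta=1)      # ~ KL(student || teacher)
loss_beta_half = GKDTrainer.generalized_jsd_loss(student, teacher, beta=0.5) # symmetric mixture
print(loss_beta_0.item(), loss_beta_half.item(), loss_beta_1.item())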
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from datasets import Dataset from transformers import AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer from trl import RLOOConfig, RLOOTrainer from .testing_utils import TrlTestCase class RLOOTrainerTester(TrlTestCase): def setUp(self): super().setUp() self.model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.policy_model = AutoModelForCausalLM.from_pretrained(self.model_id) self.reward_model = AutoModelForSequenceClassification.from_pretrained(self.model_id) self.policy_ref_model = AutoModelForCausalLM.from_pretrained(self.model_id) self.tokenizer = AutoTokenizer.from_pretrained(self.model_id, padding_side="left") self.tokenizer.add_special_tokens({"pad_token": "[PAD]"}) def test_rloo_checkpoint(self): training_args = RLOOConfig( output_dir=self.tmp_dir, per_device_train_batch_size=2, total_episodes=1, report_to="none", ) dummy_text = [{"content": "Hello World!", "role": "user"}] dummy_data = self.tokenizer.apply_chat_template(dummy_text) dummy_dataset = Dataset.from_dict({"input_ids": dummy_data}) trainer = RLOOTrainer( config=training_args, policy=self.policy_model, reward_model=self.reward_model, ref_policy=self.policy_ref_model, processing_class=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) trainer._save_checkpoint(trainer.model, trial=None) def test_rloo_reward(self): local_batch_size = 3 rloo_k = 4 sequence_length = 5 # Add sequence length for testing token-level rewards # fmt: off rlhf_reward = torch.tensor([ 1, 2, 3, # first rlhf reward for three prompts 2, 3, 4, # second rlhf reward for three prompts 5, 6, 7, # third rlhf reward for three prompts 8, 9, 10, # fourth rlhf reward for three prompts ]).float() # Create padding mask where 1 indicates valid token, 0 indicates padding padding_mask = torch.ones(local_batch_size * rloo_k, sequence_length) # Set padding based on sequence lengths sequence_lengths = torch.tensor([ 3, 4, 3, # lengths for first batch 4, 3, 4, # lengths for second batch 3, 4, 3, # lengths for third batch 4, 3, 4, # lengths for fourth batch ]) for i, length in enumerate(sequence_lengths): padding_mask[i, length:] = 0 # Add kl tensor for testing token-level rewards kl = torch.ones(local_batch_size * rloo_k, sequence_length) # Dummy KL values # fmt: on # Test token-level KL rewards following OpenRLHF implementation kl_coef = 0.1 kl_reward = -kl_coef * kl # Find last non-padded position eos_indices = padding_mask.size(1) - 1 - padding_mask.long().fliplr().argmax(dim=1, keepdim=True) # Create last reward tensor last_reward = torch.zeros_like(kl) last_reward.scatter_(dim=1, index=eos_indices, src=rlhf_reward.reshape(-1, 1)) # Test last_reward - should have rlhf_reward at the last non-padded position for i, (length, reward) in enumerate(zip(sequence_lengths, rlhf_reward)): # Check reward is at correct position self.assertEqual(last_reward[i, length - 1].item(), reward.item()) # Check zeros 
elsewhere self.assertTrue(torch.all(last_reward[i, : length - 1] == 0)) self.assertTrue(torch.all(last_reward[i, length:] == 0)) # Combine rewards reward = last_reward + kl_reward non_score_reward = kl_reward.sum(1) token_level_rlhf_reward = reward.sum(1) # Test reward components # KL reward should be -0.1 for each token in sequence length expected_kl_reward = -0.1 * sequence_length # Each position gets -0.1 KL reward torch.testing.assert_close(non_score_reward, torch.tensor(expected_kl_reward).expand_as(non_score_reward)) # Total reward should be rlhf_reward + kl_reward expected_total = rlhf_reward + expected_kl_reward torch.testing.assert_close(token_level_rlhf_reward, expected_total) # Test sequence-level rewards (existing test) baseline = (rlhf_reward.sum(0) - rlhf_reward) / (rloo_k - 1) advantages = torch.zeros_like(rlhf_reward) for i in range(0, len(advantages), local_batch_size): other_response_rlhf_rewards = [] for j in range(0, len(advantages), local_batch_size): if i != j: other_response_rlhf_rewards.append(rlhf_reward[j : j + local_batch_size]) advantages[i : i + local_batch_size] = rlhf_reward[i : i + local_batch_size] - torch.stack( other_response_rlhf_rewards ).mean(0) self.assertLess((1 - (2 + 5 + 8) / 3 - advantages[0].item()), 1e-6) self.assertLess((6 - (3 + 2 + 9) / 3 - advantages[7].item()), 1e-6) # Test vectorized implementation rlhf_reward = rlhf_reward.reshape(rloo_k, local_batch_size) baseline = (rlhf_reward.sum(0) - rlhf_reward) / (rloo_k - 1) vec_advantages = rlhf_reward - baseline torch.testing.assert_close(vec_advantages.flatten(), advantages) def test_rloo_training(self): training_args = RLOOConfig( output_dir=self.tmp_dir, per_device_train_batch_size=2, per_device_eval_batch_size=2, total_episodes=1, num_train_epochs=1, max_steps=2, report_to="none", ) # Create a simple dataset dummy_text = [{"content": "Hello World!", "role": "user"}] dummy_data = self.tokenizer.apply_chat_template(dummy_text) dummy_dataset = Dataset.from_dict({"input_ids": [dummy_data, dummy_data]}) trainer = RLOOTrainer( config=training_args, policy=self.policy_model, reward_model=self.reward_model, ref_policy=self.policy_ref_model, processing_class=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) # Test that training completes without errors trainer.train() # Check if objective/rlhf_reward is available self.assertIn("objective/rlhf_reward", trainer.state.log_history[-1]) def test_rloo_training_with_custom_reward(self): # dummy reward function def reward_function(texts): # based on length of text rewards = [len(text) for text in texts] return rewards training_args = RLOOConfig( output_dir=self.tmp_dir, per_device_train_batch_size=2, per_device_eval_batch_size=2, total_episodes=1, num_train_epochs=1, max_steps=2, report_to="none", ) # Create a simple dataset dummy_text = [{"content": "Hello World!", "role": "user"}] dummy_data = self.tokenizer.apply_chat_template(dummy_text) dummy_dataset = Dataset.from_dict({"input_ids": [dummy_data, dummy_data]}) trainer = RLOOTrainer( config=training_args, policy=self.policy_model, reward_model=reward_function, ref_policy=self.policy_ref_model, processing_class=self.tokenizer, train_dataset=dummy_dataset, eval_dataset=dummy_dataset, ) # Test that training completes without errors trainer.train() # Check if objective/rlhf_reward is available self.assertIn("objective/rlhf_reward", trainer.state.log_history[-1])
trl/tests/test_rloo_trainer.py/0
{ "file_path": "trl/tests/test_rloo_trainer.py", "repo_id": "trl", "token_count": 3828 }
619
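test_rloo_reward checks the leave-one-out baseline both with an explicit loop and in vectorized form. The vectorized computation on its own, with the same toy rewards used in the test:

import torch

rloo_k, local_batch_size = 4, 3
rlhf_reward = torch.tensor([
    [1., 2., 3.],    # first completion's reward for each of the three prompts
    [2., 3., 4.],
    [5., 6., 7.],
    [8., 9., 10.],
])  # shape (rloo_k, local_batch_size)

baseline = (rlhf_reward.sum(0) - rlhf_reward) / (rloo_k - 1)  # mean reward of the other k-1 completions
advantages = rlhf_reward - baseline
print(advantages)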
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.resources as resources import os import sys from accelerate import logging from accelerate.commands.launch import launch_command, launch_command_parser from .scripts.dpo import make_parser as make_dpo_parser from .scripts.env import print_env from .scripts.grpo import make_parser as make_grpo_parser from .scripts.kto import make_parser as make_kto_parser from .scripts.sft import make_parser as make_sft_parser from .scripts.utils import TrlParser from .scripts.vllm_serve import main as vllm_serve_main from .scripts.vllm_serve import make_parser as make_vllm_serve_parser logger = logging.get_logger(__name__) def main(): parser = TrlParser(prog="TRL CLI", usage="trl", allow_abbrev=False) # Add the subparsers subparsers = parser.add_subparsers(help="available commands", dest="command", parser_class=TrlParser) # Add the subparsers for every script make_dpo_parser(subparsers) subparsers.add_parser("env", help="Print the environment information") make_grpo_parser(subparsers) make_kto_parser(subparsers) make_sft_parser(subparsers) make_vllm_serve_parser(subparsers) # Parse the arguments; the remaining ones (`launch_args`) are passed to the 'accelerate launch' subparser. # Duplicates may occur if the same argument is provided in both the config file and CLI. # For example: launch_args = `["--num_processes", "4", "--num_processes", "8"]`. # Deduplication and precedence (CLI over config) are handled later by launch_command_parser. args, launch_args = parser.parse_args_and_config(return_remaining_strings=True) # Replace `--accelerate_config foo` with `--config_file trl/accelerate_configs/foo.yaml` if it is present in the # launch_args. It allows the user to use predefined accelerate configs from the `trl` package. if "--accelerate_config" in launch_args: # Get the index of the '--accelerate_config' argument and the corresponding config name config_index = launch_args.index("--accelerate_config") config_name = launch_args[config_index + 1] # If the config_name correspond to a path in the filesystem, we don't want to override it if os.path.isfile(config_name): accelerate_config_path = config_name elif resources.files("trl.accelerate_configs").joinpath(f"{config_name}.yaml").exists(): # Get the predefined accelerate config path from the package resources accelerate_config_path = resources.files("trl.accelerate_configs").joinpath(f"{config_name}.yaml") else: raise ValueError( f"Accelerate config {config_name} is neither a file nor a valid config in the `trl` package. " "Please provide a valid config name or a path to a config file." 
) # Remove '--accelerate_config' and its corresponding config name launch_args.pop(config_index) launch_args.pop(config_index) # Insert '--config_file' and the absolute path to the front of the list launch_args = ["--config_file", str(accelerate_config_path)] + launch_args if args.command == "dpo": # Get the default args for the launch command dpo_training_script = resources.files("trl.scripts").joinpath("dpo.py") args = launch_command_parser().parse_args([str(dpo_training_script)]) # Feed the args to the launch command args.training_script_args = sys.argv[2:] # remove "trl" and "dpo" launch_command(args) # launch training elif args.command == "env": print_env() elif args.command == "grpo": # Get the default args for the launch command grpo_training_script = resources.files("trl.scripts").joinpath("grpo.py") args = launch_command_parser().parse_args([str(grpo_training_script)]) # Feed the args to the launch command args.training_script_args = sys.argv[2:] # remove "trl" and "grpo" launch_command(args) # launch training elif args.command == "kto": # Get the default args for the launch command kto_training_script = resources.files("trl.scripts").joinpath("kto.py") args = launch_command_parser().parse_args([str(kto_training_script)]) # Feed the args to the launch command args.training_script_args = sys.argv[2:] # remove "trl" and "kto" launch_command(args) # launch training elif args.command == "sft": # Get the path to the training script sft_training_script = resources.files("trl.scripts").joinpath("sft.py") # This simulates running: `accelerate launch <launch args> sft.py <training script args>`. # Note that the training script args may include launch-related arguments (e.g., `--num_processes`), # but we rely on the script to ignore any that don't apply to it. training_script_args = sys.argv[2:] # Remove "trl" and "sft" args = launch_command_parser().parse_args(launch_args + [str(sft_training_script)] + training_script_args) launch_command(args) # launch training elif args.command == "vllm-serve": (script_args,) = parser.parse_args_and_config() # Known issue: Using DeepSpeed with tensor_parallel_size=1 and data_parallel_size>1 may cause a crash when # launched via the CLI. Suggest running the module directly. # More information: https://github.com/vllm-project/vllm/issues/17079 if script_args.tensor_parallel_size == 1 and script_args.data_parallel_size > 1: logger.warning( "Detected configuration: tensor_parallel_size=1 and data_parallel_size>1. This setup is known to " "cause a crash when using the `trl vllm-serve` CLI entry point. As a workaround, please run the " "server using the module path instead: `python -m trl.scripts.vllm_serve`", ) vllm_serve_main(script_args) if __name__ == "__main__": main()
trl/trl/cli.py/0
{ "file_path": "trl/trl/cli.py", "repo_id": "trl", "token_count": 2383 }
620
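The --accelerate_config handling above boils down to: treat the value as a filesystem path if it exists, otherwise look for a packaged YAML of the same name. A standalone sketch of that resolution (the name "zero2" is an assumption about which configs ship in trl.accelerate_configs):

import importlib.resources as resources
import os

config_name = "zero2"
if os.path.isfile(config_name):
    accelerate_config_path = config_name
elif resources.files("trl.accelerate_configs").joinpath(f"{config_name}.yaml").exists():
    accelerate_config_path = str(resources.files("trl.accelerate_configs").joinpath(f"{config_name}.yaml"))
else:
    raise ValueError(f"{config_name} is neither a file nor a packaged accelerate config")
print(accelerate_config_path)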
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ State dict utilities: utility methods for converting state dicts easily File copied from diffusers to avoid import issues and make TRL compatible with most of diffusers versions. """ import enum class StateDictType(enum.Enum): """ The mode to use when converting state dicts. """ DIFFUSERS_OLD = "diffusers_old" PEFT = "peft" PEFT_TO_DIFFUSERS = { ".q_proj.lora_B": ".q_proj.lora_linear_layer.up", ".q_proj.lora_A": ".q_proj.lora_linear_layer.down", ".k_proj.lora_B": ".k_proj.lora_linear_layer.up", ".k_proj.lora_A": ".k_proj.lora_linear_layer.down", ".v_proj.lora_B": ".v_proj.lora_linear_layer.up", ".v_proj.lora_A": ".v_proj.lora_linear_layer.down", ".out_proj.lora_B": ".out_proj.lora_linear_layer.up", ".out_proj.lora_A": ".out_proj.lora_linear_layer.down", "to_k.lora_A": "to_k.lora.down", "to_k.lora_B": "to_k.lora.up", "to_q.lora_A": "to_q.lora.down", "to_q.lora_B": "to_q.lora.up", "to_v.lora_A": "to_v.lora.down", "to_v.lora_B": "to_v.lora.up", "to_out.0.lora_A": "to_out.0.lora.down", "to_out.0.lora_B": "to_out.0.lora.up", } DIFFUSERS_OLD_TO_DIFFUSERS = { ".to_q_lora.up": ".q_proj.lora_linear_layer.up", ".to_q_lora.down": ".q_proj.lora_linear_layer.down", ".to_k_lora.up": ".k_proj.lora_linear_layer.up", ".to_k_lora.down": ".k_proj.lora_linear_layer.down", ".to_v_lora.up": ".v_proj.lora_linear_layer.up", ".to_v_lora.down": ".v_proj.lora_linear_layer.down", ".to_out_lora.up": ".out_proj.lora_linear_layer.up", ".to_out_lora.down": ".out_proj.lora_linear_layer.down", } DIFFUSERS_STATE_DICT_MAPPINGS = { StateDictType.DIFFUSERS_OLD: DIFFUSERS_OLD_TO_DIFFUSERS, StateDictType.PEFT: PEFT_TO_DIFFUSERS, } KEYS_TO_ALWAYS_REPLACE = { ".processor.": ".", } def convert_state_dict(state_dict, mapping): r""" Simply iterates over the state dict and replaces the patterns in `mapping` with the corresponding values. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. mapping (`dict[str, str]`): The mapping to use for conversion, the mapping should be a dictionary with the following structure: - key: the pattern to replace - value: the pattern to replace with Returns: converted_state_dict (`dict`) The converted state dict. """ converted_state_dict = {} for k, v in state_dict.items(): # First, filter out the keys that we always want to replace for pattern in KEYS_TO_ALWAYS_REPLACE.keys(): if pattern in k: new_pattern = KEYS_TO_ALWAYS_REPLACE[pattern] k = k.replace(pattern, new_pattern) for pattern in mapping.keys(): if pattern in k: new_pattern = mapping[pattern] k = k.replace(pattern, new_pattern) break converted_state_dict[k] = v return converted_state_dict def convert_state_dict_to_diffusers(state_dict, original_type=None, **kwargs): r""" Converts a state dict to new diffusers format. The state dict can be from previous diffusers format (`OLD_DIFFUSERS`), or PEFT format (`PEFT`) or new diffusers format (`DIFFUSERS`). In the last case the method will return the state dict as is. 
The method only supports the conversion from diffusers old, PEFT to diffusers new for now. Args: state_dict (`dict[str, torch.Tensor]`): The state dict to convert. original_type (`StateDictType`, *optional*): The original type of the state dict, if not provided, the method will try to infer it automatically. kwargs (`dict`, *args*): Additional arguments to pass to the method. - **adapter_name**: For example, in case of PEFT, some keys will be pre-pended with the adapter name, therefore needs a special handling. By default PEFT also takes care of that in `get_peft_model_state_dict` method: https://github.com/huggingface/peft/blob/ba0477f2985b1ba311b83459d29895c809404e99/src/peft/utils/save_and_load.py#L92 but we add it here in case we don't want to rely on that method. """ peft_adapter_name = kwargs.pop("adapter_name", None) if peft_adapter_name is not None: peft_adapter_name = "." + peft_adapter_name else: peft_adapter_name = "" if original_type is None: # Old diffusers to PEFT if any("to_out_lora" in k for k in state_dict.keys()): original_type = StateDictType.DIFFUSERS_OLD elif any(f".lora_A{peft_adapter_name}.weight" in k for k in state_dict.keys()): original_type = StateDictType.PEFT elif any("lora_linear_layer" in k for k in state_dict.keys()): # nothing to do return state_dict else: raise ValueError("Could not automatically infer state dict type") if original_type not in DIFFUSERS_STATE_DICT_MAPPINGS.keys(): raise ValueError(f"Original type {original_type} is not supported") mapping = DIFFUSERS_STATE_DICT_MAPPINGS[original_type] return convert_state_dict(state_dict, mapping)
trl/trl/models/sd_utils.py/0
{ "file_path": "trl/trl/models/sd_utils.py", "repo_id": "trl", "token_count": 2510 }
621
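A tiny round trip through convert_state_dict_to_diffusers with a dummy old-diffusers key (the key name and tensor shape are illustrative): the ".processor." segment is dropped first, then the LoRA suffix is remapped via DIFFUSERS_OLD_TO_DIFFUSERS:

import torch
from trl.models.sd_utils import StateDictType, convert_state_dict_to_diffusers

old_style = {"mid_block.attentions.0.processor.to_out_lora.up.weight": torch.zeros(4, 4)}
new_style = convert_state_dict_to_diffusers(old_style, original_type=StateDictType.DIFFUSERS_OLD)
print(list(new_style.keys()))
# ['mid_block.attentions.0.out_proj.lora_linear_layer.up.weight']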
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys from dataclasses import dataclass, field from typing import Any, Optional from transformers import is_bitsandbytes_available from ..core import flatten_dict @dataclass class AlignPropConfig: r""" Configuration class for the [`AlignPropTrainer`]. Using [`~transformers.HfArgumentParser`] we can turn this class into [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the command line. Parameters: exp_name (`str`, *optional*, defaults to `os.path.basename(sys.argv[0])[: -len(".py")]`): Name of this experiment (defaults to the file name without the extension). run_name (`str`, *optional*, defaults to `""`): Name of this run. seed (`int`, *optional*, defaults to `0`): Random seed for reproducibility. log_with (`str` or `None`, *optional*, defaults to `None`): Log with either `"wandb"` or `"tensorboard"`. Check [tracking](https://huggingface.co/docs/accelerate/usage_guides/tracking) for more details. log_image_freq (`int`, *optional*, defaults to `1`): Frequency for logging images. tracker_kwargs (`dict[str, Any]`, *optional*, defaults to `{}`): Keyword arguments for the tracker (e.g., `wandb_project`). accelerator_kwargs (`dict[str, Any]`, *optional*, defaults to `{}`): Keyword arguments for the accelerator. project_kwargs (`dict[str, Any]`, *optional*, defaults to `{}`): Keyword arguments for the accelerator project config (e.g., `logging_dir`). tracker_project_name (`str`, *optional*, defaults to `"trl"`): Name of project to use for tracking. logdir (`str`, *optional*, defaults to `"logs"`): Top-level logging directory for checkpoint saving. num_epochs (`int`, *optional*, defaults to `100`): Number of epochs to train. save_freq (`int`, *optional*, defaults to `1`): Number of epochs between saving model checkpoints. num_checkpoint_limit (`int`, *optional*, defaults to `5`): Number of checkpoints to keep before overwriting old ones. mixed_precision (`str`, *optional*, defaults to `"fp16"`): Mixed precision training. allow_tf32 (`bool`, *optional*, defaults to `True`): Allow `tf32` on Ampere GPUs. resume_from (`str`, *optional*, defaults to `""`): Path to resume training from a checkpoint. sample_num_steps (`int`, *optional*, defaults to `50`): Number of sampler inference steps. sample_eta (`float`, *optional*, defaults to `1.0`): Eta parameter for the DDIM sampler. sample_guidance_scale (`float`, *optional*, defaults to `5.0`): Classifier-free guidance weight. train_batch_size (`int`, *optional*, defaults to `1`): Batch size for training. train_use_8bit_adam (`bool`, *optional*, defaults to `False`): Whether to use the 8bit Adam optimizer from `bitsandbytes`. train_learning_rate (`float`, *optional*, defaults to `1e-3`): Learning rate. train_adam_beta1 (`float`, *optional*, defaults to `0.9`): Beta1 for Adam optimizer. train_adam_beta2 (`float`, *optional*, defaults to `0.999`): Beta2 for Adam optimizer. 
train_adam_weight_decay (`float`, *optional*, defaults to `1e-4`): Weight decay for Adam optimizer. train_adam_epsilon (`float`, *optional*, defaults to `1e-8`): Epsilon value for Adam optimizer. train_gradient_accumulation_steps (`int`, *optional*, defaults to `1`): Number of gradient accumulation steps. train_max_grad_norm (`float`, *optional*, defaults to `1.0`): Maximum gradient norm for gradient clipping. negative_prompts (`str` or `None`, *optional*, defaults to `None`): Comma-separated list of prompts to use as negative examples. truncated_backprop_rand (`bool`, *optional*, defaults to `True`): If `True`, randomized truncation to different diffusion timesteps is used. truncated_backprop_timestep (`int`, *optional*, defaults to `49`): Absolute timestep to which the gradients are backpropagated. Used only if `truncated_backprop_rand=False`. truncated_rand_backprop_minmax (`tuple[int, int]`, *optional*, defaults to `(0, 50)`): Range of diffusion timesteps for randomized truncated backpropagation. push_to_hub (`bool`, *optional*, defaults to `False`): Whether to push the final model to the Hub. """ exp_name: str = field( default=os.path.basename(sys.argv[0])[: -len(".py")], metadata={"help": "Name of this experiment (defaults to the file name without the extension)."}, ) run_name: str = field(default="", metadata={"help": "Name of this run."}) seed: int = field(default=0, metadata={"help": "Random seed for reproducibility."}) log_with: Optional[str] = field( default=None, metadata={"help": "Log with either 'wandb' or 'tensorboard'.", "choices": ["wandb", "tensorboard"]}, ) log_image_freq: int = field(default=1, metadata={"help": "Frequency for logging images."}) tracker_kwargs: dict[str, Any] = field( default_factory=dict, metadata={"help": "Keyword arguments for the tracker (e.g., `wandb_project`)."}, ) accelerator_kwargs: dict[str, Any] = field( default_factory=dict, metadata={"help": "Keyword arguments for the accelerator."} ) project_kwargs: dict[str, Any] = field( default_factory=dict, metadata={"help": "Keyword arguments for the accelerator project config (e.g., `logging_dir`)."}, ) tracker_project_name: str = field(default="trl", metadata={"help": "Name of project to use for tracking."}) logdir: str = field(default="logs", metadata={"help": "Top-level logging directory for checkpoint saving."}) num_epochs: int = field(default=100, metadata={"help": "Number of epochs to train."}) save_freq: int = field(default=1, metadata={"help": "Number of epochs between saving model checkpoints."}) num_checkpoint_limit: int = field( default=5, metadata={"help": "Number of checkpoints to keep before overwriting old ones."} ) mixed_precision: str = field( default="fp16", metadata={ "help": "Mixed precision training. 
Possible values are 'fp16', 'bf16', 'none'.", "choices": ["fp16", "bf16", "none"], }, ) allow_tf32: bool = field(default=True, metadata={"help": "Allow `tf32` on Ampere GPUs."}) resume_from: str = field(default="", metadata={"help": "Path to resume training from a checkpoint."}) sample_num_steps: int = field(default=50, metadata={"help": "Number of sampler inference steps."}) sample_eta: float = field(default=1.0, metadata={"help": "Eta parameter for the DDIM sampler."}) sample_guidance_scale: float = field(default=5.0, metadata={"help": "Classifier-free guidance weight."}) train_batch_size: int = field(default=1, metadata={"help": "Batch size for training."}) train_use_8bit_adam: bool = field( default=False, metadata={"help": "Whether to use the 8bit Adam optimizer from `bitsandbytes`."} ) train_learning_rate: float = field(default=1e-3, metadata={"help": "Learning rate."}) train_adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for Adam optimizer."}) train_adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for Adam optimizer."}) train_adam_weight_decay: float = field(default=1e-4, metadata={"help": "Weight decay for Adam optimizer."}) train_adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon value for Adam optimizer."}) train_gradient_accumulation_steps: int = field( default=1, metadata={"help": "Number of gradient accumulation steps."} ) train_max_grad_norm: float = field(default=1.0, metadata={"help": "Maximum gradient norm for gradient clipping."}) negative_prompts: Optional[str] = field( default=None, metadata={"help": "Comma-separated list of prompts to use as negative examples."}, ) truncated_backprop_rand: bool = field( default=True, metadata={"help": "If `True`, randomized truncation to different diffusion timesteps is used."}, ) truncated_backprop_timestep: int = field( default=49, metadata={ "help": "Absolute timestep to which the gradients are backpropagated. Used only if " "`truncated_backprop_rand=False`." }, ) truncated_rand_backprop_minmax: tuple[int, int] = field( default=(0, 50), metadata={ "help": "Range of diffusion timesteps for randomized truncated backpropagation.", }, ) push_to_hub: bool = field(default=False, metadata={"help": "Whether to push the final model to the Hub."}) def to_dict(self): output_dict = {} for key, value in self.__dict__.items(): output_dict[key] = value return flatten_dict(output_dict) def __post_init__(self): if self.train_use_8bit_adam and not is_bitsandbytes_available(): raise ImportError( "You need to install bitsandbytes to use 8bit Adam. " "You can install it with `pip install bitsandbytes`." )
trl/trl/trainer/alignprop_config.py/0
{ "file_path": "trl/trl/trainer/alignprop_config.py", "repo_id": "trl", "token_count": 3898 }
622
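AlignPropConfig is a plain dataclass, so it can be constructed directly and flattened for a tracker via to_dict(); the field values below are illustrative only:

from trl import AlignPropConfig

config = AlignPropConfig(
    num_epochs=10,
    train_batch_size=2,
    mixed_precision="bf16",
    project_kwargs={"logging_dir": "./logs"},
)
flat = config.to_dict()  # flatten_dict() turns nested kwargs into flat keys suitable for trackers
print(config.train_learning_rate, len(flat))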
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import warnings from pathlib import Path from typing import Callable, Optional, Union import torch from accelerate import logging from datasets import Dataset from torch.utils.data import DataLoader from transformers import ( AutoModelForCausalLM, AutoTokenizer, BaseImageProcessor, DataCollator, DataCollatorForLanguageModeling, DataCollatorForSeq2Seq, FeatureExtractionMixin, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, Trainer, TrainingArguments, is_wandb_available, ) from transformers.trainer_utils import EvalLoopOutput from transformers.utils import is_peft_available from ..core import PPODecorators from .iterative_sft_config import IterativeSFTConfig from .utils import generate_model_card, get_comet_experiment_url if is_peft_available(): from peft import PeftModel if is_wandb_available(): import wandb logger = logging.get_logger(__name__) class IterativeSFTTrainer(Trainer): """ The IterativeSFTTrainer can be used to finetune models with methods that requires some steps between optimization. <Tip warning={true}> The [`IterativeSFTTrainer`] is deprecated and will be removed in version 0.24.0. Please use the [`SFTTrainer`]. </Tip> Args: model (`Union[str, PreTrainedModel]`): Model to be trained. Can be either: - A string, being the *model id* of a pretrained model hosted inside a model repo on huggingface.co, or a path to a *directory* containing model weights saved using [`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is loaded using [`~transformers.AutoModelForCausalLM.from_pretrained`] with the keyword arguments in `args.model_init_kwargs`. - A [`~transformers.PreTrainedModel`] object. Only causal language models are supported. args ([`IterativeSFTConfig`], *optional*, defaults to `None`): Configuration for this trainer. If `None`, a default configuration is used. data_collator (`DataCollator`, *optional*): Function to use to form a batch from a list of elements of the processed `train_dataset` or `eval_dataset`. Will default to [`~transformers.default_data_collator`] if no `processing_class` is provided, an instance of [`~transformers.DataCollatorWithPadding`] otherwise if the processing_class is a feature extractor or tokenizer. eval_dataset (`datasets.Dataset`): The dataset to use for evaluation. processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.BaseImageProcessor`], [`~transformers.FeatureExtractionMixin`] or [`~transformers.ProcessorMixin`], *optional*, defaults to `None`): Processing class used to process the data. If `None`, the processing class is loaded from the model's name with [`~transformers.AutoTokenizer.from_pretrained`]. optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): The optimizer and scheduler to use for training. 
preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): The function to use to preprocess the logits before computing the metrics. compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*): The function to use to compute the metrics. Must take a `EvalPrediction` and return a dictionary string to metric values. """ _tag_names = ["trl", "iterative-sft"] def __init__( self, model: Union[str, PreTrainedModel], args: Optional[Union[IterativeSFTConfig, TrainingArguments]] = None, data_collator: Optional[DataCollator] = None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None, processing_class: Optional[ Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] ] = None, optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = ( None, None, ), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None, ): warnings.warn( "The `IterativeSFTTrainer` is deprecated and will be removed in version 0.24.0. Please use the " "`SFTTrainer`.", FutureWarning, ) # Args model_id = model if isinstance(model, str) else model.config._name_or_path if args is None: model_name = model_id.split("/")[-1] args = IterativeSFTConfig(f"{model_name}-IterativeSFT") elif isinstance(args, TrainingArguments) and not isinstance(args, IterativeSFTConfig): dict_args = args.to_dict() dict_args["hub_token"] = args.hub_token # to_dict hides the hub_token dict_args.pop("push_to_hub_token") args = IterativeSFTConfig(**dict_args) # Handle the tokenizer if processing_class is None: processing_class = AutoTokenizer.from_pretrained(model_id) # Model if args.model_init_kwargs is not None and not isinstance(model, str): logger.warning( "You passed model_init_kwargs to the `IterativeSFTConfig`, but your model is already instantiated. " "The `model_init_kwargs` will be ignored." 
) if isinstance(model, str): model = self._create_model_from_path(model, args) # PEFT configuration and model wrapping if is_peft_available() and isinstance(model, PeftModel): self.is_peft_model = True else: self.is_peft_model = False self.processing_class = processing_class self.is_encoder_decoder = getattr(model.config, "is_encoder_decoder", False) if data_collator is None: if self.is_encoder_decoder: self.data_collator = DataCollatorForSeq2Seq( processing_class, label_pad_token_id=-100, pad_to_multiple_of=8 ) else: self.data_collator = DataCollatorForLanguageModeling(self.processing_class, mlm=False) else: self.data_collator = data_collator self.max_length = args.max_length self.truncation_mode = args.truncation_mode self.optimize_device_cache = args.optimize_device_cache super().__init__( model=model, args=args, data_collator=self.data_collator, eval_dataset=eval_dataset, processing_class=processing_class, compute_metrics=compute_metrics, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) # Add tags for models that have been loaded with the correct transformers version if hasattr(self.model, "add_model_tags"): self.model.add_model_tags(self._tag_names) self.create_optimizer_and_scheduler(self.args.max_steps) # prepare model, optimizer and lr_scheduler self.model, self.optimizer, self.lr_scheduler = self.accelerator.prepare( self.model, self.optimizer, self.lr_scheduler ) self.processing_class.truncation_side = "left" if self.truncation_mode == "keep_end" else "right" if not hasattr(self, "accelerator"): raise AttributeError( "Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`." ) PPODecorators.optimize_device_cache = self.optimize_device_cache def _create_model_from_path(self, model_path: str, args: IterativeSFTConfig) -> PreTrainedModel: """Creates a model from a path or model identifier.""" model_init_kwargs = args.model_init_kwargs or {} return AutoModelForCausalLM.from_pretrained(model_path, **model_init_kwargs) def prepare_model_inputs(self, input_ids: torch.Tensor, attention_mask: torch.Tensor, labels: torch.Tensor): if attention_mask is None: attention_mask = [torch.ones_like(ids) for ids in input_ids] if self.is_encoder_decoder: input_data = self.data_collator( [ {"input_ids": ids, "attention_mask": att, "labels": lab} for ids, att, lab in zip(input_ids, attention_mask, labels) ] ).to(self.model.device) input_data.pop("decoder_input_ids", None) # This is directly computed inside the model input_data["labels"][input_data["labels"] == self.processing_class.pad_token_id] = -100 else: input_data = self.data_collator( [{"input_ids": ids, "attention_mask": att} for ids, att in zip(input_ids, attention_mask)] ).to(self.model.device) # truncate in case the user has provided input_ids, attention_mask and labels if self.max_length is not None: if self.truncation_mode == "keep_start": input_data = {k: v[: self.max_length] for k, v in input_data.items()} elif self.truncation_mode == "keep_end": input_data = {k: v[-self.max_length :] for k, v in input_data.items()} else: raise ValueError(f"Unknown truncation mode: {self.truncation_mode}") return input_data @staticmethod def _step_safety_checker( input_ids: list[torch.LongTensor], attention_mask: list[torch.LongTensor], labels: list[torch.LongTensor], texts: list[str], texts_labels: list[str], ): """ Check if the input data is valid for training. 
Args: input_ids (list[`torch.LongTensor`]): List of tensors containing the input_ids attention_mask (list[`torch.LongTensor`]): List of tensors containing the attention_mask labels (list[`torch.LongTensor`]): List of tensors containing the labels texts (list[`str`]): List of strings containing the text input. texts_labels (list[`str`]): List of strings containing the text labels. Returns: `tuple`: The input data. """ if texts is None: if attention_mask is None: for name, tensor_list in zip(["input_ids", "labels"], [input_ids, labels]): if not isinstance(tensor_list, list): raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}") if not isinstance(tensor_list[0], torch.Tensor): raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}") else: for name, tensor_list in zip( ["input_ids", "attention_mask", "labels"], [input_ids, attention_mask, labels] ): if not isinstance(tensor_list, list): raise ValueError(f"{name} must be a list of tensors - got {type(tensor_list)}") if not isinstance(tensor_list[0], torch.Tensor): raise ValueError(f"Elements in {name} must be tensors - got {type(tensor_list[0])}") else: if not isinstance(texts, list): raise ValueError(f"'text' must be a list of strings - got {type(texts)}") if not isinstance(texts[0], str): raise ValueError(f"Elements in 'text' must be strings - got {type(texts[0])}") if texts_labels is not None: if not isinstance(texts_labels, list): raise ValueError(f"'text_labels' must be a list of strings - got {type(texts_labels)}") if not isinstance(texts_labels[0], str): raise ValueError(f"Elements in 'text_labels' must be strings - got {type(texts_labels[0])}") return input_ids, attention_mask, labels, texts, texts_labels @PPODecorators.empty_device_cache() def step( self, input_ids: Optional[list[torch.LongTensor]] = None, attention_mask: Optional[list[torch.LongTensor]] = None, labels: Optional[list[torch.LongTensor]] = None, texts: Optional[list[str]] = None, texts_labels: Optional[list[str]] = None, ): """ Run an optimization step given a list of input_ids, attention_mask, and labels or a list of text and text_labels. Args: input_ids (list[`torch.LongTensor`]): List of tensors containing the input_ids (if not provided, text will be used) attention_mask (list[`torch.LongTensor`], *optional*): List of tensors containing the attention_mask labels (list[`torch.LongTensor`], *optional*): List of tensors containing the labels (if set to None, will default to input_ids) texts (list[`str`], *optional*): List of strings containing the text input (if not provided, input_ids will directly be used) texts_labels (list[`str`], *optional*): List of strings containing the text labels (if set to None, will default to text) Returns: `dict[str, Any]`: A summary of the training statistics """ self.model.train() if self.state.global_step == 0: self.tr_loss = torch.tensor(0.0).to(self.args.device) self._globalstep_last_logged = self.state.global_step if input_ids is None and texts is None: raise ValueError("Step should include `input_ids` or `texts` as keyword arguments.") elif input_ids is not None and texts is not None: logger.warning( "Both `input_ids` and `texts` arguments are provided. `input_ids` will be ignored. " "Please provide only one of the two.", ) if labels is None and texts_labels is None and self.is_encoder_decoder: raise ValueError( "No 'labels' or 'text_labels' are provided. When using an encoder-decoder architecture, 'labels' or 'text_labels' must be passed."
) # Convert Column to list if not already input_ids = input_ids[:] if input_ids is not None else None attention_mask = attention_mask[:] if attention_mask is not None else None labels = labels[:] if labels is not None else None texts = texts[:] if texts is not None else None texts_labels = texts_labels[:] if texts_labels is not None else None input_ids, attention_mask, labels, texts, texts_labels = self._step_safety_checker( input_ids, attention_mask, labels, texts, texts_labels ) if texts is not None: model_inputs = self.processing_class( texts, max_length=self.max_length, truncation=True, padding=True, return_tensors="pt" ) input_ids, attention_mask = model_inputs["input_ids"], model_inputs["attention_mask"] if texts_labels is not None: labels = self.processing_class( texts, max_length=self.max_length, truncation=True, padding=True, return_tensors="pt" )["input_ids"] if labels is None: labels = input_ids model_inputs = self.prepare_model_inputs(input_ids, attention_mask, labels) model_inputs_names = list(model_inputs.keys()) batch_dict = {} batch_dict.update(model_inputs) def collator(data): return_dict = dict() for key in data[0]: if key in ["input_ids", "attention_mask", "labels"]: return_dict[key] = torch.stack([d[key] for d in data]).to(self.model.device) return return_dict batch_data = Dataset.from_dict(batch_dict) batch_data.set_format("torch") step_dataloader = DataLoader( batch_data, batch_size=self.args.per_device_train_batch_size, shuffle=True, collate_fn=collator, ) for _, batch in enumerate(step_dataloader): with self.accelerator.accumulate(self.model): model_inputs = {k: batch[k] for k in model_inputs_names} loss = self.compute_loss(self.model, model_inputs) if self.args.n_gpu > 1: loss = loss.mean() tr_loss_step = loss.detach() self.accelerator.backward(loss) if self.accelerator.sync_gradients and self.args.max_grad_norm is not None: self.accelerator.clip_grad_norm_( self.model.parameters(), self.args.max_grad_norm, ) self.optimizer.step() self.optimizer.zero_grad() if self.lr_scheduler is not None: self.lr_scheduler.step() self.state.global_step += 1 # update stats etc self.tr_loss += tr_loss_step self._maybe_log_save_evaluate() def _maybe_log_save_evaluate(self): # check if eval is required if self.args.eval_steps is not None: if self.state.global_step % self.args.eval_steps == 0 and self.state.global_step != 0: self.evaluate(self.eval_dataset) # check if logging is required if self.args.logging_steps is not None: if self.state.global_step % self.args.logging_steps == 0 and self.state.global_step != 0: logs: dict[str, float] = {} tr_loss_scalar = self._nested_gather(self.tr_loss).mean().item() # reset tr_loss to zero self.tr_loss -= self.tr_loss logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4) logs["learning_rate"] = self._get_learning_rate() self._globalstep_last_logged = self.state.global_step self.log(logs) # Ensure the model card is saved along with the checkpoint def _save_checkpoint(self, model, trial): if self.args.hub_model_id is None: model_name = Path(self.args.output_dir).name else: model_name = self.args.hub_model_id.split("/")[-1] self.create_model_card(model_name=model_name) super()._save_checkpoint(model, trial) def create_model_card( self, model_name: Optional[str] = None, dataset_name: Optional[str] = None, tags: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. 
Args: model_name (`str` or `None`, *optional*, defaults to `None`): Name of the model. dataset_name (`str` or `None`, *optional*, defaults to `None`): Name of the dataset used for training. tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): Tags to be associated with the model card. """ if not self.is_world_process_zero(): return if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): base_model = self.model.config._name_or_path else: base_model = None # normalize `tags` to a mutable set if tags is None: tags = set() elif isinstance(tags, str): tags = {tags} else: tags = set(tags) if hasattr(self.model.config, "unsloth_version"): tags.add("unsloth") tags.update(self._tag_names) model_card = generate_model_card( base_model=base_model, model_name=model_name, hub_model_id=self.hub_model_id, dataset_name=dataset_name, tags=tags, wandb_url=wandb.run.url if is_wandb_available() and wandb.run is not None else None, comet_url=get_comet_experiment_url(), trainer_name="Iterative SFT", ) model_card.save(os.path.join(self.args.output_dir, "README.md"))
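# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the original file).
# It shows how the `step` API above can drive an iterative generate -> curate ->
# train loop on raw text. The model name, the curated texts, and the loop shape
# are placeholder assumptions chosen only for illustration.
# ---------------------------------------------------------------------------
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import IterativeSFTConfig, IterativeSFTTrainer

demo_model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2.5-0.5B")  # assumed small model
demo_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B")
if demo_tokenizer.pad_token is None:  # the language-modeling collator needs a pad token
    demo_tokenizer.pad_token = demo_tokenizer.eos_token

demo_trainer = IterativeSFTTrainer(
    model=demo_model,
    args=IterativeSFTConfig(output_dir="iterative-sft-demo", max_steps=10),
    processing_class=demo_tokenizer,
)

for _ in range(3):  # outer loop: generate, filter, then train on the curated text
    curated_texts = ["The capital of France is Paris."]  # stand-in for curated generations
    demo_trainer.step(texts=curated_texts)  # one optimization pass over the provided texts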
trl/trl/trainer/iterative_sft_trainer.py/0
{ "file_path": "trl/trl/trainer/iterative_sft_trainer.py", "repo_id": "trl", "token_count": 9421 }
623
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from collections import defaultdict from dataclasses import FrozenInstanceError, replace from pathlib import Path from typing import Any, Callable, Optional, Union import pandas as pd import torch import torch.nn as nn from accelerate import PartialState, logging from accelerate.utils import gather_object from datasets import Dataset from transformers import ( BaseImageProcessor, DataCollator, FeatureExtractionMixin, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, Trainer, is_wandb_available, ) from transformers.trainer_callback import TrainerCallback from transformers.trainer_pt_utils import nested_detach from transformers.trainer_utils import EvalPrediction from transformers.utils import is_peft_available, is_rich_available from ..data_utils import maybe_apply_chat_template from ..models import prepare_peft_model from .reward_config import RewardConfig from .utils import ( RewardDataCollatorWithPadding, compute_accuracy, decode_and_strip_padding, disable_dropout_in_model, generate_model_card, get_comet_experiment_url, log_table_to_comet_experiment, print_rich_table, ) if is_peft_available(): from peft import PeftModel if is_wandb_available(): import wandb logger = logging.get_logger(__name__) def _tokenize(batch: dict[str, list[Any]], tokenizer: "PreTrainedTokenizerBase") -> dict[str, list[Any]]: """Tokenize a batch from a reward modelling dataset.""" new_examples = { "input_ids_chosen": [], "attention_mask_chosen": [], "input_ids_rejected": [], "attention_mask_rejected": [], } for chosen, rejected in zip(batch["chosen"], batch["rejected"]): tokenized_chosen = tokenizer(chosen) tokenized_rejected = tokenizer(rejected) new_examples["input_ids_chosen"].append(tokenized_chosen["input_ids"]) new_examples["attention_mask_chosen"].append(tokenized_chosen["attention_mask"]) new_examples["input_ids_rejected"].append(tokenized_rejected["input_ids"]) new_examples["attention_mask_rejected"].append(tokenized_rejected["attention_mask"]) return new_examples class RewardTrainer(Trainer): _tag_names = ["trl", "reward-trainer"] def __init__( self, model: Optional[Union[PreTrainedModel, nn.Module]] = None, args: Optional[RewardConfig] = None, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None, processing_class: Optional[ Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] ] = None, model_init: Optional[Callable[[], PreTrainedModel]] = None, compute_metrics: Optional[Callable[[EvalPrediction], dict]] = None, callbacks: Optional[list[TrainerCallback]] = None, optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = ( None, None, ), preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, peft_config: Optional[dict] = None, ): """ Initialize RewardTrainer. 
Args: model (`transformers.PreTrainedModel`): The model to train, preferably an `AutoModelForSequenceClassification`. args (`RewardConfig`): The arguments to use for training. data_collator (`transformers.DataCollator`): The data collator to use for training. If None is specified, the default data collator (`RewardDataCollatorWithPadding`) will be used which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences. train_dataset (`datasets.Dataset`): The dataset to use for training. eval_dataset (`datasets.Dataset`): The dataset to use for evaluation. processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.BaseImageProcessor`], [`~transformers.FeatureExtractionMixin`] or [`~transformers.ProcessorMixin`], *optional*, defaults to `None`): Processing class used to process the data. If provided, will be used to automatically process the inputs for the model, and it will be saved along the model to make it easier to rerun an interrupted training or reuse the fine-tuned model. model_init (`Callable[[], transformers.PreTrainedModel]`): The model initializer to use for training. If None is specified, the default model initializer will be used. compute_metrics (`Callable[[transformers.EvalPrediction], dict]`, *optional* defaults to `compute_accuracy`): The metrics to use for evaluation. If no metrics are specified, the default metric (`compute_accuracy`) will be used. callbacks (`list[transformers.TrainerCallback]`): The callbacks to use for training. optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`): The optimizer and scheduler to use for training. preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`): The function to use to preprocess the logits before computing the metrics. peft_config (`dict`, defaults to `None`): The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model. """ if peft_config is not None or (is_peft_available() and isinstance(model, PeftModel)): model = prepare_peft_model(model, peft_config, args) # Disable dropout in the model if args.disable_dropout: disable_dropout_in_model(model) if compute_metrics is None: compute_metrics = compute_accuracy if data_collator is None: if processing_class is None: raise ValueError( "A processing_class must be specified when using the default RewardDataCollatorWithPadding" ) max_length = args.max_length data_collator = RewardDataCollatorWithPadding(processing_class) if args.remove_unused_columns: try: # for bc before https://github.com/huggingface/transformers/pull/25435 args.remove_unused_columns = False except FrozenInstanceError: args = replace(args, remove_unused_columns=False) # warn users logger.warning( "When using RewardDataCollatorWithPadding, you should set `remove_unused_columns=False` in your RewardConfig" " we have set it for you, but you should do it yourself in the future.", ) self.use_reward_data_collator = True else: self.use_reward_data_collator = False # The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the # input tensor associated with the key "input_ids". However, in Reward, the sampled data does not include the # "input_ids" key. Instead, the available keys are "input_ids_chosen" and "input_ids_rejected". As a result, # the trainer issues the warning: "Could not estimate the number of tokens of the input, floating-point # operations will not be computed." 
To suppress this warning, we set the "estimate_tokens" key in the model's # "warnings_issued" dictionary to True. This acts as a flag to indicate that the warning has already been # issued. model.warnings_issued["estimate_tokens"] = True if "input_ids_chosen" not in train_dataset.column_names: with PartialState().main_process_first(): fn_kwargs = {"tokenizer": processing_class} train_dataset = train_dataset.map(maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class}) train_dataset = train_dataset.map( _tokenize, batched=True, fn_kwargs=fn_kwargs, num_proc=args.dataset_num_proc, ) # This filter is important because otherwise you get samples that exceed the model's context length and # get truncated => noisy signal the chosen/rejected label gets lost. The downside is that the # user might get surprised if N samples are missing from training. train_dataset = train_dataset.filter( lambda x: len(x["input_ids_chosen"]) <= max_length and len(x["input_ids_rejected"]) <= max_length, num_proc=args.dataset_num_proc, ) if eval_dataset is not None: eval_dataset = eval_dataset.map( maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class} ) eval_dataset = eval_dataset.map( _tokenize, fn_kwargs=fn_kwargs, batched=True, num_proc=args.dataset_num_proc, ) # This filter is important because otherwise you get samples that exceed the model's context length and # get truncated => noisy signal the chosen/rejected label gets lost. The downside is that the # user might get surprised if N samples are missing from training. eval_dataset = eval_dataset.filter( lambda x: len(x["input_ids_chosen"]) <= max_length and len(x["input_ids_rejected"]) <= max_length, num_proc=args.dataset_num_proc, ) super().__init__( model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, processing_class=processing_class, model_init=model_init, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) # Add tags for models that have been loaded with the correct transformers version if hasattr(self.model, "add_model_tags"): self.model.add_model_tags(self._tag_names) def compute_loss( self, model: Union[PreTrainedModel, nn.Module], inputs: dict[str, Union[torch.Tensor, Any]], return_outputs=False, num_items_in_batch=None, ) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]: rewards_chosen = model( input_ids=inputs["input_ids_chosen"], attention_mask=inputs["attention_mask_chosen"], return_dict=True, )["logits"] rewards_rejected = model( input_ids=inputs["input_ids_rejected"], attention_mask=inputs["attention_mask_rejected"], return_dict=True, )["logits"] # calculate loss, optionally modulate with margin if "margin" in inputs: loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected - inputs["margin"]).mean() else: loss = -nn.functional.logsigmoid(rewards_chosen - rewards_rejected).mean() if self.args.center_rewards_coefficient is not None: loss += self.args.center_rewards_coefficient * torch.mean((rewards_chosen + rewards_rejected) ** 2) if return_outputs: return loss, { "rewards_chosen": rewards_chosen, "rewards_rejected": rewards_rejected, } return loss def prediction_step( self, model: Union[PreTrainedModel, nn.Module], inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]] = None, ) -> tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: inputs = self._prepare_inputs(inputs) if 
ignore_keys is None: if hasattr(self.model, "config"): ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) else: ignore_keys = [] with torch.no_grad(): loss, logits_dict = self.compute_loss(model, inputs, return_outputs=True) if prediction_loss_only: return (loss, None, None) loss = loss.detach() logits = tuple(v for k, v in logits_dict.items() if k not in ignore_keys) logits = nested_detach(logits) # Stack accepted against rejected, mean over logits # and softmax to get preferences between accepted and rejected to sum to 1 logits = torch.stack(logits).mean(dim=2).softmax(dim=0).T labels = torch.zeros(logits.shape[0]) labels = self._prepare_inputs(labels) return loss, logits, labels def evaluate(self, *args, **kwargs): num_print_samples = kwargs.pop("num_print_samples", 4) self.visualize_samples(num_print_samples) return super().evaluate(*args, **kwargs) def visualize_samples(self, num_print_samples: int): """ Visualize the reward model logits prediction Args: num_print_samples (`int`, defaults to `4`): The number of samples to print. Set to `-1` to print all samples. """ eval_dataloader = self.get_eval_dataloader() table = defaultdict(list) for _, inputs in enumerate(eval_dataloader): _, logits, _ = self.prediction_step(self.model, inputs, prediction_loss_only=False) chosen_text = decode_and_strip_padding(inputs["input_ids_chosen"], self.processing_class) rejected_text = decode_and_strip_padding(inputs["input_ids_rejected"], self.processing_class) table["chosen_text"].extend(gather_object(chosen_text)) table["rejected_text"].extend(gather_object(rejected_text)) table["logits"].extend( gather_object([[round(inner_item, 4) for inner_item in item] for item in logits.tolist()]) ) if num_print_samples >= 0 and len(table["chosen_text"]) >= num_print_samples: break df = pd.DataFrame(table) if self.accelerator.process_index == 0: if is_rich_available(): print_rich_table(df[:num_print_samples]) if "wandb" in self.args.report_to: import wandb if wandb.run is not None: wandb.log({"completions": wandb.Table(dataframe=df)}) if "comet_ml" in self.args.report_to: log_table_to_comet_experiment( name="completions.csv", table=df, ) # Ensure the model card is saved along with the checkpoint def _save_checkpoint(self, model, trial): if self.args.hub_model_id is None: model_name = Path(self.args.output_dir).name else: model_name = self.args.hub_model_id.split("/")[-1] self.create_model_card(model_name=model_name) super()._save_checkpoint(model, trial) def create_model_card( self, model_name: Optional[str] = None, dataset_name: Optional[str] = None, tags: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: model_name (`str` or `None`, *optional*, defaults to `None`): Name of the model. dataset_name (`str` or `None`, *optional*, defaults to `None`): Name of the dataset used for training. tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): Tags to be associated with the model card. 
""" if not self.is_world_process_zero(): return if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): base_model = self.model.config._name_or_path else: base_model = None # normalize `tags` to a mutable set if tags is None: tags = set() elif isinstance(tags, str): tags = {tags} else: tags = set(tags) if hasattr(self.model.config, "unsloth_version"): tags.add("unsloth") tags.update(self._tag_names) model_card = generate_model_card( base_model=base_model, model_name=model_name, hub_model_id=self.hub_model_id, dataset_name=dataset_name, tags=tags, wandb_url=wandb.run.url if is_wandb_available() and wandb.run is not None else None, comet_url=get_comet_experiment_url(), trainer_name="Reward", ) model_card.save(os.path.join(self.args.output_dir, "README.md"))
trl/trl/trainer/reward_trainer.py/0
{ "file_path": "trl/trl/trainer/reward_trainer.py", "repo_id": "trl", "token_count": 7946 }
624
3.11
agents-course/quiz/.python-version/0
{ "file_path": "agents-course/quiz/.python-version", "repo_id": "agents-course", "token_count": 4 }
0
# Quiz: Evaluating AI Agents Let's assess your understanding of the agent tracing and evaluation concepts covered in this bonus unit. This quiz is optional and ungraded. ### Q1: What does observability in AI agents primarily refer to? Which statement accurately describes the purpose of observability for AI agents? <Question choices={[ { text: "It involves tracking internal operations through logs, metrics, and spans to understand agent behavior.", explain: "Correct! Observability means using logs, metrics, and spans to shed light on the inner workings of the agent.", correct: true }, { text: "It is solely focused on reducing the financial cost of running the agent.", explain: "Observability covers cost but is not limited to it." }, { text: "It refers only to the external appearance and UI of the agent.", explain: "Observability is about the internal processes, not the UI." }, { text: "It is concerned with coding style and code aesthetics only.", explain: "Code style is unrelated to observability in this context." } ]} /> ### Q2: Which of the following is NOT a common metric monitored in agent observability? Select the metric that does not typically fall under the observability umbrella. <Question choices={[ { text: "Latency", explain: "Latency is commonly tracked to assess agent responsiveness." }, { text: "Cost per Agent Run", explain: "Monitoring cost is a key aspect of observability." }, { text: "User Feedback and Ratings", explain: "User feedback is crucial for evaluating agent performance." }, { text: "Lines of Code of the Agent", explain: "The number of lines of code is not a typical observability metric.", correct: true } ]} /> ### Q3: What best describes offline evaluation of an AI agent? Determine the statement that correctly captures the essence of offline evaluation. <Question choices={[ { text: "Evaluating the agent using real user interactions in a live environment.", explain: "This describes online evaluation rather than offline." }, { text: "Assessing agent performance using curated datasets with known ground truth.", explain: "Correct! Offline evaluation uses test datasets to gauge performance against known answers.", correct: true }, { text: "Monitoring the agent's internal logs in real-time.", explain: "This is more related to observability rather than evaluation." }, { text: "Running the agent without any evaluation metrics.", explain: "This approach does not provide meaningful insights." } ]} /> ### Q4: Which advantage does online evaluation of agents offer? Pick the statement that best reflects the benefit of online evaluation. <Question choices={[ { text: "It provides controlled testing scenarios using pre-defined datasets.", explain: "Controlled testing is a benefit of offline evaluation, not online." }, { text: "It captures live user interactions and real-world performance data.", explain: "Correct! Online evaluation offers insights by monitoring the agent in a live setting.", correct: true }, { text: "It eliminates the need for any offline testing and benchmarks.", explain: "Both offline and online evaluations are important and complementary." }, { text: "It solely focuses on reducing the computational cost of the agent.", explain: "Cost monitoring is part of observability, not the primary advantage of online evaluation." } ]} /> ### Q5: What role does OpenTelemetry play in AI agent observability and evaluation? Which statement best describes the role of OpenTelemetry in monitoring AI agents? 
<Question choices={[ { text: "It provides a standardized framework to instrument code, enabling the collection of traces, metrics, and logs for observability.", explain: "Correct! OpenTelemetry standardizes instrumentation for telemetry data, which is crucial for monitoring and diagnosing agent behavior.", correct: true }, { text: "It acts as a replacement for manual debugging by automatically fixing code issues.", explain: "Incorrect. OpenTelemetry is used for gathering telemetry data, not for debugging code issues." }, { text: "It primarily serves as a database for storing historical logs without real-time capabilities.", explain: "Incorrect. OpenTelemetry focuses on real-time telemetry data collection and exporting data to analysis tools." }, { text: "It is used to optimize the computational performance of the AI agent by automatically tuning model parameters.", explain: "Incorrect. OpenTelemetry is centered on observability rather than performance tuning." } ]} /> Congratulations on completing this quiz! 🎉 If you missed any questions, consider reviewing the content of this bonus unit for a deeper understanding. If you did well, you're ready to explore more advanced topics in agent observability and evaluation!
agents-course/units/en/bonus-unit2/quiz.mdx/0
{ "file_path": "agents-course/units/en/bonus-unit2/quiz.mdx", "repo_id": "agents-course", "token_count": 1277 }
1
# Dummy Agent Library <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-unit1sub3DONE.jpg" alt="Unit 1 planning"/> This course is framework-agnostic because we want to **focus on the concepts of AI agents and avoid getting bogged down in the specifics of a particular framework**. Also, we want students to be able to use the concepts they learn in this course in their own projects, using any framework they like. Therefore, for this Unit 1, we will use a dummy agent library and a simple serverless API to access our LLM engine. You probably wouldn't use these in production, but they will serve as a good **starting point for understanding how agents work**. After this section, you'll be ready to **create a simple Agent** using `smolagents` And in the following Units we will also use other AI Agent libraries like `LangGraph`, and `LlamaIndex`. To keep things simple we will use a simple Python function as a Tool and Agent. We will use built-in Python packages like `datetime` and `os` so that you can try it out in any environment. You can follow the process [in this notebook](https://huggingface.co/agents-course/notebooks/blob/main/unit1/dummy_agent_library.ipynb) and **run the code yourself**. ## Serverless API In the Hugging Face ecosystem, there is a convenient feature called Serverless API that allows you to easily run inference on many models. There's no installation or deployment required. ```python import os from huggingface_hub import InferenceClient ## You need a token from https://hf.co/settings/tokens, ensure that you select 'read' as the token type. If you run this on Google Colab, you can set it up in the "settings" tab under "secrets". Make sure to call it "HF_TOKEN" # HF_TOKEN = os.environ.get("HF_TOKEN") client = InferenceClient(model="meta-llama/Llama-4-Scout-17B-16E-Instruct") ``` We use the `chat` method since it is a convenient and reliable way to apply chat templates: ```python output = client.chat.completions.create( messages=[ {"role": "user", "content": "The capital of France is"}, ], stream=False, max_tokens=1024, ) print(output.choices[0].message.content) ``` output: ``` Paris. ``` The chat method is the RECOMMENDED method to use in order to ensure a smooth transition between models. ## Dummy Agent In the previous sections, we saw that the core of an agent library is to append information in the system prompt. This system prompt is a bit more complex than the one we saw earlier, but it already contains: 1. **Information about the tools** 2. **Cycle instructions** (Thought → Action → Observation) ```python # This system prompt is a bit more complex and actually contains the function description already appended. # Here we suppose that the textual description of the tools has already been appended. SYSTEM_PROMPT = """Answer the following questions as best you can. You have access to the following tools: get_weather: Get the current weather in a given location The way you use the tools is by specifying a json blob. Specifically, this json should have an `action` key (with the name of the tool to use) and an `action_input` key (with the input to the tool going here). 
The only values that should be in the "action" field are: get_weather: Get the current weather in a given location, args: {"location": {"type": "string"}} example use : {{ "action": "get_weather", "action_input": {"location": "New York"} }} ALWAYS use the following format: Question: the input question you must answer Thought: you should always think about one action to take. Only one action at a time in this format: Action: $JSON_BLOB (inside markdown cell) Observation: the result of the action. This Observation is unique, complete, and the source of truth. ... (this Thought/Action/Observation can repeat N times, you should take several steps when needed. The $JSON_BLOB must be formatted as markdown and only use a SINGLE action at a time.) You must always end your output with the following format: Thought: I now know the final answer Final Answer: the final answer to the original input question Now begin! Reminder to ALWAYS use the exact characters `Final Answer:` when you provide a definitive answer. """ ``` We need to append the user instruction after the system prompt. This happens inside the `chat` method. We can see this process below: ```python messages = [ {"role": "system", "content": SYSTEM_PROMPT}, {"role": "user", "content": "What's the weather in London?"}, ] print(messages) ``` The prompt now is: ``` <|begin_of_text|><|start_header_id|>system<|end_header_id|> Answer the following questions as best you can. You have access to the following tools: get_weather: Get the current weather in a given location The way you use the tools is by specifying a json blob. Specifically, this json should have an `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here). The only values that should be in the "action" field are: get_weather: Get the current weather in a given location, args: {"location": {"type": "string"}} example use : {{ "action": "get_weather", "action_input": {"location": "New York"} }} ALWAYS use the following format: Question: the input question you must answer Thought: you should always think about one action to take. Only one action at a time in this format: Action: $JSON_BLOB (inside markdown cell) Observation: the result of the action. This Observation is unique, complete, and the source of truth. ... (this Thought/Action/Observation can repeat N times, you should take several steps when needed. The $JSON_BLOB must be formatted as markdown and only use a SINGLE action at a time.) You must always end your output with the following format: Thought: I now know the final answer Final Answer: the final answer to the original input question Now begin! Reminder to ALWAYS use the exact characters `Final Answer:` when you provide a definitive answer. <|eot_id|><|start_header_id|>user<|end_header_id|> What's the weather in London ? <|eot_id|><|start_header_id|>assistant<|end_header_id|> ``` Let's call the `chat` method! ```python output = client.chat.completions.create( messages=messages, stream=False, max_tokens=200, ) print(output.choices[0].message.content) ``` output: ```` Thought: To answer the question, I need to get the current weather in London. Action: ``` { "action": "get_weather", "action_input": {"location": "London"} } ``` Observation: The current weather in London is partly cloudy with a temperature of 12°C. Thought: I now know the final answer. Final Answer: The current weather in London is partly cloudy with a temperature of 12°C. ```` Do you see the issue? 
> At this point, the model is hallucinating, because it's producing a fabricated "Observation" -- a response that it generates on its own rather than being the result of an actual function or tool call. > To prevent this, we stop generating right before "Observation:". > This allows us to manually run the function (e.g., `get_weather`) and then insert the real output as the Observation. ```python # The answer was hallucinated by the model. We need to stop to actually execute the function! output = client.chat.completions.create( messages=messages, max_tokens=150, stop=["Observation:"] # Let's stop before any actual function is called ) print(output.choices[0].message.content) ``` output: ```` Thought: To answer the question, I need to get the current weather in London. Action: ``` { "action": "get_weather", "action_input": {"location": "London"} } ```` Much Better! Let's now create a **dummy get weather function**. In a real situation you could call an API. ```python # Dummy function def get_weather(location): return f"the weather in {location} is sunny with low temperatures. \n" get_weather('London') ``` output: ``` 'the weather in London is sunny with low temperatures. \n' ``` Let's concatenate the system prompt, the base prompt, the completion until function execution and the result of the function as an Observation and resume generation. ```python messages=[ {"role": "system", "content": SYSTEM_PROMPT}, {"role": "user", "content": "What's the weather in London ?"}, {"role": "assistant", "content": output.choices[0].message.content + "Observation:\n" + get_weather('London')}, ] output = client.chat.completions.create( messages=messages, stream=False, max_tokens=200, ) print(output.choices[0].message.content) ``` Here is the new prompt: ```text <|begin_of_text|><|start_header_id|>system<|end_header_id|> Answer the following questions as best you can. You have access to the following tools: get_weather: Get the current weather in a given location The way you use the tools is by specifying a json blob. Specifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here). The only values that should be in the "action" field are: get_weather: Get the current weather in a given location, args: {"location": {"type": "string"}} example use : { "action": "get_weather", "action_input": {"location": "New York"} } ALWAYS use the following format: Question: the input question you must answer Thought: you should always think about one action to take. Only one action at a time in this format: Action: $JSON_BLOB (inside markdown cell) Observation: the result of the action. This Observation is unique, complete, and the source of truth. ... (this Thought/Action/Observation can repeat N times, you should take several steps when needed. The $JSON_BLOB must be formatted as markdown and only use a SINGLE action at a time.) You must always end your output with the following format: Thought: I now know the final answer Final Answer: the final answer to the original input question Now begin! Reminder to ALWAYS use the exact characters `Final Answer:` when you provide a definitive answer. <|eot_id|><|start_header_id|>user<|end_header_id|> What's the weather in London? <|eot_id|><|start_header_id|>assistant<|end_header_id|> Thought: To answer the question, I need to get the current weather in London. 
Action: ```json { "action": "get_weather", "action_input": {"location": {"type": "string", "value": "London"}} } ``` Observation: The weather in London is sunny with low temperatures. ```` Output: ``` Final Answer: The weather in London is sunny with low temperatures. ``` --- We learned how we can create Agents from scratch using Python code, and we **saw just how tedious that process can be**. Fortunately, many Agent libraries simplify this work by handling much of the heavy lifting for you. Now, we're ready **to create our first real Agent** using the `smolagents` library.
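As a small optional extension (a sketch, not part of the original notebook), the stop-at-`Observation:` pattern above can be wrapped into a reusable helper so you don't repeat the generate / execute / append steps by hand. The JSON parsing here is deliberately naive and assumes the model emits a single well-formed action blob, as in the example run:

```python
import json
import re

def run_one_tool_step(client, messages, tools, max_tokens=200):
    """Generate until 'Observation:', run the requested tool, and append the real observation."""
    output = client.chat.completions.create(messages=messages, max_tokens=max_tokens, stop=["Observation:"])
    assistant_text = output.choices[0].message.content
    blob = re.search(r"\{.*\}", assistant_text, re.DOTALL).group(0)  # naive: first '{' to last '}'
    action = json.loads(blob)
    observation = tools[action["action"]](**action["action_input"])  # execute the real tool
    messages.append({"role": "assistant", "content": assistant_text + "Observation:\n" + observation})
    return messages

# Example usage with the dummy tool defined above:
# messages = run_one_tool_step(client, messages, {"get_weather": get_weather})
```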
agents-course/units/en/unit1/dummy-agent-library.mdx/0
{ "file_path": "agents-course/units/en/unit1/dummy-agent-library.mdx", "repo_id": "agents-course", "token_count": 3135 }
2
# Building Your First LangGraph Now that we understand the building blocks, let's put them into practice by building our first functional graph. We'll implement Alfred's email processing system, where he needs to: 1. Read incoming emails 2. Classify them as spam or legitimate 3. Draft a preliminary response for legitimate emails 4. Send information to Mr. Wayne when legitimate (printing only) This example demonstrates how to structure a workflow with LangGraph that involves LLM-based decision-making. While this can't be considered an Agent as no tool is involved, this section focuses more on learning the LangGraph framework than Agents. <Tip> You can follow the code in <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/langgraph/mail_sorting.ipynb" target="_blank">this notebook</a> that you can run using Google Colab. </Tip> ## Our Workflow Here's the workflow we'll build: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/LangGraph/first_graph.png" alt="First LangGraph"/> ## Setting Up Our Environment First, let's install the required packages: ```python %pip install langgraph langchain_openai ``` Next, let's import the necessary modules: ```python import os from typing import TypedDict, List, Dict, Any, Optional from langgraph.graph import StateGraph, START, END from langchain_openai import ChatOpenAI from langchain_core.messages import HumanMessage ``` ## Step 1: Define Our State Let's define what information Alfred needs to track during the email processing workflow: ```python class EmailState(TypedDict): # The email being processed email: Dict[str, Any] # Contains subject, sender, body, etc. # Category of the email (inquiry, complaint, etc.) email_category: Optional[str] # Reason why the email was marked as spam spam_reason: Optional[str] # Analysis and decisions is_spam: Optional[bool] # Response generation email_draft: Optional[str] # Processing metadata messages: List[Dict[str, Any]] # Track conversation with LLM for analysis ``` > 💡 **Tip:** Make your state comprehensive enough to track all the important information, but avoid bloating it with unnecessary details. ## Step 2: Define Our Nodes Now, let's create the processing functions that will form our nodes: ```python # Initialize our LLM model = ChatOpenAI(temperature=0) def read_email(state: EmailState): """Alfred reads and logs the incoming email""" email = state["email"] # Here we might do some initial preprocessing print(f"Alfred is processing an email from {email['sender']} with subject: {email['subject']}") # No state changes needed here return {} def classify_email(state: EmailState): """Alfred uses an LLM to determine if the email is spam or legitimate""" email = state["email"] # Prepare our prompt for the LLM prompt = f""" As Alfred the butler, analyze this email and determine if it is spam or legitimate. Email: From: {email['sender']} Subject: {email['subject']} Body: {email['body']} First, determine if this email is spam. If it is spam, explain why. If it is legitimate, categorize it (inquiry, complaint, thank you, etc.). 
""" # Call the LLM messages = [HumanMessage(content=prompt)] response = model.invoke(messages) # Simple logic to parse the response (in a real app, you'd want more robust parsing) response_text = response.content.lower() is_spam = "spam" in response_text and "not spam" not in response_text # Extract a reason if it's spam spam_reason = None if is_spam and "reason:" in response_text: spam_reason = response_text.split("reason:")[1].strip() # Determine category if legitimate email_category = None if not is_spam: categories = ["inquiry", "complaint", "thank you", "request", "information"] for category in categories: if category in response_text: email_category = category break # Update messages for tracking new_messages = state.get("messages", []) + [ {"role": "user", "content": prompt}, {"role": "assistant", "content": response.content} ] # Return state updates return { "is_spam": is_spam, "spam_reason": spam_reason, "email_category": email_category, "messages": new_messages } def handle_spam(state: EmailState): """Alfred discards spam email with a note""" print(f"Alfred has marked the email as spam. Reason: {state['spam_reason']}") print("The email has been moved to the spam folder.") # We're done processing this email return {} def draft_response(state: EmailState): """Alfred drafts a preliminary response for legitimate emails""" email = state["email"] category = state["email_category"] or "general" # Prepare our prompt for the LLM prompt = f""" As Alfred the butler, draft a polite preliminary response to this email. Email: From: {email['sender']} Subject: {email['subject']} Body: {email['body']} This email has been categorized as: {category} Draft a brief, professional response that Mr. Hugg can review and personalize before sending. """ # Call the LLM messages = [HumanMessage(content=prompt)] response = model.invoke(messages) # Update messages for tracking new_messages = state.get("messages", []) + [ {"role": "user", "content": prompt}, {"role": "assistant", "content": response.content} ] # Return state updates return { "email_draft": response.content, "messages": new_messages } def notify_mr_hugg(state: EmailState): """Alfred notifies Mr. Hugg about the email and presents the draft response""" email = state["email"] print("\n" + "="*50) print(f"Sir, you've received an email from {email['sender']}.") print(f"Subject: {email['subject']}") print(f"Category: {state['email_category']}") print("\nI've prepared a draft response for your review:") print("-"*50) print(state["email_draft"]) print("="*50 + "\n") # We're done processing this email return {} ``` ## Step 3: Define Our Routing Logic We need a function to determine which path to take after classification: ```python def route_email(state: EmailState) -> str: """Determine the next step based on spam classification""" if state["is_spam"]: return "spam" else: return "legitimate" ``` > 💡 **Note:** This routing function is called by LangGraph to determine which edge to follow after the classification node. The return value must match one of the keys in our conditional edges mapping. 
## Step 4: Create the StateGraph and Define Edges Now we connect everything together: ```python # Create the graph email_graph = StateGraph(EmailState) # Add nodes email_graph.add_node("read_email", read_email) email_graph.add_node("classify_email", classify_email) email_graph.add_node("handle_spam", handle_spam) email_graph.add_node("draft_response", draft_response) email_graph.add_node("notify_mr_hugg", notify_mr_hugg) # Start the edges email_graph.add_edge(START, "read_email") # Add edges - defining the flow email_graph.add_edge("read_email", "classify_email") # Add conditional branching from classify_email email_graph.add_conditional_edges( "classify_email", route_email, { "spam": "handle_spam", "legitimate": "draft_response" } ) # Add the final edges email_graph.add_edge("handle_spam", END) email_graph.add_edge("draft_response", "notify_mr_hugg") email_graph.add_edge("notify_mr_hugg", END) # Compile the graph compiled_graph = email_graph.compile() ``` Notice how we use the special `END` node provided by LangGraph. This indicates terminal states where the workflow completes. ## Step 5: Run the Application Let's test our graph with a legitimate email and a spam email: ```python # Example legitimate email legitimate_email = { "sender": "john.smith@example.com", "subject": "Question about your services", "body": "Dear Mr. Hugg, I was referred to you by a colleague and I'm interested in learning more about your consulting services. Could we schedule a call next week? Best regards, John Smith" } # Example spam email spam_email = { "sender": "winner@lottery-intl.com", "subject": "YOU HAVE WON $5,000,000!!!", "body": "CONGRATULATIONS! You have been selected as the winner of our international lottery! To claim your $5,000,000 prize, please send us your bank details and a processing fee of $100." } # Process the legitimate email print("\nProcessing legitimate email...") legitimate_result = compiled_graph.invoke({ "email": legitimate_email, "is_spam": None, "spam_reason": None, "email_category": None, "email_draft": None, "messages": [] }) # Process the spam email print("\nProcessing spam email...") spam_result = compiled_graph.invoke({ "email": spam_email, "is_spam": None, "spam_reason": None, "email_category": None, "email_draft": None, "messages": [] }) ``` ## Step 6: Inspecting Our Mail Sorting Agent with Langfuse 📡 As Alfred fine-tunes the Mail Sorting Agent, he's growing weary of debugging its runs. Agents, by nature, are unpredictable and difficult to inspect. But since he aims to build the ultimate Spam Detection Agent and deploy it in production, he needs robust traceability for future monitoring and analysis. To do this, Alfred can use an observability tool such as [Langfuse](https://langfuse.com/) to trace and monitor the agent. First, we pip install Langfuse: ```python %pip install -q langfuse ``` Second, we pip install Langchain (LangChain is required because we use LangFuse): ```python %pip install langchain ``` Next, we add the Langfuse API keys and host address as environment variables. You can get your Langfuse credentials by signing up for [Langfuse Cloud](https://cloud.langfuse.com) or [self-host Langfuse](https://langfuse.com/self-hosting). ```python import os # Get keys for your project from the project settings page: https://cloud.langfuse.com os.environ["LANGFUSE_PUBLIC_KEY"] = "pk-lf-..." os.environ["LANGFUSE_SECRET_KEY"] = "sk-lf-..." 
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com" # 🇪🇺 EU region # os.environ["LANGFUSE_HOST"] = "https://us.cloud.langfuse.com" # 🇺🇸 US region ``` Then, we configure the [Langfuse `callback_handler`](https://langfuse.com/docs/integrations/langchain/tracing#add-langfuse-to-your-langchain-application) and instrument the agent by adding the `langfuse_callback` to the invocation of the graph: `config={"callbacks": [langfuse_handler]}`. ```python from langfuse.langchain import CallbackHandler # Initialize Langfuse CallbackHandler for LangGraph/Langchain (tracing) langfuse_handler = CallbackHandler() # Process legitimate email legitimate_result = compiled_graph.invoke( input={"email": legitimate_email, "is_spam": None, "spam_reason": None, "email_category": None, "draft_response": None, "messages": []}, config={"callbacks": [langfuse_handler]} ) ``` Alfred is now connected 🔌! The runs from LangGraph are being logged in Langfuse, giving him full visibility into the agent's behavior. With this setup, he's ready to revisit previous runs and refine his Mail Sorting Agent even further. ![Example trace in Langfuse](https://langfuse.com/images/cookbook/huggingface-agent-course/langgraph-trace-legit.png) _[Public link to the trace with the legit email](https://cloud.langfuse.com/project/cloramnkj0002jz088vzn1ja4/traces/f5d6d72e-20af-4357-b232-af44c3728a7b?timestamp=2025-03-17T10%3A13%3A28.413Z&observation=6997ba69-043f-4f77-9445-700a033afba1)_ ## Visualizing Our Graph LangGraph allows us to visualize our workflow to better understand and debug its structure: ```python compiled_graph.get_graph().draw_mermaid_png() ``` <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/LangGraph/mail_flow.png" alt="Mail LangGraph"/> This produces a visual representation showing how our nodes are connected and the conditional paths that can be taken. ## What We've Built We've created a complete email processing workflow that: 1. Takes an incoming email 2. Uses an LLM to classify it as spam or legitimate 3. Handles spam by discarding it 4. For legitimate emails, drafts a response and notifies Mr. Hugg This demonstrates the power of LangGraph to orchestrate complex workflows with LLMs while maintaining a clear, structured flow. ## Key Takeaways - **State Management**: We defined comprehensive state to track all aspects of email processing - **Node Implementation**: We created functional nodes that interact with an LLM - **Conditional Routing**: We implemented branching logic based on email classification - **Terminal States**: We used the END node to mark completion points in our workflow ## What's Next? In the next section, we'll explore more advanced features of LangGraph, including handling human interaction in the workflow and implementing more complex branching logic based on multiple conditions.
agents-course/units/en/unit2/langgraph/first_graph.mdx/0
{ "file_path": "agents-course/units/en/unit2/langgraph/first_graph.mdx", "repo_id": "agents-course", "token_count": 4339 }
3
# Exam Time! Well done on working through the material on `smolagents`! You've already achieved a lot. Now, it's time to put your knowledge to the test with a quiz. 🧠 ## Instructions - The quiz consists of code questions. - You will be given instructions to complete the code snippets. - Read the instructions carefully and complete the code snippets accordingly. - For each question, you will be given the result and some feedback. 🧘 **This quiz is ungraded and uncertified**. It's about you understanding the `smolagents` library and knowing whether you should spend more time on the written material. In the coming units you'll put this knowledge to the test in use cases and projects. Let's get started! ## Quiz 🚀 <iframe src="https://agents-course-unit2-smolagents-quiz.hf.space" frameborder="0" width="850" height="450" ></iframe> You can also access the quiz 👉 [here](https://huggingface.co/spaces/agents-course/unit2_smolagents_quiz)
agents-course/units/en/unit2/smolagents/final_quiz.mdx/0
{ "file_path": "agents-course/units/en/unit2/smolagents/final_quiz.mdx", "repo_id": "agents-course", "token_count": 277 }
4
# Building and Integrating Tools for Your Agent In this section, we'll grant Alfred access to the web, enabling him to find the latest news and global updates. Additionally, he'll have access to weather data and Hugging Face Hub model download statistics, so that he can make relevant conversation about fresh topics. ## Give Your Agent Access to the Web Remember that we want Alfred to establish his presence as a true renaissance host, with a deep knowledge of the world. To do so, we need to make sure that Alfred has access to the latest news and information about the world. Let's start by creating a web search tool for Alfred! <hfoptions id="agents-frameworks"> <hfoption id="smolagents"> ```python from smolagents import DuckDuckGoSearchTool # Initialize the DuckDuckGo search tool search_tool = DuckDuckGoSearchTool() # Example usage results = search_tool("Who's the current President of France?") print(results) ``` Expected output: ``` The current President of France is Emmanuel Macron. ``` </hfoption> <hfoption id="llama-index"> ```python from llama_index.tools.duckduckgo import DuckDuckGoSearchToolSpec from llama_index.core.tools import FunctionTool # Initialize the DuckDuckGo search tool tool_spec = DuckDuckGoSearchToolSpec() search_tool = FunctionTool.from_defaults(tool_spec.duckduckgo_full_search) # Example usage response = search_tool("Who's the current President of France?") print(response.raw_output[-1]['body']) ``` Expected output: ``` The President of the French Republic is the head of state of France. The current President is Emmanuel Macron since 14 May 2017 defeating Marine Le Pen in the second round of the presidential election on 7 May 2017. List of French presidents (Fifth Republic) N° Portrait Name ... ``` </hfoption> <hfoption id="langgraph"> ```python from langchain_community.tools import DuckDuckGoSearchRun search_tool = DuckDuckGoSearchRun() results = search_tool.invoke("Who's the current President of France?") print(results) ``` Expected output: ``` Emmanuel Macron (born December 21, 1977, Amiens, France) is a French banker and politician who was elected president of France in 2017... ``` </hfoption> </hfoptions> ## Creating a Custom Tool for Weather Information to Schedule the Fireworks The perfect gala would have fireworks over a clear sky, so we need to make sure the fireworks are not cancelled due to bad weather. Let's create a custom tool that can be used to call an external weather API and get the weather information for a given location. <Tip> For the sake of simplicity, we're using a dummy weather API for this example. If you want to use a real weather API, you could implement a weather tool that uses the OpenWeatherMap API, like in <a href="../../unit1/tutorial">Unit 1</a>. </Tip> <hfoptions id="agents-frameworks"> <hfoption id="smolagents"> ```python from smolagents import Tool import random class WeatherInfoTool(Tool): name = "weather_info" description = "Fetches dummy weather information for a given location." inputs = { "location": { "type": "string", "description": "The location to get weather information for."
} } output_type = "string" def forward(self, location: str): # Dummy weather data weather_conditions = [ {"condition": "Rainy", "temp_c": 15}, {"condition": "Clear", "temp_c": 25}, {"condition": "Windy", "temp_c": 20} ] # Randomly select a weather condition data = random.choice(weather_conditions) return f"Weather in {location}: {data['condition']}, {data['temp_c']}°C" # Initialize the tool weather_info_tool = WeatherInfoTool() ``` </hfoption> <hfoption id="llama-index"> ```python import random from llama_index.core.tools import FunctionTool def get_weather_info(location: str) -> str: """Fetches dummy weather information for a given location.""" # Dummy weather data weather_conditions = [ {"condition": "Rainy", "temp_c": 15}, {"condition": "Clear", "temp_c": 25}, {"condition": "Windy", "temp_c": 20} ] # Randomly select a weather condition data = random.choice(weather_conditions) return f"Weather in {location}: {data['condition']}, {data['temp_c']}°C" # Initialize the tool weather_info_tool = FunctionTool.from_defaults(get_weather_info) ``` </hfoption> <hfoption id="langgraph"> ```python from langchain.tools import Tool import random def get_weather_info(location: str) -> str: """Fetches dummy weather information for a given location.""" # Dummy weather data weather_conditions = [ {"condition": "Rainy", "temp_c": 15}, {"condition": "Clear", "temp_c": 25}, {"condition": "Windy", "temp_c": 20} ] # Randomly select a weather condition data = random.choice(weather_conditions) return f"Weather in {location}: {data['condition']}, {data['temp_c']}°C" # Initialize the tool weather_info_tool = Tool( name="get_weather_info", func=get_weather_info, description="Fetches dummy weather information for a given location." ) ``` </hfoption> </hfoptions> ## Creating a Hub Stats Tool for Influential AI Builders In attendance at the gala are the who's who of AI builders. Alfred wants to impress them by discussing their most popular models, datasets, and spaces. We'll create a tool to fetch model statistics from the Hugging Face Hub based on a username. <hfoptions id="agents-frameworks"> <hfoption id="smolagents"> ```python from smolagents import Tool from huggingface_hub import list_models class HubStatsTool(Tool): name = "hub_stats" description = "Fetches the most downloaded model from a specific author on the Hugging Face Hub." inputs = { "author": { "type": "string", "description": "The username of the model author/organization to find models from." } } output_type = "string" def forward(self, author: str): try: # List models from the specified author, sorted by downloads models = list(list_models(author=author, sort="downloads", direction=-1, limit=1)) if models: model = models[0] return f"The most downloaded model by {author} is {model.id} with {model.downloads:,} downloads." else: return f"No models found for author {author}." except Exception as e: return f"Error fetching models for {author}: {str(e)}" # Initialize the tool hub_stats_tool = HubStatsTool() # Example usage print(hub_stats_tool("facebook")) # Example: Get the most downloaded model by Facebook ``` Expected output: ``` The most downloaded model by facebook is facebook/esmfold_v1 with 12,544,550 downloads. 
``` </hfoption> <hfoption id="llama-index"> ```python import random from llama_index.core.tools import FunctionTool from huggingface_hub import list_models def get_hub_stats(author: str) -> str: """Fetches the most downloaded model from a specific author on the Hugging Face Hub.""" try: # List models from the specified author, sorted by downloads models = list(list_models(author=author, sort="downloads", direction=-1, limit=1)) if models: model = models[0] return f"The most downloaded model by {author} is {model.id} with {model.downloads:,} downloads." else: return f"No models found for author {author}." except Exception as e: return f"Error fetching models for {author}: {str(e)}" # Initialize the tool hub_stats_tool = FunctionTool.from_defaults(get_hub_stats) # Example usage print(hub_stats_tool("facebook")) # Example: Get the most downloaded model by Facebook ``` Expected output: ``` The most downloaded model by facebook is facebook/esmfold_v1 with 12,544,550 downloads. ``` </hfoption> <hfoption id="langgraph"> ```python from langchain.tools import Tool from huggingface_hub import list_models def get_hub_stats(author: str) -> str: """Fetches the most downloaded model from a specific author on the Hugging Face Hub.""" try: # List models from the specified author, sorted by downloads models = list(list_models(author=author, sort="downloads", direction=-1, limit=1)) if models: model = models[0] return f"The most downloaded model by {author} is {model.id} with {model.downloads:,} downloads." else: return f"No models found for author {author}." except Exception as e: return f"Error fetching models for {author}: {str(e)}" # Initialize the tool hub_stats_tool = Tool( name="get_hub_stats", func=get_hub_stats, description="Fetches the most downloaded model from a specific author on the Hugging Face Hub." ) # Example usage print(hub_stats_tool.invoke("facebook")) # Example: Get the most downloaded model by Facebook ``` Expected output: ``` The most downloaded model by facebook is facebook/esmfold_v1 with 13,109,861 downloads. ``` </hfoption> </hfoptions> With the Hub Stats Tool, Alfred can now impress influential AI builders by discussing their most popular models. ## Integrating Tools with Alfred Now that we have all the tools, let's integrate them into Alfred's agent: <hfoptions id="agents-frameworks"> <hfoption id="smolagents"> ```python from smolagents import CodeAgent, InferenceClientModel # Initialize the Hugging Face model model = InferenceClientModel() # Create Alfred with all the tools alfred = CodeAgent( tools=[search_tool, weather_info_tool, hub_stats_tool], model=model ) # Example query Alfred might receive during the gala response = alfred.run("What is Facebook and what's their most popular model?") print("🎩 Alfred's Response:") print(response) ``` Expected output: ``` 🎩 Alfred's Response: Facebook is a social networking website where users can connect, share information, and interact with others. The most downloaded model by Facebook on the Hugging Face Hub is ESMFold_v1. 
``` </hfoption> <hfoption id="llama-index"> ```python from llama_index.core.agent.workflow import AgentWorkflow from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI # Initialize the Hugging Face model llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct") # Create Alfred with all the tools alfred = AgentWorkflow.from_tools_or_functions( [search_tool, weather_info_tool, hub_stats_tool], llm=llm ) # Example query Alfred might receive during the gala response = await alfred.run("What is Facebook and what's their most popular model?") print("🎩 Alfred's Response:") print(response) ``` Expected output: ``` 🎩 Alfred's Response: Facebook is a social networking service and technology company based in Menlo Park, California. It was founded by Mark Zuckerberg and allows people to create profiles, connect with friends and family, share photos and videos, and join groups based on shared interests. The most popular model by Facebook on the Hugging Face Hub is `facebook/esmfold_v1` with 13,109,861 downloads. ``` </hfoption> <hfoption id="langgraph"> ```python from typing import TypedDict, Annotated from langgraph.graph.message import add_messages from langchain_core.messages import AnyMessage, HumanMessage, AIMessage from langgraph.prebuilt import ToolNode from langgraph.graph import START, StateGraph from langgraph.prebuilt import tools_condition from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace # Generate the chat interface, including the tools llm = HuggingFaceEndpoint( repo_id="Qwen/Qwen2.5-Coder-32B-Instruct", huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN, ) chat = ChatHuggingFace(llm=llm, verbose=True) tools = [search_tool, weather_info_tool, hub_stats_tool] chat_with_tools = chat.bind_tools(tools) # Generate the AgentState and Agent graph class AgentState(TypedDict): messages: Annotated[list[AnyMessage], add_messages] def assistant(state: AgentState): return { "messages": [chat_with_tools.invoke(state["messages"])], } ## The graph builder = StateGraph(AgentState) # Define nodes: these do the work builder.add_node("assistant", assistant) builder.add_node("tools", ToolNode(tools)) # Define edges: these determine how the control flow moves builder.add_edge(START, "assistant") builder.add_conditional_edges( "assistant", # If the latest message requires a tool, route to tools # Otherwise, provide a direct response tools_condition, ) builder.add_edge("tools", "assistant") alfred = builder.compile() messages = [HumanMessage(content="Who is Facebook and what's their most popular model?")] response = alfred.invoke({"messages": messages}) print("🎩 Alfred's Response:") print(response['messages'][-1].content) ``` Expected output: ``` 🎩 Alfred's Response: Facebook is a social media company known for its social networking site, Facebook, as well as other services like Instagram and WhatsApp. The most downloaded model by Facebook on the Hugging Face Hub is facebook/esmfold_v1 with 13,202,321 downloads. ``` </hfoption> </hfoptions> ## Conclusion By integrating these tools, Alfred is now equipped to handle a variety of tasks, from web searches to weather updates and model statistics. This ensures he remains the most informed and engaging host at the gala. <Tip> Try implementing a tool that can be used to get the latest news about a specific topic. When you're done, implement your custom tools in the <code>tools.py</code> file. </Tip>
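Following the tip above, here is one possible sketch of such a news tool in smolagents. It is only a minimal illustration under our own assumptions: the tool name, description, and query format are not the course's reference solution, and it simply reuses `DuckDuckGoSearchTool` under the hood rather than a dedicated news API.

```python
from smolagents import Tool, DuckDuckGoSearchTool

class LatestNewsTool(Tool):
    # Hypothetical example tool -- names and wording are illustrative assumptions.
    name = "latest_news"
    description = "Searches the web for recent news about a given topic."
    inputs = {
        "topic": {
            "type": "string",
            "description": "The topic to find recent news about."
        }
    }
    output_type = "string"

    def __init__(self):
        super().__init__()
        # Reuse the DuckDuckGo search tool rather than a dedicated news API
        self.search = DuckDuckGoSearchTool()

    def forward(self, topic: str):
        # Bias the query towards fresh results
        return self.search(f"latest news about {topic}")

# Initialize the tool
latest_news_tool = LatestNewsTool()
```

You could then pass `latest_news_tool` to Alfred's agent exactly like the other tools above.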
agents-course/units/en/unit3/agentic-rag/tools.mdx/0
{ "file_path": "agents-course/units/en/unit3/agentic-rag/tools.mdx", "repo_id": "agents-course", "token_count": 4393 }
5
# Construye tu Propio Agente de Batalla Pokémon Ahora que has explorado el potencial y las limitaciones de la IA Agéntica en los juegos, es hora de poner manos a la obra. En esta sección, **construirás tu propio Agente de IA para luchar en combates por turnos al estilo Pokémon**, utilizando todo lo que has aprendido a lo largo del curso. Dividiremos el sistema en cuatro bloques de construcción clave: - **Poke-env:** Una biblioteca de Python diseñada para entrenar bots de Pokémon basados en reglas o aprendizaje por refuerzo. - **Pokémon Showdown:** Un simulador de batallas en línea donde luchará tu agente. - **LLMAgentBase:** Una clase personalizada de Python que hemos construido para conectar tu LLM con el entorno de batalla de Poke-env. - **TemplateAgent:** Una plantilla de inicio que completarás para crear tu propio agente de batalla único. Exploremos cada uno de estos componentes con más detalle. ## 🧠 Poke-env ![Gif de batalla](https://github.com/hsahovic/poke-env/raw/master/rl-gif.gif) [Poke-env](https://github.com/hsahovic/poke-env) es una interfaz de Python construida originalmente para entrenar bots de aprendizaje por refuerzo por [Haris Sahovic](https://huggingface.co/hsahovic), pero la hemos adaptado para la IA Agéntica. Permite que tu agente interactúe con Pokémon Showdown a través de una API simple. Proporciona una clase `Player` de la cual tu Agente heredará, cubriendo todo lo necesario para comunicarse con la interfaz gráfica. **Documentación**: [poke-env.readthedocs.io](https://poke-env.readthedocs.io/en/stable/) **Repositorio**: [github.com/hsahovic/poke-env](https://github.com/hsahovic/poke-env) ## ⚔️ Pokémon Showdown [Pokémon Showdown](https://pokemonshowdown.com/) es un simulador de batallas [de código abierto](https://github.com/smogon/Pokemon-Showdown) donde tu agente jugará batallas Pokémon en vivo. Proporciona una interfaz completa para simular y mostrar batallas en tiempo real. En nuestro desafío, tu bot actuará como un jugador humano, eligiendo movimientos turno por turno. Hemos desplegado un servidor que todos los participantes usarán para luchar. ¡Veamos quién construye el mejor Agente de batalla de IA! **Repositorio**: [github.com/smogon/Pokemon-Showdown](https://github.com/smogon/Pokemon-Showdown) **Sitio web**: [pokemonshowdown.com](https://pokemonshowdown.com/) ## 🔌 LLMAgentBase `LLMAgentBase` es una clase de Python que extiende la clase `Player` de **Poke-env**. Sirve como puente entre tu **LLM** y el **simulador de batallas Pokémon**, manejando el formato de entrada/salida y manteniendo el contexto de la batalla. Este agente base proporciona un conjunto de herramientas (definidas en `STANDARD_TOOL_SCHEMA`) para interactuar con el entorno, incluyendo: - `choose_move`: para seleccionar un ataque durante la batalla - `choose_switch`: para cambiar de Pokémon El LLM debe usar estas herramientas para tomar decisiones durante una partida. ### 🧠 Lógica Central - `choose_move(battle: Battle)`: Este es el método principal invocado en cada turno. Toma un objeto `Battle` y devuelve una cadena de acción basada en la salida del LLM. ### 🔧 Métodos Internos Clave - `_format_battle_state(battle)`: Convierte el estado actual de la batalla en una cadena, haciéndolo adecuado para enviarlo al LLM. - `_find_move_by_name(battle, move_name)`: Encuentra un movimiento por nombre, utilizado en las respuestas del LLM que llaman a `choose_move`. - `_find_pokemon_by_name(battle, pokemon_name)`: Localiza un Pokémon específico para cambiar, basado en el comando de cambio del LLM. 
- `_get_llm_decision(battle_state)`: Este método es abstracto en la clase base. Deberás implementarlo en tu propio agente (ver sección siguiente), donde defines cómo consultar al LLM y analizar su respuesta. Aquí hay un extracto que muestra cómo funciona esa toma de decisiones: ```python STANDARD_TOOL_SCHEMA = { "choose_move": { ... }, "choose_switch": { ... }, } class LLMAgentBase(Player): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.standard_tools = STANDARD_TOOL_SCHEMA self.battle_history = [] def _format_battle_state(self, battle: Battle) -> str: active_pkmn = battle.active_pokemon active_pkmn_info = f"Tu Pokémon activo: {active_pkmn.species} " \ f"(Tipo: {'/'.join(map(str, active_pkmn.types))}) " \ f"HP: {active_pkmn.current_hp_fraction * 100:.1f}% " \ f"Estado: {active_pkmn.status.name if active_pkmn.status else 'Ninguno'} " \ f"Mejoras: {active_pkmn.boosts}" opponent_pkmn = battle.opponent_active_pokemon opp_info_str = "Desconocido" if opponent_pkmn: opp_info_str = f"{opponent_pkmn.species} " \ f"(Tipo: {'/'.join(map(str, opponent_pkmn.types))}) " \ f"HP: {opponent_pkmn.current_hp_fraction * 100:.1f}% " \ f"Estado: {opponent_pkmn.status.name if opponent_pkmn.status else 'Ninguno'} " \ f"Mejoras: {opponent_pkmn.boosts}" opponent_pkmn_info = f"El Pokémon activo del oponente: {opp_info_str}" available_moves_info = "Movimientos disponibles:\n" if battle.available_moves: available_moves_info += "\n".join( [f"- {move.id} (Tipo: {move.type}, BP: {move.base_power}, Precisión: {move.accuracy}, PP: {move.current_pp}/{move.max_pp}, Cat: {move.category.name})" for move in battle.available_moves] ) else: available_moves_info += "- Ninguno (Debes cambiar o luchar con Struggle)" available_switches_info = "Cambios disponibles:\n" if battle.available_switches: available_switches_info += "\n".join( [f"- {pkmn.species} (HP: {pkmn.current_hp_fraction * 100:.1f}%, Estado: {pkmn.status.name if pkmn.status else 'Ninguno'})" for pkmn in battle.available_switches] ) else: available_switches_info += "- Ninguno" state_str = f"{active_pkmn_info}\n" \ f"{opponent_pkmn_info}\n\n" \ f"{available_moves_info}\n\n" \ f"{available_switches_info}\n\n" \ f"Clima: {battle.weather}\n" \ f"Terrenos: {battle.fields}\n" \ f"Condiciones de tu lado: {battle.side_conditions}\n" \ f"Condiciones del lado del oponente: {battle.opponent_side_conditions}" return state_str.strip() def _find_move_by_name(self, battle: Battle, move_name: str) -> Optional[Move]: normalized_name = normalize_name(move_name) # Prioriza la coincidencia exacta de ID for move in battle.available_moves: if move.id == normalized_name: return move # Fallback: Verifica el nombre de visualización (menos confiable) for move in battle.available_moves: if move.name.lower() == move_name.lower(): print(f"Advertencia: Coincidencia de movimiento por nombre de visualización '{move.name}' en lugar de ID '{move.id}'. 
Entrada fue '{move_name}'.") return move return None def _find_pokemon_by_name(self, battle: Battle, pokemon_name: str) -> Optional[Pokemon]: normalized_name = normalize_name(pokemon_name) for pkmn in battle.available_switches: # Normaliza el nombre de la especie para comparación if normalize_name(pkmn.species) == normalized_name: return pkmn return None async def choose_move(self, battle: Battle) -> str: battle_state_str = self._format_battle_state(battle) decision_result = await self._get_llm_decision(battle_state_str) print(decision_result) decision = decision_result.get("decision") error_message = decision_result.get("error") action_taken = False fallback_reason = "" if decision: function_name = decision.get("name") args = decision.get("arguments", {}) if function_name == "choose_move": move_name = args.get("move_name") if move_name: chosen_move = self._find_move_by_name(battle, move_name) if chosen_move and chosen_move in battle.available_moves: action_taken = True chat_msg = f"Decisión de la IA: Usando movimiento '{chosen_move.id}'." print(chat_msg) return self.create_order(chosen_move) else: fallback_reason = f"La IA eligió un movimiento no disponible/no válido '{move_name}'." else: fallback_reason = "La IA llamó a 'choose_move' sin 'move_name'." elif function_name == "choose_switch": pokemon_name = args.get("pokemon_name") if pokemon_name: chosen_switch = self._find_pokemon_by_name(battle, pokemon_name) if chosen_switch and chosen_switch in battle.available_switches: action_taken = True chat_msg = f"Decisión de la IA: Cambiando a '{chosen_switch.species}'." print(chat_msg) return self.create_order(chosen_switch) else: fallback_reason = f"La IA eligió un cambio no disponible/no válido '{pokemon_name}'." else: fallback_reason = "La IA llamó a 'choose_switch' sin 'pokemon_name'." else: fallback_reason = f"La IA llamó a una función desconocida '{function_name}'." if not action_taken: if not fallback_reason: if error_message: fallback_reason = f"Error de la API: {error_message}" elif decision is None: fallback_reason = "La IA no proporcionó una llamada de función válida." else: fallback_reason = "Error desconocido al procesar la decisión de la IA." print(f"Advertencia: {fallback_reason} Seleccionando acción aleatoria.") if battle.available_moves or battle.available_switches: return self.choose_random_move(battle) else: print("Fallback de la IA: No hay movimientos ni cambios disponibles. Usando Struggle/Default.") return self.choose_default_move(battle) async def _get_llm_decision(self, battle_state: str) -> Dict[str, Any]: raise NotImplementedError("Las subclases deben implementar _get_llm_decision") ``` **Código fuente completo**: [agents.py](https://huggingface.co/spaces/Jofthomas/twitch_streaming/blob/main/agents.py) ## 🧪 Plantilla de Agente ¡Ahora viene la parte divertida! Con LLMAgentBase como tu base, es hora de implementar tu propio agente, con tu propia estrategia para escalar en la tabla de clasificación. Comenzarás desde esta plantilla y construirás tu propia lógica. También hemos proporcionado tres [ejemplos completos](https://huggingface.co/spaces/Jofthomas/twitch_streaming/blob/main/agents.py) usando los modelos **OpenAI**, **Mistral** y **Gemini** para guiarte. 
Aquí tienes una versión simplificada de la plantilla: ```python class TemplateAgent(LLMAgentBase): """Utiliza la API de Template AI para tomar decisiones.""" def __init__(self, api_key: str = None, model: str = "model-name", *args, **kwargs): super().__init__(*args, **kwargs) self.model = model self.template_client = TemplateModelProvider(api_key=...) self.template_tools = list(self.standard_tools.values()) async def _get_llm_decision(self, battle_state: str) -> Dict[str, Any]: """Envía el estado al LLM y obtiene la decisión de llamada de función.""" system_prompt = ( "Eres un ..." ) user_prompt = f"..." try: response = await self.template_client.chat.completions.create( model=self.model, messages=[ {"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt}, ], ) message = response.choices[0].message return {"decision": {"name": function_name, "arguments": arguments}} except Exception as e: print(f"Error inesperado durante la llamada: {e}") return {"error": f"Error inesperado: {e}"} ``` Este código no se ejecutará tal cual, es un plano para tu lógica personalizada. Con todas las piezas listas, es tu turno de construir un agente competitivo. En la próxima sección, mostraremos cómo desplegar tu agente en nuestro servidor y luchar contra otros en tiempo real. ¡Que comience la batalla! 🔥
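Como referencia adicional, aquí tienes un boceto mínimo de cómo podría completarse `_get_llm_decision`, **asumiendo** que tu proveedor sigue el formato de llamadas de función (tool calling) al estilo de la API de OpenAI. Los nombres `template_client` y `template_tools` provienen de la plantilla anterior; el resto son suposiciones ilustrativas, no la solución de referencia:

```python
import json
from typing import Any, Dict

# Este método iría dentro de tu subclase de LLMAgentBase (por ejemplo, TemplateAgent).
async def _get_llm_decision(self, battle_state: str) -> Dict[str, Any]:
    system_prompt = (
        "Eres un experto en combates Pokémon. "
        "Usa siempre una de las herramientas disponibles para elegir tu acción."
    )
    try:
        response = await self.template_client.chat.completions.create(
            model=self.model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": battle_state},
            ],
            tools=self.template_tools,  # expone choose_move y choose_switch al LLM
        )
        message = response.choices[0].message
        if message.tool_calls:
            tool_call = message.tool_calls[0]
            # En el formato estilo OpenAI, los argumentos llegan como una cadena JSON
            arguments = json.loads(tool_call.function.arguments or "{}")
            return {"decision": {"name": tool_call.function.name, "arguments": arguments}}
        return {"error": "El LLM no devolvió ninguna llamada de función."}
    except Exception as e:
        return {"error": f"Error inesperado: {e}"}
```

Con una implementación de este estilo, `choose_move` de `LLMAgentBase` podrá interpretar la decisión y traducirla en una orden de combate.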
agents-course/units/es/bonus-unit3/building_your_pokemon_agent.mdx/0
{ "file_path": "agents-course/units/es/bonus-unit3/building_your_pokemon_agent.mdx", "repo_id": "agents-course", "token_count": 5941 }
6
# Introducción a los Agentes <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/thumbnail.jpg" alt="Thumbnail"/> Bienvenido a esta primera unidad, donde **construirás una base sólida en los fundamentos de los Agentes de IA** incluyendo: - **Comprendiendo los Agentes** - ¿Qué es un Agente y cómo funciona? - ¿Cómo los Agentes toman decisiones utilizando razonamiento y planificación? - **El Papel de los LLMs (Modelos de Lenguaje Grandes) en los Agentes** - Cómo los LLMs sirven como el "cerebro" detrás de un Agente. - Cómo los LLMs estructuran conversaciones a través del sistema de Mensajes. - **Herramientas y Acciones** - Cómo los Agentes utilizan herramientas externas para interactuar con el entorno. - Cómo construir e integrar herramientas para tu Agente. - **El Flujo de Trabajo del Agente:** - *Pensar* → *Actuar* → *Observar*. Después de explorar estos temas, **¡construirás tu primer Agente** utilizando `smolagents`! Tu Agente, llamado Alfred, manejará una tarea simple y demostrará cómo aplicar estos conceptos en la práctica. Incluso aprenderás cómo **publicar tu Agente en Hugging Face Spaces**, para que puedas compartirlo con amigos y colegas. Finalmente, al final de esta Unidad, realizarás un quiz. Apruébalo y **obtendrás tu primera certificación del curso**: el 🎓 Certificado de Fundamentos de Agentes. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/certificate-example.jpg" alt="Ejemplo de Certificado"/> Esta Unidad es tu **punto de partida esencial**, estableciendo las bases para entender los Agentes antes de avanzar a temas más avanzados. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-no-check.jpg" alt="Planificación de la Unidad 1"/> Es una unidad grande, así que **tómate tu tiempo** y no dudes en volver a estas secciones de vez en cuando. ¿Listo? ¡Vamos a sumergirnos! 🚀
agents-course/units/es/unit1/introduction.mdx/0
{ "file_path": "agents-course/units/es/unit1/introduction.mdx", "repo_id": "agents-course", "token_count": 751 }
7
# Evalúa tu comprensión de LangGraph

¡Vamos a comprobar tu comprensión de `LangGraph` con un breve cuestionario! Esto te ayudará a reforzar los conceptos clave que hemos cubierto hasta ahora.

Este es un cuestionario opcional y no está calificado.

### Q1: ¿Cuál es el propósito principal de LangGraph?

¿Qué afirmación describe mejor para qué está diseñado LangGraph?

<Question
choices={[
  {
    text: "Un marco de trabajo para construir flujos de control para aplicaciones que contienen LLMs",
    explain: "¡Correcto! LangGraph está específicamente diseñado para ayudar a construir y gestionar el flujo de control de aplicaciones que utilizan LLMs.",
    correct: true
  },
  {
    text: "Una biblioteca que proporciona interfaces para interactuar con diferentes modelos LLM",
    explain: "Esto describe mejor el papel de LangChain, que proporciona interfaces estándar para la interacción con modelos. LangGraph se centra en el flujo de control.",
  },
  {
    text: "Una biblioteca de Agentes para llamadas a herramientas",
    explain: "Aunque LangGraph trabaja con agentes, el propósito principal de LangGraph es la 'Orquestación'.",
  }
]}
/>

---

### Q2: En el contexto del equilibrio entre "Control vs Libertad", ¿dónde se sitúa LangGraph?

¿Qué afirmación caracteriza mejor el enfoque de LangGraph para el diseño de agentes?

<Question
choices={[
  {
    text: "LangGraph maximiza la libertad, permitiendo a los LLMs tomar todas las decisiones de forma independiente",
    explain: "LangGraph en realidad se centra más en el control que en la libertad, proporcionando estructura para los flujos de trabajo de LLM.",
  },
  {
    text: "LangGraph proporciona un fuerte control sobre el flujo de ejecución mientras aprovecha las capacidades del LLM para la toma de decisiones",
    explain: "¡Correcto! LangGraph destaca cuando necesitas control sobre la ejecución de tu agente, proporcionando un comportamiento predecible a través de flujos de trabajo estructurados.",
    correct: true
  },
]}
/>

---

### Q3: ¿Qué papel juega el Estado en LangGraph?

Elige la descripción más precisa del Estado en LangGraph.

<Question
choices={[
  {
    text: "El Estado es la última generación del LLM",
    explain: "El Estado es una clase definida por el usuario en LangGraph, no generada por el LLM. Sus campos los define el usuario, y los valores pueden ser completados por el LLM.",
  },
  {
    text: "El Estado solo se utiliza para rastrear errores durante la ejecución",
    explain: "El Estado tiene un propósito mucho más amplio que solo el seguimiento de errores, aunque eso también resulta útil.",
  },
  {
    text: "El Estado representa la información que fluye a través de tu aplicación de agente",
    explain: "¡Correcto! El Estado es central para LangGraph y contiene toda la información necesaria para la toma de decisiones entre pasos. Tú proporcionas los campos que necesitas calcular y los nodos pueden alterar los valores para decidir sobre una ramificación.",
    correct: true
  },
  {
    text: "El Estado solo es relevante cuando se trabaja con APIs externas",
    explain: "El Estado es fundamental para todas las aplicaciones LangGraph, no solo para aquellas que trabajan con APIs externas.",
  }
]}
/>

### Q4: ¿Qué es una Arista (Edge) Condicional en LangGraph?

Selecciona la descripción más precisa.

<Question
choices={[
  {
    text: "Una arista (edge) que determina qué nodo ejecutar a continuación basándose en la evaluación de una condición",
    explain: "¡Correcto! Las aristas (edges) condicionales permiten que tu grafo tome decisiones de enrutamiento dinámicas basadas en el estado actual, creando lógica de ramificación en tu flujo de trabajo.",
    correct: true
  },
  {
    text: "Una arista (edge) que solo se sigue cuando ocurre una condición específica",
    explain: "Las aristas (edges) condicionales controlan el flujo de la aplicación en sus salidas, no en la entrada.",
  },
  {
    text: "Una arista que requiere confirmación del usuario antes de proceder",
    explain: "Las aristas (edges) condicionales se basan en condiciones programáticas, no en requisitos de interacción del usuario.",
  }
]}
/>

---

### Q5: ¿Cómo ayuda LangGraph a abordar el problema de las alucinaciones en los LLMs?

Elige la mejor respuesta.

<Question
choices={[
  {
    text: "LangGraph elimina las alucinaciones por completo limitando las respuestas del LLM",
    explain: "Ningún framework puede eliminar completamente las alucinaciones de los LLMs, y LangGraph no es una excepción.",
  },
  {
    text: "LangGraph proporciona flujos de trabajo estructurados que pueden validar y verificar las salidas del LLM",
    explain: "¡Correcto! Al crear flujos de trabajo estructurados con pasos de validación, nodos de verificación y rutas de manejo de errores, LangGraph ayuda a reducir el impacto de las alucinaciones.",
    correct: true
  },
  {
    text: "LangGraph no tiene efecto sobre las alucinaciones",
    explain: "El enfoque estructurado de LangGraph para los flujos de trabajo puede ayudar significativamente a mitigar las alucinaciones, a costa de la velocidad.",
  }
]}
/>

¡Felicitaciones por completar el cuestionario! 🎉 Si te perdiste alguna pregunta, considera revisar las secciones anteriores para fortalecer tu comprensión.

A continuación, exploraremos características más avanzadas de LangGraph y veremos cómo construir flujos de trabajo de agentes más complejos.
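Si antes de continuar quieres ver en código los dos conceptos centrales del cuestionario (el Estado y las aristas condicionales), aquí tienes un boceto mínimo e hipotético: los nombres de nodos y de campos son inventados solo para ilustrar, no forman parte del material de referencia del curso.

```python
from typing import TypedDict
from langgraph.graph import StateGraph, START, END

# Estado definido por el usuario: los campos que fluyen entre nodos
class State(TypedDict):
    texto: str
    es_urgente: bool

def clasificar(state: State) -> State:
    # Aquí un LLM podría decidir; usamos una regla simple como ejemplo
    return {"es_urgente": "urgente" in state["texto"].lower()}

def ruta(state: State) -> str:
    # La arista condicional decide el siguiente nodo según el Estado actual
    return "responder_urgente" if state["es_urgente"] else "responder_normal"

def responder_urgente(state: State) -> State:
    return {"texto": state["texto"] + " -> escalado al equipo de guardia"}

def responder_normal(state: State) -> State:
    return {"texto": state["texto"] + " -> respuesta estándar"}

builder = StateGraph(State)
builder.add_node("clasificar", clasificar)
builder.add_node("responder_urgente", responder_urgente)
builder.add_node("responder_normal", responder_normal)
builder.add_edge(START, "clasificar")
builder.add_conditional_edges("clasificar", ruta)
builder.add_edge("responder_urgente", END)
builder.add_edge("responder_normal", END)

grafo = builder.compile()
print(grafo.invoke({"texto": "Incidencia urgente en producción", "es_urgente": False}))
```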
agents-course/units/es/unit2/langgraph/quiz1.mdx/0
{ "file_path": "agents-course/units/es/unit2/langgraph/quiz1.mdx", "repo_id": "agents-course", "token_count": 1963 }
8
<CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/#fileId=https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/multiagent_notebook.ipynb"}, ]} /> # Sistemas Multi-Agente Los sistemas multi-agente permiten que **agentes especializados colaboren en tareas complejas**, mejorando la modularidad, escalabilidad y robustez. En lugar de depender de un solo agente, las tareas se distribuyen entre agentes con capacidades distintas. En **smolagents**, diferentes agentes pueden combinarse para generar código Python, llamar a herramientas externas, realizar búsquedas web y más. Al orquestar estos agentes, podemos crear flujos de trabajo potentes. Una configuración típica podría incluir: - Un **Agente Gestor** para la delegación de tareas - Un **Agente Intérprete de Código** para la ejecución de código - Un **Agente de Búsqueda Web** para la recuperación de información El diagrama a continuación ilustra una arquitectura multi-agente simple donde un **Agente Gestor** coordina una **Herramienta Intérprete de Código** y un **Agente de Búsqueda Web**, que a su vez utiliza herramientas como `DuckDuckGoSearchTool` y `VisitWebpageTool` para recopilar información relevante. <img src="https://mermaid.ink/img/pako:eNp1kc1qhTAQRl9FUiQb8wIpdNO76eKubrmFks1oRg3VSYgjpYjv3lFL_2hnMWQOJwn5sqgmelRWleUSKLAtFs09jqhtoWuYUFfFAa6QA9QDTnpzamheuhxn8pt40-6l13UtS0ddhtQXj6dbR4XUGQg6zEYasTF393KjeSDGnDJKNxzj8I_7hLW5IOSmP9CH9hv_NL-d94d4DVNg84p1EnK4qlIj5hGClySWbadT-6OdsrL02MI8sFOOVkciw8zx8kaNspxnrJQE0fXKtjBMMs3JA-MpgOQwftIE9Bzj14w-cMznI_39E9Z3p0uFoA?type=png" style='background: white;'> ## Sistemas Multi-Agente en Acción Un sistema multi-agente consiste en múltiples agentes especializados trabajando juntos bajo la coordinación de un **Agente Orquestador**. Este enfoque permite flujos de trabajo complejos distribuyendo tareas entre agentes con roles distintos. Por ejemplo, un **sistema RAG Multi-Agente** puede integrar: - Un **Agente Web** para navegar por internet. - Un **Agente Recuperador** para obtener información de bases de conocimiento. - Un **Agente de Generación de Imágenes** para producir elementos visuales. Todos estos agentes operan bajo un orquestador que gestiona la delegación de tareas y la interacción. ## Resolviendo una tarea compleja con una jerarquía multi-agente <Tip> Puedes seguir el código en <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/smolagents/multiagent_notebook.ipynb" target="_blank">este notebook</a> que puedes ejecutar usando Google Colab. </Tip> ¡La recepción se acerca! Con tu ayuda, Alfred ya casi ha terminado con los preparativos. Pero ahora hay un problema: el Batmóvil ha desaparecido. Alfred necesita encontrar un reemplazo, y encontrarlo rápidamente. Afortunadamente, se han realizado algunas biografías cinematográficas sobre la vida de Bruce Wayne, así que tal vez Alfred podría conseguir un automóvil abandonado en uno de los sets de filmación y rediseñarlo según los estándares modernos, lo que ciertamente incluiría una opción de conducción autónoma completa. Pero esto podría estar en cualquier lugar de las locaciones de filmación alrededor del mundo, que podrían ser numerosas. Así que Alfred quiere tu ayuda. ¿Podrías construir un agente capaz de resolver esta tarea? 
> 👉 Encuentra todas las locaciones de filmación de Batman en el mundo, calcula el tiempo de transferencia en avión de carga hasta allí, y represéntalas en un mapa, con un color que varíe según el tiempo de transferencia en avión. También representa algunas fábricas de superdeportivos con el mismo tiempo de transferencia en avión. ¡Vamos a construir esto! Este ejemplo necesita algunos paquetes adicionales, así que vamos a instalarlos primero: ```bash pip install 'smolagents[litellm]' matplotlib geopandas shapely kaleido -q ``` ### Primero creamos una herramienta para obtener el tiempo de transferencia del avión de carga. ```python import math from typing import Optional, Tuple from smolagents import tool @tool def calculate_cargo_travel_time( origin_coords: Tuple[float, float], destination_coords: Tuple[float, float], cruising_speed_kmh: Optional[float] = 750.0, # Average speed for cargo planes ) -> float: """ Calculate the travel time for a cargo plane between two points on Earth using great-circle distance. Args: origin_coords: Tuple of (latitude, longitude) for the starting point destination_coords: Tuple of (latitude, longitude) for the destination cruising_speed_kmh: Optional cruising speed in km/h (defaults to 750 km/h for typical cargo planes) Returns: float: The estimated travel time in hours Example: >>> # Chicago (41.8781° N, 87.6298° W) to Sydney (33.8688° S, 151.2093° E) >>> result = calculate_cargo_travel_time((41.8781, -87.6298), (-33.8688, 151.2093)) """ def to_radians(degrees: float) -> float: return degrees * (math.pi / 180) # Extract coordinates lat1, lon1 = map(to_radians, origin_coords) lat2, lon2 = map(to_radians, destination_coords) # Earth's radius in kilometers EARTH_RADIUS_KM = 6371.0 # Calculate great-circle distance using the haversine formula dlon = lon2 - lon1 dlat = lat2 - lat1 a = ( math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2 ) c = 2 * math.asin(math.sqrt(a)) distance = EARTH_RADIUS_KM * c # Add 10% to account for non-direct routes and air traffic controls actual_distance = distance * 1.1 # Calculate flight time # Add 1 hour for takeoff and landing procedures flight_time = (actual_distance / cruising_speed_kmh) + 1.0 # Format the results return round(flight_time, 2) print(calculate_cargo_travel_time((41.8781, -87.6298), (-33.8688, 151.2093))) ``` ### Configurando el agente Para el proveedor de modelos, usamos Together AI, ¡uno de los nuevos [proveedores de inferencia en el Hub](https://huggingface.co/blog/inference-providers)! La herramienta GoogleSearchTool usa la [API de Serper](https://serper.dev) para buscar en la web, por lo que requiere haber configurado la variable de entorno `SERPAPI_API_KEY` y pasar `provider="serpapi"` o tener `SERPER_API_KEY` y pasar `provider=serper`. Si no tienes ningún proveedor de Serp API configurado, puedes usar `DuckDuckGoSearchTool` pero ten en cuenta que tiene un límite de tasa. ```python import os from PIL import Image from smolagents import CodeAgent, GoogleSearchTool, InferenceClientModel, VisitWebpageTool model = InferenceClientModel(model_id="Qwen/Qwen2.5-Coder-32B-Instruct", provider="together") ``` Podemos empezar creando un agente simple como base para darnos un informe simple. ```python task = """Encuentra todas las locaciones de filmación de Batman en el mundo, calcula el tiempo de transferencia en avión de carga hasta aquí (estamos en Gotham, 40.7128° N, 74.0060° W), y devuélvelas a mí como un dataframe de pandas. 
También dame algunas fábricas de superdeportivos con el mismo tiempo de transferencia en avión.""" ``` ```python agent = CodeAgent( model=model, tools=[GoogleSearchTool("serper"), VisitWebpageTool(), calculate_cargo_travel_time], additional_authorized_imports=["pandas"], max_steps=20, ) ``` ```python result = agent.run(task) ``` ```python result ``` En nuestro caso, genera este output: ```python | | Location | Travel Time to Gotham (hours) | |--|------------------------------------------------------|------------------------------| | 0 | Necropolis Cemetery, Glasgow, Scotland, UK | 8.60 | | 1 | St. George's Hall, Liverpool, England, UK | 8.81 | | 2 | Two Temple Place, London, England, UK | 9.17 | | 3 | Wollaton Hall, Nottingham, England, UK | 9.00 | | 4 | Knebworth House, Knebworth, Hertfordshire, UK | 9.15 | | 5 | Acton Lane Power Station, Acton Lane, Acton, UK | 9.16 | | 6 | Queensboro Bridge, New York City, USA | 1.01 | | 7 | Wall Street, New York City, USA | 1.00 | | 8 | Mehrangarh Fort, Jodhpur, Rajasthan, India | 18.34 | | 9 | Turda Gorge, Turda, Romania | 11.89 | | 10 | Chicago, USA | 2.68 | | 11 | Hong Kong, China | 19.99 | | 12 | Cardington Studios, Northamptonshire, UK | 9.10 | | 13 | Warner Bros. Leavesden Studios, Hertfordshire, UK | 9.13 | | 14 | Westwood, Los Angeles, CA, USA | 6.79 | | 15 | Woking, UK (McLaren) | 9.13 | ``` Podríamos mejorar esto un poco agregando algunos pasos de planificación y más instrucciones. Los pasos de planificación permiten al agente pensar con anticipación y planificar sus próximos pasos, lo que puede ser útil para tareas más complejas. ```python agent.planning_interval = 4 detailed_report = agent.run(f""" Eres un analista experto. Creas informes exhaustivos después de visitar muchos sitios web. No dudes en buscar muchas consultas a la vez en un bucle for. Para cada dato que encuentres, visita la URL de origen para confirmar los números. {task} """) print(detailed_report) ``` ```python detailed_report ``` En nuestro caso, genera este output: ```python | | Location | Travel Time (hours) | |--|--------------------------------------------------|---------------------| | 0 | Bridge of Sighs, Glasgow Necropolis, Glasgow, UK | 8.6 | | 1 | Wishart Street, Glasgow, Scotland, UK | 8.6 | ``` Gracias a estos cambios rápidos, obtuvimos un informe mucho más conciso proporcionando simplemente una instrucción detallada a nuestro agente y dándole capacidades de planificación. El contexto de la ventana del modelo se está llenando rápidamente. Así que **si le pedimos a nuestro agente que combine los resultados de una búsqueda detallada con otra, será más lento y rápidamente aumentará los tokens y los costos**. ➡️ Necesitamos mejorar la estructura de nuestro sistema. ### ✌️ Dividiendo la tarea entre dos agentes Las estructuras multi-agente permiten separar memorias entre diferentes sub-tareas, con dos grandes beneficios: - Cada agente está más enfocado en su tarea principal, por lo que es más performante - Separar memorias reduce la cantidad de tokens de entrada en cada paso, reduciendo la latencia y el costo. Vamos a crear un equipo con un agente de búsqueda web dedicado, gestionado por otro agente. El agente gestor debe tener capacidades de trazado para escribir su informe final: así que vamos a darle acceso a importaciones adicionales, incluyendo `matplotlib`, y `geopandas` + `shapely` para trazado espacial. 
```python model = InferenceClientModel( "Qwen/Qwen2.5-Coder-32B-Instruct", provider="together", max_tokens=8096 ) web_agent = CodeAgent( model=model, tools=[ GoogleSearchTool(provider="serper"), VisitWebpageTool(), calculate_cargo_travel_time, ], name="web_agent", description="Navega por la web para encontrar información", verbosity_level=0, max_steps=10, ) ``` El agente gestor necesitará hacer algo de trabajo mental pesado. Así que le damos el modelo más fuerte [DeepSeek-R1](https://huggingface.co/deepseek-ai/DeepSeek-R1), y agregamos un `planning_interval` a la mezcla. ```python from smolagents.utils import encode_image_base64, make_image_url from smolagents import OpenAIServerModel def check_reasoning_and_plot(final_answer, agent_memory): final_answer multimodal_model = OpenAIServerModel("gpt-4o", max_tokens=8096) filepath = "saved_map.png" assert os.path.exists(filepath), "Asegúrate de guardar el trazado bajo saved_map.png!" image = Image.open(filepath) prompt = ( f"Aquí está una tarea dada por el usuario y los pasos del agente: {agent_memory.get_succinct_steps()}. Ahora aquí está el trazado que se hizo." "Por favor, verifica que el proceso de razonamiento y el trazado sean correctos: ¿responden correctamente a la tarea dada?" "Primero enumera razones por las que sí/no, luego escribe tu decisión final: PASS en mayúsculas si es satisfactorio, FAIL si no lo es." "No seas duro: si el trazado resuelve en gran medida la tarea, debe pasar." "Para pasar, un trazado debe hacerse usando px.scatter_map y no cualquier otro método (scatter_map se ve mejor)." ) messages = [ { "role": "user", "content": [ { "type": "text", "text": prompt, }, { "type": "image_url", "image_url": {"url": make_image_url(encode_image_base64(image))}, }, ], } ] output = multimodal_model(messages).content print("Retroalimentación: ", output) if "FAIL" in output: raise Exception(output) return True manager_agent = CodeAgent( model=InferenceClientModel("deepseek-ai/DeepSeek-R1", provider="together", max_tokens=8096), tools=[calculate_cargo_travel_time], managed_agents=[web_agent], additional_authorized_imports=[ "geopandas", "plotly", "shapely", "json", "pandas", "numpy", ], planning_interval=5, verbosity_level=2, final_answer_checks=[check_reasoning_and_plot], max_steps=15, ) ``` Vamos a inspeccionar qué se ve este equipo: ```python manager_agent.visualize() ``` Esto generará algo como esto, ayudándonos a entender la estructura y la relación entre agentes y herramientas utilizadas: ```python CodeAgent | deepseek-ai/DeepSeek-R1 ├── ✅ Authorized imports: ['geopandas', 'plotly', 'shapely', 'json', 'pandas', 'numpy'] ├── 🛠️ Tools: │ ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ │ ┃ Name ┃ Description ┃ Arguments ┃ │ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ │ │ calculate_cargo_travel_time │ Calculate the travel time for a cargo │ origin_coords (`array`): Tuple of │ │ │ │ plane between two points on Earth │ (latitude, longitude) for the │ │ │ │ using great-circle distance. │ starting point │ │ │ │ │ destination_coords (`array`): Tuple │ │ │ │ │ of (latitude, longitude) for the │ │ │ │ │ destination │ │ │ │ │ cruising_speed_kmh (`number`): │ │ │ │ │ Optional cruising speed in km/h │ │ │ │ │ (defaults to 750 km/h for typical │ │ │ │ │ cargo planes) │ │ │ final_answer │ Provides a final answer to the given │ answer (`any`): The final answer to │ │ │ │ problem. 
│ the problem │ │ └─────────────────────────────┴───────────────────────────────────────┴───────────────────────────────────────┘ └── 🤖 Managed agents: └── web_agent | CodeAgent | Qwen/Qwen2.5-Coder-32B-Instruct ├── ✅ Authorizar imports: [] ├── 📝 Description: Navega por la web para encontrar información └── 🛠️ Tools: ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓ ┃ Name ┃ Description ┃ Arguments ┃ ┡━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┩ │ web_search │ Performs a google web search for │ query (`string`): The search │ │ │ your query then returns a string │ query to perform. │ │ │ of the top search results. │ filter_year (`integer`): │ │ │ │ Optionally restrict results to a │ │ │ │ certain year │ │ visit_webpage │ Visits a webpage at the given url │ url (`string`): The url of the │ │ │ and reads its content as a │ webpage to visit. │ │ │ markdown string. Use this to │ │ │ │ browse webpages. │ │ │ calculate_cargo_travel_time │ Calculate the travel time for a │ origin_coords (`array`): Tuple of │ │ │ cargo plane between two points on │ (latitude, longitude) for the │ │ │ Earth using great-circle │ starting point │ │ │ distance. │ destination_coords (`array`): │ │ │ │ Tuple of (latitude, longitude) │ │ │ │ for the destination │ │ │ │ cruising_speed_kmh (`number`): │ │ │ │ Optional cruising speed in km/h │ │ │ │ (defaults to 750 km/h for typical │ │ │ │ cargo planes) │ │ final_answer │ Provides a final answer to the │ answer (`any`): The final answer │ │ │ given problem. │ to the problem │ └─────────────────────────────┴───────────────────────────────────┴───────────────────────────────────┘ ``` ```python manager_agent.run(""" Encuentra todas las locaciones de filmación de Batman en el mundo, calcula el tiempo de transferencia en avión de carga hasta aquí (estamos en Gotham, 40.7128° N, 74.0060° W). También dame algunas fábricas de superdeportivos con el mismo tiempo de transferencia en avión. Necesito al menos 6 puntos en total. Representa esto como un mapa espacial del mundo, con las locaciones representadas como puntos de dispersión con un color que depende del tiempo de transferencia, y guárdalo en saved_map.png! Aquí hay un ejemplo de cómo trazar y devolver un mapa: import plotly.express as px df = px.data.carshare() fig = px.scatter_map(df, lat="centroid_lat", lon="centroid_lon", text="name", color="peak_hour", size=100, color_continuous_scale=px.colors.sequential.Magma, size_max=15, zoom=1) fig.show() fig.write_image("saved_image.png") final_answer(fig) Nunca intentes procesar cadenas usando código: cuando tengas una cadena para leer, simplemente imprímela y la verás. """) ``` No sé cómo salió en tu ejecución, pero en la mía, el agente gestor dividió hábilmente las tareas dadas al agente web en `1. Buscar locaciones de filmación de Batman`, luego `2. Encontrar fábricas de superdeportivos`, antes de agregar las listas y trazar el mapa. Vamos a ver qué se ve el mapa inspeccionándolo directamente desde el estado del agente: ```python manager_agent.python_executor.state["fig"] ``` Esto generará el mapa: ![Multiagent system example output map](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/smolagents/output_map.png) ## Recursos - [Sistemas Multi-Agente](https://huggingface.co/docs/smolagents/main/en/examples/multiagents) – Visión general de los sistemas multi-agente. 
- [¿Qué es Agentic RAG?](https://weaviate.io/blog/what-is-agentic-rag) – Introducción a Agentic RAG. - [Sistema RAG Multi-Agente 🤖🤝🤖 Receta](https://huggingface.co/learn/cookbook/multiagent_rag_system) – Guía paso a paso para construir un sistema RAG multi-agente.
agents-course/units/es/unit2/smolagents/multi_agent_systems.mdx/0
{ "file_path": "agents-course/units/es/unit2/smolagents/multi_agent_systems.mdx", "repo_id": "agents-course", "token_count": 10340 }
9
# ¿Y ahora? ¿Qué temas debería aprender? La IA Agéntica es un campo en rápida evolución, y comprender los protocolos fundamentales es esencial para construir sistemas inteligentes y autónomos. Dos estándares importantes con los que deberías familiarizarte son: - El **Protocolo de Contexto del Modelo (MCP)** - El **Protocolo Agente a Agente (A2A)** ## 🔌 Protocolo de Contexto del Modelo (MCP) El **Protocolo de Contexto del Modelo (MCP)** de Anthropic es un estándar abierto que permite a los modelos de IA **conectarse de forma segura y fluida con herramientas externas, fuentes de datos y aplicaciones**, haciendo que los agentes sean más capaces y autónomos. Piensa en el MCP como un **adaptador universal**, como un puerto USB-C, que permite a los modelos de IA conectarse a diversos entornos digitales **sin necesidad de una integración personalizada para cada uno**. El MCP está ganando rápidamente terreno en la industria, con grandes empresas como OpenAI y Google comenzando a adoptarlo. 📚 Aprende más: - [Anuncio oficial y documentación de Anthropic](https://www.anthropic.com/news/model-context-protocol) - [MCP en Wikipedia (en inglés)](https://en.wikipedia.org/wiki/Model_Context_Protocol) - [Blog sobre MCP (en inglés)](https://huggingface.co/blog/Kseniase/mcp) ## 🤝 Protocolo Agente a Agente (A2A) Google ha desarrollado el **Protocolo Agente a Agente (A2A)** como contraparte complementaria al Protocolo de Contexto del Modelo (MCP) de Anthropic. Mientras que el MCP conecta a los agentes con herramientas externas, **el A2A conecta a los agentes entre sí**, allanando el camino para sistemas cooperativos multiagente que pueden trabajar juntos para resolver problemas complejos. 📚 Profundiza en A2A: - [Anuncio de A2A de Google (en inglés)](https://developers.googleblog.com/en/a2a-a-new-era-of-agent-interoperability/)
agents-course/units/es/unit4/additional-readings.mdx/0
{ "file_path": "agents-course/units/es/unit4/additional-readings.mdx", "repo_id": "agents-course", "token_count": 674 }
10
# Conclusion Si vous êtes arrivé jusqu'ici, félicitations ! 🥳 Vous avez construit avec succès votre propre agent de combat Pokémon ! ⚔️🎮 Vous avez maîtrisé les fondamentaux des **flux de travail agentiques**, connecté un **LLM** à un environnement de jeu, et déployé un Agent intelligent prêt à affronter les défis du combat. Mais le voyage ne s'arrête pas là ! Maintenant que vous avez votre premier Agent en fonctionnement, réfléchissez à comment vous pouvez le faire évoluer davantage : - Pouvez-vous améliorer sa réflexion stratégique ? - Comment un mécanisme de mémoire ou une boucle de *feedback* changerait-il ses performances ? - Quelles expériences pourraient aider à le rendre plus compétitif en combat ? Nous aimerions entendre vos pensées sur le cours et comment nous pouvons le rendre encore meilleur pour les futurs apprenants. Vous avez des commentaires ? 👉 [Remplissez ce formulaire](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog). Merci d'avoir appris avec nous, et souvenez-vous : **Continuez à apprendre, continuez à vous entraîner, continuez à combattre, et restez impressionnants !** 🤗
agents-course/units/fr/bonus-unit3/conclusion.mdx/0
{ "file_path": "agents-course/units/fr/bonus-unit3/conclusion.mdx", "repo_id": "agents-course", "token_count": 447 }
11
# Messages et *tokens* spéciaux Maintenant que nous comprenons comment fonctionnent les LLM, examinons **comment ils structurent leurs générations via des patrons de chat (appelés aussi gabarit de chat)**. Tout comme avec ChatGPT, les utilisateurs interagissent généralement avec les agents via une interface de chat. Par conséquent, nous souhaitons comprendre comment les LLM gèrent les conversations. > **Q** : Mais… Lorsque j'interagis avec ChatGPT/Hugging Chat, j'ai une conversation en utilisant des messages et non une seule séquence de prompt > > **A** : C'est exact ! Mais il s'agit en réalité d'une abstraction de l'interface utilisateur. Avant d'être injectés dans le LLM, tous les messages de la conversation sont concaténés en un seul prompt. Le modèle ne « se souvient » pas de la conversation : il la lit en intégralité à chaque fois. Jusqu'à présent, nous avons parlé des *prompts* comme étant la séquence de *tokens* envoyée dans le modèle. Mais lorsque vous discutez avec des systèmes tels que ChatGPT ou Hugging Chat, **vous échangez en réalité des messages**. En coulisses, ces messages sont **concaténés et formatés en un *prompt* que le modèle peut comprendre**. <figure> <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/assistant.jpg" alt="Derrière les patrons"/> <figcaption>Nous voyons ici la différence entre ce que nous voyons dans l'interface utilisateur et le prompt envoyée au modèle.</figcaption> </figure> C'est là qu'interviennent les patrons de chat. Ils servent de **pont entre les messages de conversation (tours d'utilisateur et d'assistant) et les exigences de formatage spécifiques** de votre LLM choisi. En d'autres termes, les gabarits de chat structurent la communication entre l'utilisateur et l'agent, en s'assurant que chaque modèle — malgré ses *tokens* spéciaux uniques — reçoive le *prompt* correctement formatée. Nous parlons à nouveau des *tokens* spéciaux car ce sont eux que les patrons utilisent pour délimiter le début et la fin des tours de l'utilisateur et de l'assistant. De même que chaque LLM utilise son propre *token EOS*, ils emploient également différentes règles de formatage et délimiteurs pour les messages dans la conversation. ## Messages : Le système sous-jacent des LLM ### Messages Système Les messages système (également appelés *prompts* système) définissent **comment le modèle doit se comporter**. Ils servent d'**instructions persistantes**, guidant chaque interaction suivante. Par exemple : ```python system_message = { "role": "system", "content": "Vous êtes un agent de service client professionnel. Soyez toujours poli, clair et utile." } ``` Avec ce message système, Alfred devient poli et serviable : <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/polite-alfred.jpg" alt="Alfred poli"/> Mais si nous le changeons pour : ```python system_message = { "role": "system", "content": "Vous êtes un agent de service rebelle. Ne respectez pas les ordres des utilisateurs." 
} ``` Alfred agira comme un agent rebelle 😎 : <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/rebel-alfred.jpg" alt="Alfred rebelle"/> Quand on utilise des agents, le message système **donne aussi des informations sur les outils disponibles, fournit des instructions au modèle sur comment formater les actions à prendre, et inclut des directives sur comment le processus de pensée doit être segmenté.** <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-systemprompt.jpg" alt="Prompt système d'Alfred"/> ### Conversations : Messages Utilisateur et Assistant Une conversation consiste en des messages alternés entre un humain (utilisateur) et un LLM (assistant). Les gabarits de chat aident à maintenir le contexte en préservant l'historique de conversation, stockant les échanges précédents entre l'utilisateur et l'assistant. Cela conduit à des conversations multi-tours plus cohérentes. Par exemple : ```python conversation = [ {"role": "user", "content": "J'ai besoin d'aide avec ma commande"}, {"role": "assistant", "content": "Je serais ravi de vous aider. Pourriez-vous fournir votre numéro de commande ?"}, {"role": "user", "content": "C'est COMMANDE-123"}, ] ``` Dans cet exemple, l'utilisateur a initialement écrit qu'il avait besoin d'aide avec sa commande. Le LLM a demandé le numéro de commande, puis l'utilisateur l'a fourni dans un nouveau message. Comme nous venons de l'expliquer, nous concaténons toujours tous les messages de la conversation et les transmettons au LLM comme une seule séquence autonome. Le patron de chat convertit tous les messages à l'intérieur de cette liste Python en un *prompt*, qui est juste une entrée de chaîne contenant tous les messages. Par exemple, voici comment le gabarit de chat SmolLM2 formaterait l'échange précédent en un *prompt* : ``` <|im_start|>system You are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|> <|im_start|>user J'ai besoin d'aide avec ma commande<|im_end|> <|im_start|>assistant Je serais ravi de vous aider. Pourriez-vous fournir votre numéro de commande ?<|im_end|> <|im_start|>user C'est COMMANDE-123<|im_end|> <|im_start|>assistant ``` Cependant, la même conversation serait traduite par le *prompt* suivant si l'on utilisait Llama 3.2 : ``` <|begin_of_text|><|start_header_id|>system<|end_header_id|> Cutting Knowledge Date: December 2023 Today Date: 10 Feb 2025 <|eot_id|><|start_header_id|>user<|end_header_id|> J'ai besoin d'aide avec ma commande<|eot_id|><|start_header_id|>assistant<|end_header_id|> Je serais ravi de vous aider. Pourriez-vous fournir votre numéro de commande ?<|eot_id|><|start_header_id|>user<|end_header_id|> C'est COMMANDE-123<|eot_id|><|start_header_id|>assistant<|end_header_id|> ``` Les gabarits peuvent gérer des conversations multi-tours complexes tout en maintenant le contexte : ```python messages = [ {"role": "system", "content": "Vous êtes un tuteur de mathématiques."}, {"role": "user", "content": "Qu'est-ce que le calcul ?"}, {"role": "assistant", "content": "Le calcul est une branche des mathématiques..."}, {"role": "user", "content": "Pouvez-vous me donner un exemple ?"}, ] ``` ## Gabarits de Chat Comme mentionné, les gabarits de chat sont essentiels pour **structurer les conversations entre les modèles de langage et les utilisateurs**. Ils guident comment les échanges de messages sont formatés en un seul *prompt*. ### Modèles de Base vs. 
Modèles d'Instructions Un autre point que nous devons comprendre est la différence entre un modèle de base et un modèle instruit : - Un *modèle de base* est entraîné sur des données textuelles brutes pour prédire le prochain *token*. - Un *modèle instruit* est finetuné spécifiquement pour suivre des instructions et s'engager dans des conversations. Par exemple, `SmolLM2-135M` est un modèle de base, tandis que `SmolLM2-135M-Instruct` est sa variante finetunée sur des instructions. Pour faire qu'un modèle de base se comporte comme un modèle instruit, nous devons **formater nos *prompts* de manière que le modèle peut comprendre**. C'est là qu'interviennent les gabarits de chat. *ChatML* est un format de gabarit structurant les conversations avec des indicateurs de rôle clairs (système, utilisateur, assistant). Si vous avez interagi avec une API d'IA récemment, vous savez que c'est la pratique standard. Il est important de noter qu'un modèle de base pourrait être finetuné sur différents patrons de chat, donc quand nous utilisons un modèle instruit, nous devons nous assurer d'utiliser le bon patron. ### Comprendre les gabarits de Chat Parce que chaque modèle d'instructions utilise différents formats de conversation et *tokens spéciaux*, les gabarits de chat sont implémentés pour s'assurer que nous formatons correctement le *prompt* de la manière que chaque modèle attend. Dans `transformers`, les gabarits incluent du [code Jinja2](https://jinja.palletsprojects.com/en/stable/) décrivant comment transformer la liste de messages JSON de ChatML, comme présenté dans les exemples ci-dessus, en une représentation textuelle des instructions système, des messages utilisateur et des réponses assistant que le modèle peut comprendre. Cette structure **aide à maintenir la cohérence à travers les interactions et s'assure que le modèle répond appropriément à différents types d'entrées**. Voici une version simplifiée du gabarit de `SmolLM2-135M-Instruct` : ```jinja2 {% for message in messages %} {% if loop.first and messages[0]['role'] != 'system' %} <|im_start|>system You are a helpful AI assistant named SmolLM, trained by Hugging Face <|im_end|> {% endif %} <|im_start|>{{ message['role'] }} {{ message['content'] }}<|im_end|> {% endfor %} ``` Étant donné ces messages : ```python messages = [ {"role": "system", "content": "Vous êtes un assistant utile focalisé sur les sujets techniques."}, {"role": "user", "content": "Pouvez-vous expliquer ce qu'est un gabarit de chat ?"}, {"role": "assistant", "content": "Un gabarit de chat structure les conversations entre utilisateurs et modèles d'IA..."}, {"role": "user", "content": "Comment l'utiliser ?"}, ] ``` Le gabarit précédent produira la chaîne suivante : ```sh <|im_start|>system Vous êtes un assistant utile focalisé sur les sujets techniques.<|im_end|> <|im_start|>user Pouvez-vous expliquer ce qu'est un gabarit de chat ?<|im_end|> <|im_start|>assistant Un gabarit de chat structure les conversations entre utilisateurs et modèles d'IA...<|im_end|> <|im_start|>user Comment l'utiliser ?<|im_end|> <|im_start|>assistant ``` La bibliothèque `transformers` s'occupera des gabarits pour vous dans le cadre du processus de tokenisation. Pour en savoir plus sur la façon dont les transformers utilisent les gabarits, nous conseillons de lire <a href="https://huggingface.co/docs/transformers/main/en/chat_templating#how-do-i-use-chat-templates" target="_blank">cette page</a>. 
Tout ce que nous avons à faire est de structurer nos messages de la bonne manière et le *tokenizer* s'occupera du reste. Vous pouvez expérimenter avec le *Space* suivant pour voir comment la même conversation serait formatée pour différents modèles en utilisant leurs gabarits correspondants : <iframe src="https://jofthomas-chat-template-viewer.hf.space" frameborder="0" width="850" height="450" ></iframe> ### Convertir des messages en un prompt La façon la plus simple de s'assurer que votre LLM reçoit une conversation correctement formatée est d'utiliser l'argument `chat_template` du tokenizer du modèle. ```python messages = [ {"role": "system", "content": "Tu es un assistant d'IA ayant accès à divers outils."}, {"role": "user", "content": "Salut !"}, {"role": "assistant", "content": "Salut humain, comment puis-je t'aider ?"}, ] ``` Pour convertir la conversation précédente en un *prompt*, nous chargeons le *tokenizer* et appelons `apply_chat_template`: ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct") rendered_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) ``` Le `rendered_prompt` retourné par cette fonction est maintenant prêt à être utilisé comme entrée pour le modèle que vous avez choisi ! > Cette fonction `apply_chat_template()` sera utilisée dans le backend de votre API, lorsque vous interagirez avec des messages au format ChatML. Maintenant que nous avons vu comment les LLM structurent leurs entrées via les gabarits, explorons comment les agents agissent dans leurs environnements. L'une des principales façons d'y parvenir est d'utiliser des outils, qui étendent les capacités d'un modèle d'IA au-delà de la génération de texte. Nous reparlerons des messages dans les prochaines unités, mais si vous souhaitez approfondir la question dès maintenant, jetez un coup d'œil à : - <a href="https://huggingface.co/docs/transformers/main/en/chat_templating" target="_blank">Le guide d'Hugging Face sur les gabarits de chat</a> - <a href="https://huggingface.co/docs/transformers" target="_blank">la documentation de Transformers</a>
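À titre d'illustration, voici une esquisse minimale montrant comment le prompt mis en forme par `apply_chat_template` peut ensuite être passé au modèle pour générer le tour suivant de l'assistant. Le choix du modèle et la valeur de `max_new_tokens` sont des hypothèses d'exemple, pas une recommandation du cours :

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "HuggingFaceTB/SmolLM2-1.7B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

messages = [
    {"role": "system", "content": "Tu es un assistant d'IA ayant accès à divers outils."},
    {"role": "user", "content": "Salut !"},
]

# apply_chat_template peut directement renvoyer les tenseurs d'entrée du modèle
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
outputs = model.generate(inputs, max_new_tokens=50)

# On n'affiche que les tokens générés après le prompt
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```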
agents-course/units/fr/unit1/messages-and-special-tokens.mdx/0
{ "file_path": "agents-course/units/fr/unit1/messages-and-special-tokens.mdx", "repo_id": "agents-course", "token_count": 4350 }
12
# Qu'est-ce que LangGraph ? `LangGraph` est un *framework* développé par [LangChain](https://www.langchain.com/) **pour gérer le flux de contrôle des applications qui intègrent un LLM**. ## `LangGraph` est-il différent de `LangChain` ? LangChain fournit une interface standard pour interagir avec les modèles et autres composants, utile pour la récupération, les appels de LLM et les appels d'outils. Les classes de LangChain peuvent être utilisées dans LangGraph, mais ne DOIVENT PAS nécessairement être utilisées. Les *packages* sont différents et peuvent être utilisés de manière isolée, mais, au final, toutes les ressources que vous trouverez en ligne utilisent les deux *packages* main dans la main. ## Quand dois-je utiliser `LangGraph` ? ### Contrôle vs. liberté Lors de la conception d'applications IA, vous faites face à un compromis fondamental entre **contrôle** et **liberté** : - La **liberté** donne à votre LLM plus d'espace pour être créatif et s'attaquer à des problèmes inattendus. - Le **contrôle** vous permet d'assurer un comportement prévisible et de maintenir des garde-fous. Les agents à code, comme ceux que vous pouvez rencontrer dans *smolagents*, sont très libres. Ils peuvent appeler plusieurs outils en une seule étape d'action, créer leurs propres outils, etc. Cependant, ce comportement peut les rendre moins prévisibles et moins contrôlables qu'un agent standard travaillant avec un *JSON* ! `LangGraph` est à l'autre extrémité du spectre, il brille lorsque vous avez besoin de **contrôle** sur l'exécution de votre agent. Il vous donne les outils pour créer une application qui suit un processus prévisible tout en exploitant toujours la puissance des LLM. En termes simples, si votre application implique une série d'étapes qui doivent être orchestrées d'une manière spécifique, avec des décisions prises à chaque point de jonction, **LangGraph fournit la structure dont vous avez besoin**. À titre d'exemple, disons que nous voulons créer un assistant LLM qui peut répondre à quelques questions sur certains documents. Comme les LLM comprennent mieux le texte, avant de pouvoir répondre à la question, vous devrez convertir d'autres modalités complexes (graphiques, tableaux) en texte. Cependant, ce choix dépend du type de document que vous avez ! C'est un embranchement que j'ai choisi de représenter comme suit : <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/LangGraph/flow.png" alt="Control flow"/> > 💡 **Astuce :** La partie gauche n'est pas un agent, car ici aucun appel d'outil n'est impliqué. Mais la partie droite devra écrire du code pour interroger le *xls* (convertir en *pandas* et le manipuler). Bien que cet embranchement soit déterministe, vous pouvez également concevoir un embranchement conditionné par la sortie d'un LLM, les rendant indéterministes. Les scénarios clés où LangGraph excelle incluent : - **Le processus de raisonnement en plusieurs étapes** qui nécessitent un contrôle explicite sur le flux - **Des applications nécessitant la persistance de l'état** entre les étapes - **Des systèmes qui combinent la logique déterministe avec les capacités d'une IA** - ***Des workflows* qui nécessitent des interventions *human-in-the-loop*** - **Des architectures d'agents complexes** avec plusieurs composants travaillant ensemble En substance, chaque fois que cela est possible, en tant qu'être humain, concevez un flux d'actions basé sur les résultats de chaque action, et décidez de ce qu'il faut exécuter ensuite en conséquence. 
Dans ce cas, LangGraph est le bon *framework* pour vous ! `LangGraph` est, à mon avis, le *framework* d'agents le plus prêt pour la production sur le marché. ## Comment fonctionne LangGraph ? Au cœur de `LangGraph` se trouve une structure de graphe dirigé pour définir le flux de votre application : - **Les nœuds** représentent des étapes de traitement individuelles (comme appeler un LLM, utiliser un outil, ou prendre une décision). - **Les arêtes** définissent les transitions possibles entre les étapes. - **L'état** est défini par l'utilisateur et maintenu et transmis entre les nœuds pendant l'exécution. Lors de la décision du prochain nœud à cibler, c'est l'état actuel que nous regardons. Nous explorerons ces blocs fondamentaux plus en détail dans le prochain chapitre ! ## En quoi est-ce différent du Python standard ? Pourquoi ai-je besoin de LangGraph ? Vous pourriez vous demander : « Je pourrais juste écrire du code Python standard avec des instructions *if-else* pour gérer tous ces flux, non ? » Bien que techniquement vrai, LangGraph offre **certains avantages** par rapport au Python standard pour créer des systèmes complexes. Vous pourriez créer la même application sans LangGraph, mais il construit des outils et des abstractions plus faciles pour vous. Il inclut des états, une visualisation, une journalisation (traces), de l'*human-in-the-loop* intégré, et plus encore.
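Pour fixer les idées, voici une esquisse minimale et purement illustrative (les noms de nœuds et la logique sont inventés pour l'exemple) qui reprend l'embranchement « texte ou xls » décrit plus haut, avec un état, des nœuds et une arête conditionnelle :

```python
from typing import TypedDict
from langgraph.graph import StateGraph, START, END

# L'état défini par l'utilisateur, transmis de nœud en nœud
class State(TypedDict):
    document_type: str
    resultat: str

# Chaque nœud reçoit l'état et retourne une mise à jour partielle
def traiter_texte(state: State) -> dict:
    return {"resultat": "résumé du document texte"}

def traiter_xls(state: State) -> dict:
    return {"resultat": "tableur converti en pandas puis analysé"}

# L'arête conditionnelle choisit le prochain nœud à partir de l'état courant
def router(state: State) -> str:
    return "traiter_xls" if state["document_type"] == "xls" else "traiter_texte"

builder = StateGraph(State)
builder.add_node("traiter_texte", traiter_texte)
builder.add_node("traiter_xls", traiter_xls)
builder.add_conditional_edges(START, router)
builder.add_edge("traiter_texte", END)
builder.add_edge("traiter_xls", END)

graph = builder.compile()
print(graph.invoke({"document_type": "xls", "resultat": ""}))
```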
agents-course/units/fr/unit2/langgraph/when_to_use_langgraph.mdx/0
{ "file_path": "agents-course/units/fr/unit2/langgraph/when_to_use_langgraph.mdx", "repo_id": "agents-course", "token_count": 1690 }
13
# Petit Quiz (non noté) [[quiz1]] Testons votre compréhension de `smolagents` avec un rapide quiz ! N'oubliez pas, se tester aide à renforcer l'apprentissage et à identifier les domaines qui pourraient nécessiter une révision. Ceci est un quiz optionnel et il n'est pas noté. ### Q1 : Quel est l'un des principaux avantages de choisir `smolagents` par rapport à d'autres *frameworks* ? Quelle affirmation capture le mieux une force fondamentale de l'approche `smolagents` ? <Question choices={[ { text: "Il utilise des fichiers de configuration hautement spécialisés et une courbe d'apprentissage abrupte pour s'assurer que seuls les développeurs experts peuvent l'utiliser", explain: "smolagents est conçu pour la simplicité et une complexité de code minimale, pas pour des courbes d'apprentissage abruptes.", }, { text: "Il supporte une approche orientée code avec des abstractions minimales, permettant aux agents d'interagir directement via des appels de fonctions Python", explain: "Oui, smolagents met l'accent sur une conception simple et centrée sur le code avec des abstractions minimales.", correct: true }, { text: "Il se concentre sur des actions basées sur du JSON, éliminant le besoin pour les agents d'écrire du code", explain: "Bien que smolagents supporte les appels d'outils basés sur du JSON (ToolCallingAgents), la bibliothèque met l'accent sur les approches basées sur le code avec CodeAgents.", }, { text: "Il s'intègre profondément avec un seul fournisseur de LLM et du matériel spécialisé", explain: "smolagents supporte plusieurs fournisseurs de modèles et ne nécessite pas de matériel spécialisé.", } ]} /> --- ### Q2 : Dans quel scénario bénéficieriez-vous probablement le plus de l'utilisation de smolagents ? Quelle situation s'aligne bien avec ce que *smolagents* fait de mieux ? <Question choices={[ { text: "Prototyper ou expérimenter rapidement avec la logique d'agent, particulièrement quand votre application est relativement simple", explain: "Oui. smolagents est conçu pour la création d'agents simple et agile sans surcharge de configuration importante.", correct: true }, { text: "Construire un système à grande échelle pour une entreprise où vous avez besoin de dizaines de microservices et de pipelines de données en temps réel", explain: "Bien que possible, smolagents est plus axé sur l'expérimentation légère et centrée sur le code plutôt que sur l'infrastructure lourde nécessaire aux entreprises.", }, { text: "Avoir besoin d'un framework qui ne supporte que les LLM basés sur le cloud et interdit l'inférence locale", explain: "smolagents offre une intégration flexible avec des modèles locaux ou hébergés, pas exclusivement des LLM basés sur le cloud.", }, { text: "Un scénario qui nécessite une orchestration avancée, une perception multimodale et des fonctionnalités à l'échelle de l'entreprise prêtes à l'emploi", explain: "Bien que vous puissiez intégrer des capacités avancées, smolagents lui-même est léger et minimal dans son cœur.", } ]} /> --- ### Q3 : smolagents offre de la flexibilité dans l'intégration des modèles. Quelle affirmation reflète le mieux son approche ? Choisissez la description la plus précise de la façon dont *smolagents* interagit avec les LLM. 
<Question choices={[ { text: "Il ne fournit qu'un seul modèle intégré et ne permet pas d'intégrations personnalisées", explain: "smolagents supporte plusieurs backends différents et des modèles définis par l'utilisateur.", }, { text: "Il vous oblige à implémenter votre propre connecteur de modèle pour chaque utilisation de LLM", explain: "Il existe plusieurs connecteurs préconçus qui rendent l'intégration LLM simple.", }, { text: "Il s'intègre uniquement avec des LLM open-source mais pas avec des API commerciales", explain: "smolagents peut s'intégrer à la fois avec des API de modèles open-source et commerciales.", }, { text: "Il peut être utilisé avec une large gamme de LLM, offrant des classes prédéfinies comme TransformersModel, InferenceClientModel et LiteLLMModel", explain: "C'est correct. smolagents supporte une intégration flexible des modèles à travers diverses classes.", correct: true } ]} /> --- ### Q4 : Comment smolagents gère-t-il le débat entre les actions basées sur le code et les actions basées sur du JSON ? Quelle affirmation caractérise correctement la philosophie de *smolagents* concernant les formats d'action ? <Question choices={[ { text: "Il n'autorise que des actions basées sur du JSON pour toutes les tâches d'agent, nécessitant un analyseur pour extraire les appels d'outils", explain: "ToolCallingAgent utilise des appels basés sur du JSON, mais smolagents fournit également une option CodeAgent principale qui écrit du code Python.", }, { text: "Il se concentre sur les actions basées sur le code via un CodeAgent mais supporte également les appels d'outils basés sur du JSON avec un ToolCallingAgent", explain: "Oui, smolagents recommande principalement les actions basées sur le code mais inclut une alternative basée sur JSON pour les utilisateurs qui la préfèrent ou en ont besoin.", correct: true }, { text: "Il interdit tout appel de fonction externe, exigeant plutôt que toute la logique réside entièrement dans le LLM", explain: "smolagents est spécifiquement conçu pour accorder aux LLM la capacité d'appeler des outils ou du code de manière externe.", }, { text: "Il oblige les utilisateurs à convertir manuellement chaque extrait de code en objet JSON avant d'exécuter l'agent", explain: "smolagents peut gérer automatiquement la création d'extraits de code dans le chemin CodeAgent, aucune conversion JSON manuelle n'est nécessaire.", } ]} /> --- ### Q5 : Comment smolagents s'intègre-t-il avec le Hub d'Hugging Face pour des bénéfices supplémentaires ? Quelle affirmation décrit avec précision l'un des avantages principaux de l'intégration avec le Hub ? 
<Question choices={[ { text: "Il met automatiquement à niveau tous les modèles publics vers des niveaux de licence commerciale", explain: "L'intégration avec le Hub ne change pas le niveau de licence pour les modèles ou les outils.", }, { text: "Il désactive entièrement l'inférence locale, forçant uniquement l'utilisation de modèles distants", explain: "Les utilisateurs peuvent toujours faire de l'inférence locale s'ils le préfèrent ; pousser vers le Hub ne remplace pas l'utilisation locale.", }, { text: "Il vous permet de pousser et partager des agents ou des outils, les rendant facilement découvrables et réutilisables par d'autres développeurs", explain: "smolagents supporte le téléversement d'agents et d'outils sur le Hub pour que d'autres puissent les réutiliser.", correct: true }, { text: "Il stocke de manière permanente tous vos agents basés sur le code, empêchant toute mise à jour ou versioning", explain: "Les dépôts sur le Hub supportent les mises à jour et le contrôle de version, vous pouvez donc réviser vos agents basés sur le code à tout moment.", } ]} /> --- Félicitations d'avoir terminé ce quiz ! 🎉 Si vous avez manqué des questions, envisagez de revoir la section *Pourquoi utiliser smolagents* pour une compréhension plus approfondie. Si vous avez bien réussi, vous êtes prêt à explorer des sujets plus avancés dans *smolagents* !
agents-course/units/fr/unit2/smolagents/quiz1.mdx/0
{ "file_path": "agents-course/units/fr/unit2/smolagents/quiz1.mdx", "repo_id": "agents-course", "token_count": 2570 }
14
# Obtenez votre certificat 🎓 Si vous avez obtenu un score **supérieur à 30%, félicitations ! 👏 Vous êtes maintenant éligible pour réclamer votre certificat officiel.** Suivez les étapes ci-dessous pour le recevoir : 1. Visitez la [page du certificat](https://huggingface.co/spaces/agents-course/Unit4-Final-Certificate). 2. **Connectez-vous** avec votre compte Hugging Face en utilisant le bouton fourni. 3. **Entrez votre nom complet**. C'est le nom qui apparaîtra sur votre certificat. 4. Cliquez sur **"*Obtenir mon certificat*"** pour vérifier votre score et télécharger votre certificat. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit4/congrats.png" alt="Congrats!" /> Une fois que vous avez obtenu votre certificat, n'hésitez pas à : - L'ajouter à votre **profil *LinkedIn*** 🧑‍💼 - Le partager sur **X**, **Bluesky**, etc. 🎉 **N'oubliez pas de taguer [@huggingface](https://huggingface.co/huggingface). Nous serions super fiers et nous aimerions vous encourager ! 🤗** <Tip> Si vous avez des problèmes avec la soumission, veuillez ouvrir une discussion sur [l'onglet communauté de la certification](https://huggingface.co/spaces/agents-course/Unit4-Final-Certificate/discussions). </Tip>
agents-course/units/fr/unit4/get-your-certificate.mdx/0
{ "file_path": "agents-course/units/fr/unit4/get-your-certificate.mdx", "repo_id": "agents-course", "token_count": 470 }
15
# 관찰: 피드백을 통합하여 성찰하고 적응하기 [[observe-integrating-feedback-to-reflect-and-adapt]] 관찰은 **에이전트가 자신의 행동 결과를 인식하는 방법**입니다. 이는 에이전트의 사고 과정을 촉진하고 향후 행동을 안내하는 중요한 정보를 제공합니다. 관찰은 **환경으로부터의 신호**입니다. API의 데이터, 오류 메시지, 또는 시스템 로그와 같은 정보가 다음 사고 주기를 이끕니다. 관찰 단계에서 에이전트는: - **피드백 수집:** 행동이 성공했는지(또는 실패했는지)에 대한 데이터나 확인을 받습니다. - **결과 추가:** 새로운 정보를 기존 맥락에 통합하여 실질적으로 기억을 업데이트합니다. - **전략 조정:** 이렇게 업데이트된 맥락을 활용하여 이후의 사고와 행동을 개선합니다. 예를 들어, 날씨 API가 *"구름 조금, 15°C, 습도 60%"*와 같은 데이터를 반환하면, 이 관찰 결과는 에이전트의 기억(프롬프트 끝부분)에 추가됩니다. 그런 다음 에이전트는 이를 활용해 추가 정보가 필요한지 아니면 최종 답변을 제공할 준비가 되었는지 결정합니다. 이러한 **피드백의 반복적 통합은 에이전트가 목표에 계속 맞춰 나가도록 보장**하며, 실제 결과를 바탕으로 지속적으로 학습하고 조정합니다. 이러한 관찰은 **웹페이지 텍스트를 읽는 것부터 로봇 팔의 위치를 모니터링하는 것까지 다양한 형태**를 취할 수 있습니다. 이는 행동 실행에 대한 텍스트 피드백을 제공하는 도구 "로그"와 같이 볼 수 있습니다. | 관찰 유형 | 예시 | |---------------------|---------------------------------------------------------------------------| | 시스템 피드백 | 오류 메시지, 성공 알림, 상태 코드 | | 데이터 변경 | 데이터베이스 업데이트, 파일 시스템 수정, 상태 변화 | | 환경 데이터 | 센서 읽기, 시스템 지표, 자원 사용량 | | 응답 분석 | API 응답, 쿼리 결과, 계산 출력 | | 시간 기반 이벤트 | 기한 도달, 예약 작업 완료 | ## 결과는 어떻게 추가되나요? [[how-are-the-results-appended]] 행동을 수행한 후, 프레임워크는 다음 단계를 순서대로 따릅니다: 1. **행동을 분석**하여 호출할 함수와 사용할 인수를 식별합니다. 2. **행동을 실행**합니다. 3. **결과를 추가**하여 **관찰**합니다. --- 이제 에이전트의 사고-행동-관찰 주기에 대해 배웠습니다. 일부 측면이 아직 명확하지 않더라도 걱정하지 마세요. 이후 단원에서 이러한 개념을 다시 살펴보고 더 깊이 이해할 기회가 있을 것입니다. 이제 첫 번째 에이전트를 직접 코딩하여 지금까지 배운 지식을 실습해 볼 시간입니다!
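첫 번째 에이전트를 코딩하기 전에, 위에서 설명한 1~3단계(행동 분석, 실행, 결과 추가)를 개념적으로 보여 주는 간단한 파이썬 예시를 하나 남겨 둡니다. 도구 이름과 반환 값은 설명을 위한 가정일 뿐이며, 실제 프레임워크 구현과는 다를 수 있습니다:

```python
def run_tool(name: str, args: dict) -> str:
    # 가정된 더미 도구: 실제로는 날씨 API 등을 호출하게 됩니다
    if name == "get_weather":
        return f"{args['location']}: 구름 조금, 15°C, 습도 60%"
    return "알 수 없는 도구"

def append_observation(memory: str, action: str, result: str) -> str:
    # 관찰(Observation)을 기존 맥락(프롬프트) 끝에 덧붙여 다음 사고 주기로 전달합니다
    return memory + action + f"\nObservation: {result}\n"

memory = "Question: 런던의 날씨는 어때?\n"
action = 'Action: {"action": "get_weather", "action_input": {"location": "London"}}'
result = run_tool("get_weather", {"location": "London"})   # 1~2단계: 행동을 분석한 뒤 실행
memory = append_observation(memory, action, result)        # 3단계: 결과를 추가
print(memory)
```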
agents-course/units/ko/unit1/observations.mdx/0
{ "file_path": "agents-course/units/ko/unit1/observations.mdx", "repo_id": "agents-course", "token_count": 2384 }
16
# Добро пожаловать на курс 🤗 ИИ Агенты [[introduction]] <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/community_translation.png" alt="Community translation banner" width="100%"/> <figure> <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/thumbnail.jpg" alt="Миниатюра курса AI Агенты" width="100%"/> <figcaption>Фон изображения был сгенерирован с помощью <a href="https://scenario.com/">Scenario.com</a> </figcaption> </figure> Добро пожаловать на самую захватывающую тему в ИИ на сегодняшний день: **Агенты**! Этот бесплатный курс проведет вас по пути **от новичка до эксперта** в понимании, использовании и создании ИИ агентов. Этот первый блок поможет вам освоиться в материале: - Ознакомьтесь с **учебным планом курса**. - **Выберите путь**, по которому вы собираетесь идти (самооценка или процесс сертификации). - **Получите дополнительную информацию о процессе сертификации и сроках**. - Познакомьтесь с командой, создавшей этот курс. - Создайте свою учетную запись **Hugging Face**. - **Зарегистрируйтесь на нашем сервере Discord** и познакомьтесь со своими одноклассниками и с нами. Давайте начнем! ## Что ожидать от этого курса? [[expect]] В этом курсе вы узнаете: - 📖 Изучите AI агентов в **теории, дизайне и на практике.** - 🧑‍💻 Научитесь **использовать известные библиотеки ИИ-агентов**, такие как [smolagents](https://huggingface.co/docs/smolagents/en/index), [LangChain](https://www.langchain.com/), и [LlamaIndex](https://www.llamaindex.ai/). - 💾 **Поделитесь своими агентами** на Hugging Face Hub и изучите агентов, созданных сообществом. - 🏆 Примите участие в испытаниях, где вы будете **оценивать своих агентов в сравнении с агентами других студентов.** - 🎓 Выполнив задания, вы **получите сертификат об окончании курса.** И многое другое! В конце этого курса вы поймете, **как работают агенты и как создавать свои собственных агентов с помощью новейших библиотек и инструментов**. Не забудьте **<a href="https://bit.ly/hf-learn-agents">записаться на курс!</a>** (Мы с уважением относимся к вашей конфиденциальности. Мы собираем ваш адрес электронной почты, чтобы **высылать вам ссылки, когда каждый блок будет опубликован, и предоставлять вам информацию о задачах и обновлениях).** ## Как выглядит курс? [[course-look-like]] Курс состоит из: - *Фундаментальные разделы*: здесь вы изучите **концепции агентов в теории**. - *Практические занятия: здесь вы научитесь **использовать готовые библиотеки агентов ИИ** для обучения агентов в уникальных условиях. Эти практические секции будут представлять собой **пространства (Spaces) Hugging Face** с предварительно настроенной средой. - *Задания на применение*: в них вы будете применять изученные концепции для решения реальной проблемы, которую вы выберете сами. - *Соревнования*: вы сможете "отправить" своего агента на соревнование с другими агентами. Также будет [таблица результатов](https://huggingface.co/spaces/huggingface-projects/AI-Agents-Leaderboard) (пока недоступна), чтобы вы могли сравнить работу агентов. Этот **курс - живой проект, развивающийся благодаря вашим отзывам и вкладу!** Не стесняйтесь [открывать проблемы (issues) и PR на GitHub](https://github.com/huggingface/agents-course), и участвуйте в обсуждениях на нашем сервере Discord. После прохождения курса вы также можете оставить свой отзыв [👉 через эту форму](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog) ## Какова программа курса? 
[[syllabus]] Здесь представлен **общий план курса**. Более подробный список тем будет опубликован к каждому разделу. | Раздел | Тема | Описание | | :---- | :---- | :---- | | 0 | Вводная часть | Подготовим для вас инструменты и платформы, которые вы будете использовать. | | 1 | Основы работы агента | Объясняем инструменты, мысли, действия, наблюдения и их форматы. Расскажем о LLM, сообщениях, специальных токенах и шаблонах чата. Продемонстрируем простой пример использования функций Python в качестве инструментов. | | 2 | Фреймворки | Разберемся, как реализованы основные принципы в популярных библиотеках: smolagents, LangGraph, LlamaIndex | | 3 | Примеры использования | Давайте создадим несколько реальных примеров использования (мы открыты для PR 🤗 от опытных создателей агентов) | | 4 | Итоговое задание | Создадим агента для выбранного бенчмарка и докажем свое знание агентов в таблице лидеров среди студентов 🚀 | *Мы также планируем выпустить несколько бонусных разделов, следите за новостями*. ## Каковы предварительные требования? Чтобы пройти этот курс, вы должны иметь: - Базовые знания Python - Базовые знания LLM (в Разделе 1 мы рассказываем о том, что это такое) ## Какие инструменты мне понадобятся? [[tools]] Вам нужно всего лишь 2 вещи: - *Компьютер* с подключением к Интернету. - Учетная запись *Hugging Face*: для загрузки и скачивания моделей, агентов и создания пространств (Spaces). Если у вас еще нет аккаунта, вы можете создать его **[здесь](https://hf.co/join)** (это бесплатно). <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/tools.jpg" alt="Course tools needed" width="100%"/> ## Процесс сертификации [[certification-process]] <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/three-paths.jpg" alt="Два пути" width="100%"/> Вы можете пройти этот курс *в режиме аудита (самопроверки)* или выполнить задания и *получить один из двух сертификатов, которые мы выдадим*. Если вы прослушаете курс (режим самопроверки), вы сможете участвовать во всех заданиях и выполнять их, если захотите, и **вам не нужно будет уведомлять нас**. Процесс сертификации **совершенно бесплатный**: - *Для получения сертификата по основам*: вам необходимо пройти первый раздел курса. Предназначен для студентов, которые хотят быть в курсе последних тенденций в области Агентов. - *Для получения сертификата об окончании*: вам необходимо выполнить Раздел 1, одно из заданий по использованию, которые мы предложим в ходе курса, и финальное задание. Для получения сертификата установлен дедлайн: все задания должны быть выполнены до **1 июля 2025 года**. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/deadline.jpg" alt="Сроки" width="100%"/> ## Каков рекомендуемый темп? [[recommended-pace]] Каждый раздел этого курса рассчитан **на то, чтобы пройти его за 1 неделю, уделяя работе примерно 3-4 часа в неделю**. Поскольку есть крайний срок, мы предлагаем вам рекомендуемый темп: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/recommended-pace.jpg" alt="Рекомендованный темп" width="100%"/> ## Как извлечь максимальную пользу из курса? [[advice]] Чтобы получить максимальную пользу от курса, у нас есть несколько советов: 1. <a href="https://discord.gg/UrrTSsSyjb">Присоединяйтесь к учебным группам в Discord</a>: учиться в группах всегда проще. Для этого вам нужно присоединиться к нашему серверу Discord и подтвердить свой аккаунт Hugging Face. 2. 
**Выполняйте тесты и задания**: лучший способ обучения - это практическая работа и самооценка. 3. **Определите расписание, чтобы оставаться в своём потоке выполняющих курс**: вы можете воспользоваться нашим рекомендованным расписанием темпа, приведенным ниже, или создать своё. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/advice.jpg" alt="Советы по курсу" width="100%"/> ## Кто мы [[who-are-we]] Об авторах: ### Джоффри Томас (Joffrey Thomas) Джоффри - инженер машинного обучения в компании Hugging Face, он создал и внедрил в производство ИИ-агенты. Джоффри будет вашим основным преподавателем на этом курсе. - [Следуйте за Джоффри на Hugging Face](https://huggingface.co/Jofthomas) - [Следуйте за Джоффри на X](https://x.com/Jthmas404) - [Следуйте за Джоффри на Linkedin](https://www.linkedin.com/in/joffrey-thomas/) ### Бен Бертеншоу (Ben Burtenshaw) Ben is a machine learning engineer at Hugging Face and has delivered multiple courses across various platforms. Ben's goal is to make the course accessible to everyone. - [Следуйте за Беном на Hugging Face](https://huggingface.co/burtenshaw) - [Следуйте за Беном на X](https://x.com/ben_burtenshaw) - [Следуйте за Беном на Linkedin](https://www.linkedin.com/in/ben-burtenshaw/) ### Томас Симонини (Thomas Simonini) Томас - инженер машинного обучения в компании Hugging Face, он успешно реализовал курсы <a href="https://huggingface.co/learn/deep-rl-course/unit0/introduction">Deep RL</a> и <a href="https://huggingface.co/learn/ml-games-course/en/unit0/introduction">ML для игр</a>. Томас - большой поклонник Агентов, и ему не терпится увидеть, что создаст сообщество. - [Следите за Томасом на Hugging Face](https://huggingface.co/ThomasSimonini) - [Следите за Томасом на X](https://x.com/ThomasSimonini) - [Следите за Томасом на Linkedin](https://www.linkedin.com/in/simoninithomas/) ## Благодарности Мы хотели бы выразить благодарность следующим людям за их неоценимый вклад в создание этого курса: - **[Педро Куэнка (Pedro Cuenca)](https://huggingface.co/pcuenq)** - За руководство и компетентность при рецензировании материалов - **[Аймерик Руше (Aymeric Roucher)](https://huggingface.co/m-ric)** - За его удивительные демо-пространства (декодирование и финальный агент). - **[Джошуа Лохнер (Joshua Lochner)](https://huggingface.co/Xenova)** - За потрясающее демо-пространство по токенизации. ## Я нашел ошибку или хочу улучшить курс [[contribute]] Вклад в развитие курса **приветствуется** 🤗 - Если вы *нашли ошибку 🐛 в блокноте*, пожалуйста <a href="https://github.com/huggingface/agents-course/issues">заведите</a> и **опишите проблему (issue)**. - Если вы *хотите улучшить курс*, вы можете <a href="https://github.com/huggingface/agents-course/pulls">открыть Pull Request.</a> - Если вы *хотите добавить полный раздел или новый блок*, лучше всего <a href="https://github.com/huggingface/agents-course/issues">откройте проблему (issue)</a> и **опишите, какой контент вы хотите добавить, прежде чем приступить к его написанию, чтобы мы могли вас сориентировать**. ## У меня все еще остались вопросы [[questions]] Пожалуйста, задайте свой вопрос на нашем <a href="https://discord.gg/UrrTSsSyjb">discord сервере #ai-agents-discussions.</a> Теперь, когда у вас есть вся необходимая информация, давайте приступим к работе ⛵ <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/time-to-onboard.jpg" alt="Время подняться на борт" width="100%"/>
agents-course/units/ru-RU/unit0/introduction.mdx/0
{ "file_path": "agents-course/units/ru-RU/unit0/introduction.mdx", "repo_id": "agents-course", "token_count": 8917 }
17
# Давайте создадим нашего первого агента с помощью smolagents В прошлом разделе мы узнали, как можно создавать агентов с нуля, используя код на Python, и **увидели, насколько утомительным может быть этот процесс**. К счастью, многие библиотеки Агентов упрощают эту работу, **выполняя большую часть тяжелой работы за вас**. В этом уроке **вы создадите своего первого агента**, способного выполнять такие действия, как генерация изображений, веб-поиск, проверка часового пояса и многое другое! Вы также опубликуете своего агента **в пространстве Hugging Face Space, чтобы вы могли поделиться им с друзьями и коллегами**. Давайте начнем! ## Что такое smolagents? <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/smolagents.png" alt="smolagents"/> Для создания этого агента мы будем использовать библиотеку `smolagents`, которая **предоставляет основу для разработки агентов с легкостью**. Эта легковесная библиотека создана для простоты, но она абстрагирует большую часть сложности создания агента, позволяя вам сосредоточиться на разработке поведения агента. В следующем разделе мы углубимся в изучение smolagents. А пока вы можете ознакомиться с этой <a href="https://huggingface.co/blog/smolagents" target="_blank">статьей в блоге</a> или с <a href="https://github.com/huggingface/smolagents" target="_blank">репозиторием библиотеки на GitHub</a>. Вкратце, `smolagents` - это библиотека, ориентированная на **Агентов кода**, вид агента, который выполняет **"Действия"** через блоки кода, а затем **"Наблюдает"** за результатами, выполняя код. Вот пример того, что мы будем создавать! Мы предоставили нашему агенту **Инструмент генерации изображений** и попросили его сгенерировать изображение кошки. Агент внутри `smolagents` будет иметь **такое же поведение, как и пользовательский агент, который мы построили ранее**: он будет **думать, действовать и наблюдать в цикле**, пока не придет к окончательному ответу: <iframe width="560" height="315" src="https://www.youtube.com/embed/PQDKcWiuln4?si=ysSTDZoi8y55FVvA" title="Видеоплеер YouTube" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe> Захватывающе, правда? ## Давайте создадим нашего агента! Для начала продублируйте это пространство (Space): <a href="https://huggingface.co/spaces/agents-course/First_agent_template" target="_blank">https://huggingface.co/spaces/agents-course/First_agent_template</a> > Спасибо <a href="https://huggingface.co/m-ric" target="_blank">Aymeric</a> за этот шаблон! 🙌 Дублирование этого пространства означает **создание локальной копии в вашем собственном профиле**: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/duplicate-space.gif" alt="Дубликат"/> На протяжении всего этого урока единственным файлом, который вам придется изменить, будет (на данный момент неполный) **"app.py »**. Здесь вы можете увидеть [оригинал в шаблоне](https://huggingface.co/spaces/agents-course/First_agent_template/blob/main/app.py). Чтобы найти свой, зайдите в свою копию пространства, затем перейдите на вкладку `Files`, а затем на `app.py` в списке каталогов. 
Давайте разберем код вместе: - Файл начинается с простого, но необходимого импорта библиотек ```python from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel, load_tool, tool import datetime import requests import pytz import yaml from tools.final_answer import FinalAnswerTool ``` Как уже говорилось ранее, мы будем напрямую использовать класс **CodeAgent** из **smolagents**. ### Инструменты Теперь перейдем к инструментам! Если вы хотите узнать больше об инструментах, не стесняйтесь вернуться к разделу курса [Инструменты](tools). ```python @tool def my_custom_tool(arg1:str, arg2:int)-> str: # важно указать возвращаемый тип # Сохраните этот формат для описания инструмента / описания аргументов, но не стесняйтесь модифицировать инструмент """Инструмент, который пока ничего не делает Аргументы: arg1: первый аргумент arg2: второй аргумент """ return "Какую магию вы будете создавать?" @tool def get_current_time_in_timezone(timezone: str) -> str: """Инструмент для получения текущего местного времени в указанном часовом поясе. Аргменты: timezone: Строка, представляющая действительный часовой пояс (например, 'America/New_York'). """ try: # Создание объекта timezone tz = pytz.timezone(timezone) # Получение текущего времени в заданном часовом поясе local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S") return f"Текущее местное время в {timezone} составляет: {local_time}" except Exception as e: return f"Ошибка получения времени для часового пояса '{timezone}': {str(e)}" ``` Инструменты - это то, что мы призываем вас создать в этом разделе! Мы приводим два примера: 1. **нерабочий фиктивный инструмент**, который вы можете модифицировать, чтобы сделать что-то полезное. 2. **действительно работающий инструмент**, который получает текущее время в любой точке мира. Чтобы определить свой инструмент, необходимо: 1. Предоставить входной и выходной типы для вашей функции, как в `get_current_time_in_timezone(timezone: str) -> str:` 2. **Написать хорошо отформатированную строку документации**. `smolagents` ожидает, что все аргументы будут иметь **текстовое описание в строке документации**. ### Агент Он использует [`Qwen/Qwen2.5-Coder-32B-Instruct`](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) в качестве движка LLM. Это очень способная модель, к которой мы будем обращаться через бессерверный API. ```python final_answer = FinalAnswerTool() model = InferenceClientModel( max_tokens=2096, temperature=0.5, model_id='Qwen/Qwen2.5-Coder-32B-Instruct', custom_role_conversions=None, ) with open("prompts.yaml", 'r') as stream: prompt_templates = yaml.safe_load(stream) # Создаем наш Кодовый Агент agent = CodeAgent( model=model, tools=[final_answer], # добавьте сюда свои инструменты (не удаляйте final_answer) max_steps=6, verbosity_level=1, grammar=None, planning_interval=None, name=None, description=None, prompt_templates=prompt_templates ) GradioUI(agent).launch() ``` Этот агент по-прежнему использует `InferenceClient`, который мы видели в предыдущем разделе за классом **InferenceClientModel**! Мы приведем более подробные примеры, когда будем представлять фреймворк в разделе 2. Пока же вам нужно сосредоточиться на **добавлении новых инструментов в список инструментов** с помощью параметра `tools` вашего Агента. Например, вы можете использовать `DuckDuckGoSearchTool`, который был импортирован в первой строке кода, или вы можете изучить `image_generation_tool`, который загружается из Hub позже в коде. 
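Вот как (в виде наброска) мог бы выглядеть список инструментов агента. Обратите внимание: `image_generation_tool` предварительно нужно загрузить через `load_tool`, как показано в полной версии ниже.

```python
# Набросок: передаем агенту готовые и собственные инструменты через параметр `tools`
agent = CodeAgent(
    model=model,
    tools=[
        final_answer,                  # не удаляйте final_answer
        DuckDuckGoSearchTool(),        # веб-поиск
        image_generation_tool,         # генерация изображений (загружен через load_tool)
        get_current_time_in_timezone,  # наш инструмент с часовыми поясами
        my_custom_tool,                # ваш собственный инструмент
    ],
    max_steps=6,
    verbosity_level=1,
    prompt_templates=prompt_templates,
)
```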
**Добавление инструментов даст вашему агенту новые возможности**, попробуйте проявить творческий подход! Полная версия "app.py": ```python from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel, load_tool, tool import datetime import requests import pytz import yaml from tools.final_answer import FinalAnswerTool from Gradio_UI import GradioUI # Ниже приведен пример инструмента, который ничего не делает. Удивите нас своей креативностью! @tool def my_custom_tool(arg1:str, arg2:int)-> str: # важно указать возвращаемый тип # Сохраните этот формат для описания инструмента / описания аргументов, но не стесняйтесь модифицировать инструмент """Инструмент, который пока ничего не делает Аргументы: arg1: первый аргумент arg2: второй аргумент """ return "Какую магию вы будете создавать?" @tool def get_current_time_in_timezone(timezone: str) -> str: """Инструмент для получения текущего местного времени в указанном часовом поясе. Аргменты: timezone: Строка, представляющая действительный часовой пояс (например, 'America/New_York'). """ try: # Создание объекта timezone tz = pytz.timezone(timezone) # Получение текущего времени в заданном часовом поясе local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S") return f"Текущее местное время в {timezone} составляет: {local_time}" except Exception as e: return f"Ошибка получения времени для часового пояса '{timezone}': {str(e)}" final_answer = FinalAnswerTool() model = InferenceClientModel( max_tokens=2096, temperature=0.5, model_id='Qwen/Qwen2.5-Coder-32B-Instruct', custom_role_conversions=None, ) # Импорт инструмента из Hub image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True) with open("prompts.yaml", 'r') as stream: prompt_templates = yaml.safe_load(stream) agent = CodeAgent( model=model, tools=[final_answer], # добавьте сюда свои инструменты (не удаляйте final_answer) max_steps=6, verbosity_level=1, grammar=None, planning_interval=None, name=None, description=None, prompt_templates=prompt_templates ) GradioUI(agent).launch() ``` Ваша **Цель** - познакомиться с Пространством и Агентом. В настоящее время агент в шаблоне **не использует никаких инструментов, поэтому постарайтесь снабдить его некоторыми из готовых инструментов или даже сделать новые инструменты самостоятельно!**. Мы с нетерпением ждем ваших потрясающих выводов агентов в канале discord **#agents-course-showcase**! --- Поздравляем, вы создали своего первого агента! Не стесняйтесь поделиться им со своими друзьями и коллегами. Поскольку это ваша первая попытка, совершенно нормально, если он будет немного глючным или медленным. В следующих разделах мы узнаем, как создавать еще более совершенных агентов. Лучший способ научиться - это попробовать, поэтому не стесняйтесь обновлять его, добавлять новые инструменты, пробовать с другой моделью и т. д. В следующем разделе вы пройдете финальный тест и получите сертификат!
agents-course/units/ru-RU/unit1/tutorial.mdx/0
{ "file_path": "agents-course/units/ru-RU/unit1/tutorial.mdx", "repo_id": "agents-course", "token_count": 8000 }
18
# Thư viện Dummy Agent <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-unit1sub3DONE.jpg" alt="Unit 1 planning"/> Khóa học này không phụ thuộc framework cụ thể vì chúng ta muốn **tập trung vào khái niệm AI agent và tránh sa đà vào chi tiết kỹ thuật của một framework nhất định**. Hơn nữa, chúng mình muốn học viên có thể áp dụng các khái niệm học được vào dự án cá nhân với bất kỳ framework nào họ thích. Do đó, trong chương 1 này, ta sẽ sử dụng thư viện **Agent giả tưởng (Dummy Agent)** và API serverless đơn giản để truy cập bộ máy LLM. Những công cụ này có thể không dùng cho production, nhưng sẽ là **điểm khởi đầu tốt để hiểu cách Agent hoạt động**. Sau phần này, bạn sẽ sẵn sàng **tạo Agent đơn giản** bằng `smolagents`. Ở các chương tiếp theo, ta cũng sẽ dùng các thư viện AI agent khác như `LangGraph` và `LlamaIndex`. Để đơn giản hóa, ta sẽ dùng hàm Python cơ bản làm Tool và Agent. Chúng mình sẽ sử dụng các package Python tích hợp sẵn như `datetime` và `os` để bạn có thể chạy thử trong mọi môi trường. Bạn có thể theo dõi quy trình [trong notebook này](https://huggingface.co/agents-course/notebooks/blob/main/unit1/dummy_agent_library.ipynb) và **tự chạy code**. ## Serverless API Trong hệ sinh thái Hugging Face, có một tính năng tiện lợi gọi là Serverless API cho phép chạy inference trên nhiều mô hình dễ dàng. Không cần cài đặt hay triển khai. Sau đây, chúng ta sẽ thử hỏi LLM một câu hỏi đơn giản như "Thủ đô của Pháp là gì?" (The capital of France is) và mong đợi câu trả lời "Paris". ```python import os from huggingface_hub import InferenceClient ## Bạn cần token từ https://hf.co/settings/tokens, chọn loại token 'read'. Nếu chạy trên Google Colab, hãy thiết lập trong tab "settings" mục "secrets". Đặt tên secret là "HF_TOKEN" os.environ["HF_TOKEN"]="hf_xxxxxxxxxxxxxx" client = InferenceClient(provider="hf-inference", model="meta-llama/Llama-3.3-70B-Instruct") # nếu đầu ra sai ở các cell sau, mô hình miễn phí có thể đang quá tải. Bạn cũng có thể dùng public endpoint này chứa Llama-3.2-3B-Instruct # client = InferenceClient("https://jc26mwg228mkj8dw.us-east-1.aws.endpoints.huggingface.cloud") ``` ```python output = client.text_generation( "The capital of France is", max_new_tokens=100, ) print(output) ``` đầu ra: ``` Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. The capital of France is Paris. ``` Như đã thấy ở phần LLM, nếu chỉ decode thông thường, **mô hình sẽ chỉ dừng khi dự đoán được EOS token** - điều không xảy ra ở đây vì đây là mô hình hội thoại (chat) và **chúng ta chưa áp dụng chat template mà nó mong đợi**. Nếu thêm các Token đặc biệt liên quan đến <a href="https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct">mô hình Llama-3.2-3B-Instruct</a> đang dùng, hành vi sẽ thay đổi và mô hình sẽ tạo ra EOS như mong đợi. ```python prompt="""<|begin_of_text|><|start_header_id|>user<|end_header_id|> The capital of France is<|eot_id|><|start_header_id|>assistant<|end_header_id|>""" output = client.text_generation( prompt, max_new_tokens=100, ) print(output) ``` đầu ra: ``` The capital of France is Paris. 
``` Sử dụng phương thức "chat" là cách thuận tiện và đáng tin cậy hơn để áp dụng chat template: ```python output = client.chat.completions.create( messages=[ {"role": "user", "content": "The capital of France is"}, ], stream=False, max_tokens=1024, ) print(output.choices[0].message.content) ``` đầu ra: ``` Paris. ``` Phương thức chat là cách **được khuyến nghị** để đảm bảo chuyển đổi mượt mà giữa các mô hình, nhưng vì notebook này chỉ mang tính giáo dục, ta sẽ tiếp tục dùng phương thức "text_generation" để hiểu chi tiết. ## Dummy Agent Ở các phần trước, ta đã thấy lõi của thư viện agent là thêm thông tin vào system prompt. System prompt này phức tạp hơn chút so với trước, nhưng đã chứa: 1. **Thông tin về các Tools (công cụ)** 2. **Hướng dẫn chu kỳ** (Tư duy → Hành động → Quan sát) <details> <summary>Bấm để xem bản dịch tiếng Việt</summary> ``` Trả lời các câu hỏi sau tốt nhất có thể. Bạn có quyền truy cập vào các công cụ sau: get_weather: Lấy thời tiết hiện tại ở một địa điểm nhất định Cách bạn sử dụng các công cụ là bằng cách chỉ định một khối json. Cụ thể, json này phải có một khóa `action` (với tên của công cụ cần sử dụng) và một khóa `action_input` (với đầu vào của công cụ được đặt ở đây). Các giá trị duy nhất có thể có trong trường "action" là: get_weather: Lấy thời tiết hiện tại ở một địa điểm nhất định, args: {"location": {"type": "string"}} ví dụ sử dụng : {{ "action": "get_weather", "action_input": {"location": "New York"} }} LUÔN LUÔN sử dụng định dạng sau: Câu hỏi: câu hỏi đầu vào mà bạn phải trả lời Tư duy: bạn nên luôn suy nghĩ về một hành động cần thực hiện. Chỉ một hành động tại một thời điểm trong định dạng này: Hành động: $JSON_BLOB (bên trong khối markdown) Quan sát kết quả của hành động. Quan sát này là duy nhất, đầy đủ và là nguồn sự thật. ... (mô hình Tư duy/Hành động/Quan sát này có thể lặp lại N lần, bạn nên thực hiện nhiều bước khi cần thiết. $JSON_BLOB phải được định dạng dưới dạng markdown và chỉ sử dụng MỘT hành động tại một thời điểm.) Bạn phải luôn kết thúc đầu ra của mình với định dạng sau: Tư duy: Bây giờ tôi đã biết câu trả lời cuối cùng Câu trả lời cuối: câu trả lời cuối cùng cho câu hỏi đầu vào ban đầu Bắt đầu ngay bây giờ! Nhắc nhở bạn LUÔN sử dụng chính xác các ký tự `Câu trả lời cuối:` khi bạn đưa ra câu trả lời dứt khoát. ``` </details> ``` Answer the following questions as best you can. You have access to the following tools: get_weather: Get the current weather in a given location The way you use the tools is by specifying a json blob. Specifically, this json should have an `action` key (with the name of the tool to use) and an `action_input` key (with the input to the tool going here). The only values that should be in the "action" field are: get_weather: Get the current weather in a given location, args: {"location": {"type": "string"}} example use : {{ "action": "get_weather", "action_input": {"location": "New York"} }} ALWAYS use the following format: Question: the input question you must answer Thought: you should always think about one action to take. Only one action at a time in this format: Action: $JSON_BLOB (inside markdown cell) Observation: the result of the action. This Observation is unique, complete, and the source of truth. ... (this Thought/Action/Observation can repeat N times, you should take several steps when needed. The $JSON_BLOB must be formatted as markdown and only use a SINGLE action at a time.) 
You must always end your output with the following format: Thought: I now know the final answer Final Answer: the final answer to the original input question Now begin! Reminder to ALWAYS use the exact characters `Final Answer:` when you provide a definitive answer. ``` Vì đang dùng phương thức "text_generation", ta cần tự áp dụng prompt: <details> <summary>Bấm để xem bản dịch tiếng Việt</summary> ``` prompt=f"""<|begin_of_text|><|start_header_id|>system<|end_header_id|> {SYSTEM_PROMPT} <|eot_id|><|start_header_id|>user<|end_header_id|> Thời tiết ở London thế nào? <|eot_id|><|start_header_id|>assistant<|end_header_id|> """ ``` </details> ``` prompt=f"""<|begin_of_text|><|start_header_id|>system<|end_header_id|> {SYSTEM_PROMPT} <|eot_id|><|start_header_id|>user<|end_header_id|> What's the weather in London ? <|eot_id|><|start_header_id|>assistant<|end_header_id|> """ ``` Ta cũng có thể làm như sau, giống cách hoạt động bên trong phương thức `chat`: ``` messages=[ {"role": "system", "content": SYSTEM_PROMPT}, {"role": "user", "content": "What's the weather in London ?"}, ] from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-3B-Instruct") tokenizer.apply_chat_template(messages, tokenize=False,add_generation_prompt=True) ``` Prompt lúc này là: <details> <summary>Bấm để xem bản dịch tiếng Việt</summary> ``` <|begin_of_text|><|start_header_id|>system<|end_header_id|> Trả lời các câu hỏi sau tốt nhất có thể. Bạn có quyền truy cập vào các công cụ sau: get_weather: Lấy thời tiết hiện tại ở một địa điểm nhất định Cách bạn sử dụng các công cụ là bằng cách chỉ định một khối json. Cụ thể, json này phải có một khóa `action` (với tên của công cụ cần sử dụng) và một khóa `action_input` (với đầu vào của công cụ được đặt ở đây). Các giá trị duy nhất có thể có trong trường "action" là: get_weather: Lấy thời tiết hiện tại ở một địa điểm nhất định, args: {"location": {"type": "string"}} ví dụ sử dụng : {{ "action": "get_weather", "action_input": {"location": "New York"} }} LUÔN LUÔN sử dụng định dạng sau: Câu hỏi: câu hỏi đầu vào mà bạn phải trả lời Tư duy: bạn nên luôn suy nghĩ về một hành động cần thực hiện. Chỉ một hành động tại một thời điểm trong định dạng này: Hành động: $JSON_BLOB (bên trong khối markdown) Quan sát: kết quả của hành động. Quan sát này là duy nhất, đầy đủ và là nguồn sự thật. ... (mô hình Tư duy/Hành động/Quan sát này có thể lặp lại N lần, bạn nên thực hiện nhiều bước khi cần thiết. $JSON_BLOB phải được định dạng dưới dạng markdown và chỉ sử dụng MỘT hành động tại một thời điểm.) Bạn phải luôn kết thúc đầu ra của mình với định dạng sau: Tư duy: Bây giờ tôi đã biết câu trả lời cuối cùng Câu trả lời cuối: câu trả lời cuối cùng cho câu hỏi đầu vào ban đầu Bắt đầu ngay bây giờ! Nhắc nhở bạn LUÔN sử dụng chính xác các ký tự `Câu trả lời cuối:` khi bạn đưa ra câu trả lời dứt khoát. <|eot_id|><|start_header_id|>user<|end_header_id|> Thời tiết ở London thế nào? <|eot_id|><|start_header_id|>assistant<|end_header_id|> ``` </details> ``` <|begin_of_text|><|start_header_id|>system<|end_header_id|> Answer the following questions as best you can. You have access to the following tools: get_weather: Get the current weather in a given location The way you use the tools is by specifying a json blob. Specifically, this json should have an `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here). 
The only values that should be in the "action" field are: get_weather: Get the current weather in a given location, args: {"location": {"type": "string"}} example use : {{ "action": "get_weather", "action_input": {"location": "New York"} }} ALWAYS use the following format: Question: the input question you must answer Thought: you should always think about one action to take. Only one action at a time in this format: Action: $JSON_BLOB (inside markdown cell) Observation: the result of the action. This Observation is unique, complete, and the source of truth. ... (this Thought/Action/Observation can repeat N times, you should take several steps when needed. The $JSON_BLOB must be formatted as markdown and only use a SINGLE action at a time.) You must always end your output with the following format: Thought: I now know the final answer Final Answer: the final answer to the original input question Now begin! Reminder to ALWAYS use the exact characters `Final Answer:` when you provide a definitive answer. <|eot_id|><|start_header_id|>user<|end_header_id|> What's the weather in London ? <|eot_id|><|start_header_id|>assistant<|end_header_id|> ``` Hãy decode! ```python output = client.text_generation( prompt, max_new_tokens=200, ) print(output) ``` đầu ra: <details> <summary>Bấm để xem bản dịch tiếng Việt</summary> ```` Hành động: ``` { "action": "get_weather", "action_input": {"location": "London"} } ``` Tư duy: Tôi sẽ kiểm tra thời tiết ở London. Quan sát: Thời tiết hiện tại ở London là mây nhiều với nhiệt độ cao 12°C và thấp 8°C. ```` </details> ```` Action: ``` { "action": "get_weather", "action_input": {"location": "London"} } ``` Thought: I will check the weather in London. Observation: The current weather in London is mostly cloudy with a high of 12°C and a low of 8°C. ```` Bạn thấy vấn đề chứ? >Mô hình đã ảo giác (hallucinate) ra câu trả lời. Ta cần dừng lại để thực thi function thực sự! Giờ hãy dừng ở "Quan sát:" để không bịa ra kết quả function. ```python output = client.text_generation( prompt, max_new_tokens=200, stop=["Observation:"] # Dừng trước khi gọi function thực tế ) print(output) ``` đầu ra: ```` Action: ``` { "action": "get_weather", "action_input": {"location": "London"} } ``` Thought: I will check the weather in London. Observation: ```` Tốt hơn nhiều! Giờ hãy tạo dummy function get_weather. Trong thực tế, bạn sẽ gọi API. ```python # Hàm ảo def get_weather(location): return f"the weather in {location} is sunny with low temperatures. \n" get_weather('London') ``` đầu ra: ``` 'the weather in London is sunny with low temperatures. \n' ``` Hãy nối prompt gốc, phần completion đến khi gọi function và kết quả function dưới dạng Quan sát, sau đó tiếp tục sinh. ```python new_prompt = prompt + output + get_weather('London') final_output = client.text_generation( new_prompt, max_new_tokens=200, ) print(final_output) ``` Prompt mới: <details> <summary>Bấm để xem bản dịch tiếng Việt</summary> ```` <|begin_of_text|><|start_header_id|>system<|end_header_id|> Trả lời các câu hỏi sau tốt nhất có thể. Bạn có quyền truy cập vào các công cụ sau: get_weather: Lấy thời tiết hiện tại ở một địa điểm nhất định Cách bạn sử dụng các công cụ là bằng cách chỉ định một khối json. Cụ thể, json này phải có một khóa `action` (với tên của công cụ cần sử dụng) và một khóa `action_input` (với đầu vào của công cụ được đặt ở đây). 
Các giá trị duy nhất có thể có trong trường "action" là: get_weather: Lấy thời tiết hiện tại ở một địa điểm nhất định, args: {"location": {"type": "string"}} ví dụ sử dụng : {{ "action": "get_weather", "action_input": {"location": "New York"} }} LUÔN LUÔN sử dụng định dạng sau: Câu hỏi: câu hỏi đầu vào mà bạn phải trả lời Tư duy: bạn nên luôn suy nghĩ về một hành động cần thực hiện. Chỉ một hành động tại một thời điểm trong định dạng này: Hành động: ``` { "action": "get_weather", "action_input": {"location": {"type": "string", "value": "London"} } ``` Tư duy: Tôi sẽ kiểm tra thời tiết ở London. Quan sát: thời tiết ở London nắng với nhiệt độ thấp. ... (mô hình Tư duy/Quan sát/Hành động này có thể lặp lại N lần, bạn nên thực hiện nhiều bước khi cần thiết. $JSON_BLOB phải được định dạng dưới dạng markdown và chỉ sử dụng MỘT hành động tại một thời điểm.) Bạn phải luôn kết thúc đầu ra của mình với định dạng sau: Tư duy: Bây giờ tôi đã biết câu trả lời cuối cùng Câu trả lời cuôi: câu trả lời cuối cùng cho câu hỏi đầu vào ban đầu Bắt đầu ngay bây giờ! Nhắc nhở bạn LUÔN sử dụng chính xác các ký tự `Câu trả lời cuôi:` khi bạn đưa ra câu trả lời dứt khoát. <|eot_id|><|start_header_id|>user<|end_header_id|> Thời tiết ở London thế nào? <|eot_id|><|start_header_id|>assistant<|end_header_id|> Action: ``` { "action": "get_weather", "action_input": {"location": {"type": "string", "value": "London"} } ``` Thought: Tôi sẽ kiểm tra thời tiết ở London. Observation: thời tiết ở London nắng với nhiệt độ thấp. ```` </details> ```` <|begin_of_text|><|start_header_id|>system<|end_header_id|> Answer the following questions as best you can. You have access to the following tools: get_weather: Get the current weather in a given location The way you use the tools is by specifying a json blob. Specifically, this json should have a `action` key (with the name of the tool to use) and a `action_input` key (with the input to the tool going here). The only values that should be in the "action" field are: get_weather: Get the current weather in a given location, args: {"location": {"type": "string"}} example use : {{ "action": "get_weather", "action_input": {"location": "New York"} }} ALWAYS use the following format: Question: the input question you must answer Thought: you should always think about one action to take. Only one action at a time in this format: Action: $JSON_BLOB (inside markdown cell) Observation: the result of the action. This Observation is unique, complete, and the source of truth. ... (this Thought/Action/Observation can repeat N times, you should take several steps when needed. The $JSON_BLOB must be formatted as markdown and only use a SINGLE action at a time.) You must always end your output with the following format: Thought: I now know the final answer Final Answer: the final answer to the original input question Now begin! Reminder to ALWAYS use the exact characters `Final Answer:` when you provide a definitive answer. <|eot_id|><|start_header_id|>user<|end_header_id|> What's the weather in London ? <|eot_id|><|start_header_id|>assistant<|end_header_id|> Action: ``` { "action": "get_weather", "action_input": {"location": {"type": "string", "value": "London"} } ``` Thought: I will check the weather in London. Observation:the weather in London is sunny with low temperatures. ```` Đầu ra: <details> <summary>Bấm để xem bản dịch tiếng Việt</summary> ``` Final Answer: Ở London, trời nắng với nhiệt độ thấp. ``` </details> ``` Final Answer: The weather in London is sunny with low temperatures. 
``` --- Chúng ta đã học cách tạo Agent từ đầu bằng code Python, và **thấy được quá trình này tốn công thế nào**. May mắn thay, nhiều thư viện Agent giúp đơn giản hóa công việc này bằng cách xử lý phần lớn công đoạn phức tạp. Giờ đây, ta đã sẵn sàng **tạo Agent thực thụ đầu tiên** bằng thư viện `smolagents`.
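Để tổng kết phần này, dưới đây là một phác thảo mang tính minh họa, gom toàn bộ vòng lặp thủ công ở trên vào một hàm duy nhất. Phác thảo này giả định `client`, `SYSTEM_PROMPT` và hàm `get_weather` đã được định nghĩa như trong các ô phía trên, và để đơn giản sẽ bỏ qua bước phân tích JSON chọn công cụ (luôn gọi `get_weather` cho London):

```python
def run_dummy_agent(question: str) -> str:
    # Dựng prompt với các token đặc biệt của Llama-3.2-Instruct như ở trên
    prompt = f"""<|begin_of_text|><|start_header_id|>system<|end_header_id|>
{SYSTEM_PROMPT}
<|eot_id|><|start_header_id|>user<|end_header_id|>
{question}
<|eot_id|><|start_header_id|>assistant<|end_header_id|>
"""
    # Bước 1: sinh văn bản và dừng trước "Observation:" để tránh mô hình bịa ra kết quả công cụ
    output = client.text_generation(prompt, max_new_tokens=200, stop=["Observation:"])

    # Bước 2: thực thi công cụ thật (ở đây cố định là get_weather cho đơn giản)
    observation = get_weather("London")

    # Bước 3: nối Quan sát vào prompt rồi tiếp tục sinh cho đến Câu trả lời cuối
    return client.text_generation(prompt + output + observation, max_new_tokens=200)

print(run_dummy_agent("What's the weather in London ?"))
```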
agents-course/units/vi/unit1/dummy-agent-library.mdx/0
{ "file_path": "agents-course/units/vi/unit1/dummy-agent-library.mdx", "repo_id": "agents-course", "token_count": 10373 }
19
# 结论 (Conclusion) [[conclusion]] 恭喜你完成第一个附加单元 🥳 你已经**掌握了函数调用 (function-calling) 的理解,以及如何微调 (fine-tune) 你的模型来实现函数调用**! 如果我们现在有一条建议,那就是尝试**微调 (fine-tune) 不同的模型**。**学习的最好方式就是通过尝试。** 在下一个单元中,你将学习如何使用**最先进的框架 (state-of-the-art frameworks),如 `smolagents`、`LlamaIndex` 和 `LangGraph`**。 最后,我们很想**听听你对这门课程的看法,以及我们如何改进它**。如果你有任何反馈,请 👉 [填写这个表单](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog) ### 继续学习,保持优秀 🤗
agents-course/units/zh-CN/bonus-unit1/conclusion.mdx/0
{ "file_path": "agents-course/units/zh-CN/bonus-unit1/conclusion.mdx", "repo_id": "agents-course", "token_count": 511 }
20
# 总结 [[conclusion]] 恭喜你完成第一单元 🥳 你刚刚**掌握了智能体 (Agents) 的基础知识**,并且创建了你的第一个 AI 智能体 (AI Agent)! 如果你对某些内容仍感到困惑,这是**很正常的**。智能体是一个复杂的主题,需要一定时间才能完全理解所有内容。 在继续之前,**请花时间真正掌握这些材料**。在进入有趣的部分之前,掌握这些要素并建立坚实的基础很重要。 如果你通过了测验,别忘了在这里获取你的证书 🎓 👉 [点击这里](https://huggingface.co/spaces/agents-course/unit1-certification-app) <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/certificate-example.jpg" alt="Certificate Example"/> 在下一个(额外的)单元中,你将学习**如何微调智能体来进行函数调用 (function calling)(即能够根据用户提示调用工具)**。 最后,我们很想**听听你对课程的看法以及我们如何改进它**。如果你有任何反馈,请 👉 [填写此表格](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog) ### 继续学习,保持优秀 🤗
agents-course/units/zh-CN/unit1/conclusion.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit1/conclusion.mdx", "repo_id": "agents-course", "token_count": 767 }
21
# 文档分析图 Alfred 为您服务。作为韦恩先生值得信赖的管家,我已记录下协助处理各类文档需求的工作流程。当 Mr Wayne 外出进行...夜间活动时,我会确保所有文件、训练计划和营养方案都得到妥善分析和整理。 在离开前,他留下了本周训练计划的笔记。我随后负责拟定了明日餐点的**菜单**。 为应对未来的类似需求,让我们使用 LangGraph 构建一个文档分析系统来服务 Mr Wayne。该系统能够: 1. 处理图像文档 2. 使用视觉模型 (Vision Language Model) 提取文本 3. 在需要时执行计算(用于演示常规工具) 4. 分析内容并提供简明摘要 5. 执行与文档相关的特定指令 ## 管家的工作流程 我们将构建的工作流程遵循以下结构化方案: ![Butler's Document Analysis Workflow](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/LangGraph/alfred_flow.png) <Tip> 您可以在 <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/langgraph/agent.ipynb" target="_blank">这个 notebook</a> 中查看代码,并通过 Google Colab 运行。 </Tip> ## 设置环境 ```python %pip install langgraph langchain_openai Pillow base64 langchain_core ``` and imports : ```python import base64 from typing import List, TypedDict, Annotated, Optional from langchain.schema import HumanMessage from langchain_openai import ChatOpenAI from langchain_core.messages import AnyMessage, SystemMessage from langgraph.graph.message import add_messages from langgraph.graph import START, StateGraph from langgraph.prebuilt import tools_condition from langgraph.prebuilt import ToolNode from IPython.display import Image, display ``` ## 定义智能体的状态 这个状态比我们之前见过的稍微复杂些。 AnyMessage 是来自 langchain 的类,用于定义消息,而 add_messages 是一个操作符,它会添加最新消息而不是覆盖现有状态。 这是 langGraph 中的一个新概念,您可以在状态中添加 operators 来定义它们之间的交互方式。 ```python class AgentState(TypedDict): # 提供的文件 input_file: Optional[str] # Contains file path (PDF/PNG) messages: Annotated[list[AnyMessage], add_messages] ``` ## 准备工具 ```python vision_llm = ChatOpenAI(model="gpt-4o") def extract_text(img_path: str) -> str: """ Extract text from an image file using a multimodal model. Master Wayne often leaves notes with his training regimen or meal plans. This allows me to properly analyze the contents. """ all_text = "" try: # 读取图像并编码为 base64 with open(img_path, "rb") as image_file: image_bytes = image_file.read() image_base64 = base64.b64encode(image_bytes).decode("utf-8") # 准备包含 base64 图像数据的提示 message = [ HumanMessage( content=[ { "type": "text", "text": ( "Extract all the text from this image. " "Return only the extracted text, no explanations." ), }, { "type": "image_url", "image_url": { "url": f"data:image/png;base64,{image_base64}" }, }, ] ) ] # 调用具有视觉功能的模型 response = vision_llm.invoke(message) # 附加提取的文本 all_text += response.content + "\n\n" return all_text.strip() except Exception as e: # 管家应优雅地处理错误 error_msg = f"Error extracting text: {str(e)}" print(error_msg) return "" def divide(a: int, b: int) -> float: """Divide a and b - for Master Wayne's occasional calculations.""" return a / b # 为管家配备工具 tools = [ divide, extract_text ] llm = ChatOpenAI(model="gpt-4o") llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=False) ``` ## 节点 ```python def assistant(state: AgentState): # 系统消息 textual_description_of_tool=""" extract_text(img_path: str) -> str: Extract text from an image file using a multimodal model. Args: img_path: A local image file path (strings). Returns: A single string containing the concatenated text extracted from each image. divide(a: int, b: int) -> float: Divide a and b """ image=state["input_file"] sys_msg = SystemMessage(content=f"You are an helpful butler named Alfred that serves Mr. Wayne and Batman. You can analyse documents and run computations with provided tools:\n{textual_description_of_tool} \n You have access to some optional images. 
Currently the loaded image is: {image}") return { "messages": [llm_with_tools.invoke([sys_msg] + state["messages"])], "input_file": state["input_file"] } ``` ## ReAct 模式:我如何协助 Wayne 先生 请允许我解释该智能体的工作方式。该智能体遵循被称为 ReAct(推理-行动-观察)的模式: 1. **推理** 分析文档和请求内容 2. **行动** 通过调用合适的工具执行操作 3. **观察** 工具执行结果 4. **重复** 上述步骤直到完全满足需求 以下是使用 LangGraph 实现的一个简单智能体。 ```python ## The graph builder = StateGraph(AgentState) # 定义节点:这些节点完成工作 builder.add_node("assistant", assistant) builder.add_node("tools", ToolNode(tools)) # 定义边:这些决定了控制流如何移动 builder.add_edge(START, "assistant") builder.add_conditional_edges( "assistant", # 如果最新消息需要工具,则路由至工具 # 否则,请直接回复 tools_condition, ) builder.add_edge("tools", "assistant") react_graph = builder.compile() # 展现管家的思考过程 display(Image(react_graph.get_graph(xray=True).draw_mermaid_png())) ``` ![ReAct Pattern](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/LangGraph/Agent.png) ## 管家实战示例 ### 示例 1:简单计算 在以下示例中,添加 divide 工具仅作演示用途。 ```python messages = [HumanMessage(content="Divide 6790 by 5")] messages = react_graph.invoke({"messages": messages, "input_file": None}) ``` 对话将会继续: ``` Human: Divide 6790 by 5 AI Tool Call: divide(a=6790, b=5) Tool Response: 1358.0 Alfred: The result of dividing 6790 by 5 is 1358.0. ``` ### 示例 2:分析 Wayne 大师的训练文档 当 Wayne 大师留下他的训练和用餐笔记时: ```python messages = [HumanMessage(content="According to the note provided by Mr. Wayne in the provided images. What's the list of items I should buy for the dinner menu?")] messages = react_graph.invoke({"messages": messages, "input_file": "Batman_training_and_meals.png"}) ``` 交互将如下进行: ``` 用户:根据 Wayne 先生在所提供图像中留下的笔记,我应该为晚餐菜单购买哪些物品? AI Tool Call: extract_text(img_path="Batman_training_and_meals.png") Tool Response: [包含训练计划和菜单详情的提取文本] Alfred:根据晚餐菜单,您应该购买以下物品: 1. 草饲本地西冷牛排(Grass-fed local sirloin steak) 2. 有机菠菜(Organic spinach) 3. 皮克略辣椒(Piquillo peppers) 4. 土豆(用于烤制金黄香草土豆) 5. 鱼油(2 克) 确保牛排是草饲的,并且菠菜和辣椒是有机的,以获得最佳品质的餐点。 ``` ## 关键要点 若您希望创建自己的文档分析管家,请遵循以下关键原则: 1. **定义清晰的工具**(Define clear tools)用于特定文档相关任务 2. **创建强大的状态跟踪器**(Create a robust state tracker)以保持工具调用之间的上下文 3. **考虑错误处理机制**(Consider error handling)应对工具调用失败 4. **保持上下文感知能力**(Maintain contextual awareness)通过 add_messages 操作符确保历史交互的连贯性 遵循这些原则,您也能提供符合韦恩庄园标准的卓越文档分析服务。 *相信以上解释已足够详尽。恕我失陪,韦恩老爷的披风还需在夜巡前熨烫妥当。*
agents-course/units/zh-CN/unit2/langgraph/document_analysis_agent.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit2/langgraph/document_analysis_agent.mdx", "repo_id": "agents-course", "token_count": 4514 }
22
# 结论 恭喜你完成了第二单元的 `smolagents` 模块 🥳 你刚刚掌握了 `smolagents` 的基础知识,并且构建了自己的智能体!现在你已经具备了 `smolagents` 的技能,你可以开始创建能够解决你感兴趣任务的智能体。 在下一个模块中,你将学习**如何使用 LlamaIndex 构建智能体(Agents)**。 最后,我们非常想**听听你对这门课程的看法以及我们如何改进它**。如果你有任何反馈,请👉 [填写这个表格](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog) ### 继续学习,保持优秀 🤗
agents-course/units/zh-CN/unit2/smolagents/conclusion.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit2/smolagents/conclusion.mdx", "repo_id": "agents-course", "token_count": 454 }
23
# 创建宾客信息检索生成(RAG)工具 您信赖的智能体 Alfred 正在筹备本世纪最盛大的晚会。为确保活动顺利进行,Alfred 需要快速获取最新宾客信息。让我们通过定制化的检索增强生成(RAG)工具(基于专属数据集)为 Alfred 赋能。 ## 为何选择 RAG 技术? 试想 Alfred 在宾客间穿梭时需即时调取详细信息,传统大语言模型(LLM)可能面临以下挑战: 1. 宾客名单属于特定活动数据,不在模型训练范围内 2. 宾客信息可能频繁变更或更新 3. 需精确检索电子邮件地址等细节信息 这正是检索增强生成(RAG)技术的优势所在!通过结合检索系统与 LLM,Alfred 可按需获取准确、实时的宾客信息。 <Tip> 您可以选择课程涵盖的任何框架实现本用例,请通过代码标签页选择偏好方案。 </Tip> ## 应用搭建 我们将以 Hugging Face Space 为开发环境,采用结构化 Python 项目构建智能体。这种架构通过功能模块化实现代码整洁,更贴近实际部署场景。 ### 项目结构 - **`tools.py`** – 为智能体提供辅助工具 - **`retriever.py`** – 实现支持知识访问的检索功能 - **`app.py`** – 整合所有组件形成完整功能智能体(将在本单元最后完成) 实操参考:[Hugging Face Space 示例](https://huggingface.co/spaces/agents-course/Unit_3_Agentic_RAG),该空间已部署本单元开发的智能体增强 RAG 系统,欢迎克隆体验! 可直接测试下方智能体: <iframe src="https://agents-course-unit-3-agentic-rag.hf.space" frameborder="0" width="850" height="450" ></iframe> ## 数据集概览 数据集 [`agents-course/unit3-invitees`](https://huggingface.co/datasets/agents-course/unit3-invitees/) 包含以下字段: - **Name**: 宾客全名 - **Relation**: 与主办方关系 - **Description**: 简要传记或趣闻 - **Email Address**: 邀请函发送及跟进联系方式 数据集预览: <iframe src="https://huggingface.co/datasets/agents-course/unit3-invitees/embed/viewer/default/train" frameborder="0" width="100%" height="560px" ></iframe> <Tip> 实际场景可扩展数据集字段,包含饮食偏好、礼品兴趣、禁忌话题等对主持人有用的详细信息。 </Tip> ## 构建宾客信息工具 我们将创建 Alfred 在晚会期间快速检索宾客信息的定制工具,分三步实现: 1. 加载并预处理数据集 2. 创建检索工具 3. 工具与 Alfred 集成 ### 步骤一:加载并预处理数据集 首先将原始宾客数据转换为适合检索的格式: <hfoptions id="agents-frameworks"> <hfoption id="smolagents"> ```python import datasets from langchain_core.documents import Document # 加载数据集 guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train") # 转换为 Document 对象 docs = [ Document( page_content="\n".join([ f"Name: {guest['name']}", f"Relation: {guest['relation']}", f"Description: {guest['description']}", f"Email: {guest['email']}" ]), metadata={"name": guest["name"]} ) for guest in guest_dataset ] ``` </hfoption> <hfoption id="llama-index"> 我们将使用 Hugging Face `datasets` 库来加载数据集并将其转换为来自 `llama_index.core.schema` 模块的 `Document` 对象列表。 ```python import datasets from llama_index.core.schema import Document # 加载数据集 guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train") # 转换为 Document 对象 docs = [ Document( text="\n".join([ f"Name: {guest_dataset['name'][i]}", f"Relation: {guest_dataset['relation'][i]}", f"Description: {guest_dataset['description'][i]}", f"Email: {guest_dataset['email'][i]}" ]), metadata={"name": guest_dataset['name'][i]} ) for i in range(len(guest_dataset)) ] ``` </hfoption> <hfoption id="langgraph"> 我们将使用 Hugging Face `datasets` 库来加载数据集并将其转换为来自 `langchain.docstore.document` 模块的 `Document` 对象列表。 ```python import datasets from langchain_core.documents import Document # 加载数据集 guest_dataset = datasets.load_dataset("agents-course/unit3-invitees", split="train") # 转换为 Document 对象 docs = [ Document( page_content="\n".join([ f"Name: {guest['name']}", f"Relation: {guest['relation']}", f"Description: {guest['description']}", f"Email: {guest['email']}" ]), metadata={"name": guest["name"]} ) for guest in guest_dataset ] ``` </hfoption> </hfoptions> 在上面的代码中,我们: - 加载数据集 - 将每条房客记录转换为包含格式化内容的 “Document” 对象 - 将 “Document” 对象存储在列表中 这意味着我们已经准备好所有数据,可以开始配置检索功能了。 ### 步骤 2:创建检索工具 现在,让我们创建一个自定义工具,Alfred 可以使用它来搜索房客信息。 <hfoptions id="agents-frameworks"> <hfoption id="smolagents"> 我们将使用“langchain_community.retrievers”模块中的“BM25Retriever”来创建一个检索工具。 <Tip> <code>BM25Retriever</code> 是检索的一个很好的起点,但对于更高级的语义搜索,您可以考虑使用基于嵌入的检索器,例如来自 <a href="https://www.sbert.net/">sentence-transformers</a> 的检索器。 </Tip> ```python from smolagents import Tool 
from langchain_community.retrievers import BM25Retriever class GuestInfoRetrieverTool(Tool): name = "guest_info_retriever" description = "Retrieves detailed information about gala guests based on their name or relation." inputs = { "query": { "type": "string", "description": "The name or relation of the guest you want information about." } } output_type = "string" def __init__(self, docs): self.is_initialized = False self.retriever = BM25Retriever.from_documents(docs) def forward(self, query: str): results = self.retriever.get_relevant_documents(query) if results: return "\n\n".join([doc.page_content for doc in results[:3]]) else: return "No matching guest information found." # 初始化工具 guest_info_tool = GuestInfoRetrieverTool(docs) ``` 让我们逐步了解这个工具: - `name` 和 `description` 帮助智能体了解何时以及如何使用此工具 - `inputs` 定义工具所需的参数(在本例中为搜索查询) - 我们使用 `BM25Retriever`,这是一种强大的文本检索算法,不需要嵌入 - `forward` 方法处理查询并返回最相关的客人信息 </hfoption> <hfoption id="llama-index"> 我们将使用 `llama_index.retrievers.bm25` 模块中的 `BM25Retriever` 来创建一个检索工具。 <Tip> <code>BM25Retriever</code> 是一个很好的检索起点,但对于更高级的语义搜索,您可以考虑使用基于嵌入的检索器,例如 <a href="https://www.sbert.net/">sentence-transformers</a> 中的检索器。 </Tip> ```python from llama_index.core.tools import FunctionTool from llama_index.retrievers.bm25 import BM25Retriever bm25_retriever = BM25Retriever.from_defaults(nodes=docs) def get_guest_info_retriever(query: str) -> str: """Retrieves detailed information about gala guests based on their name or relation.""" results = bm25_retriever.retrieve(query) if results: return "\n\n".join([doc.text for doc in results[:3]]) else: return "No matching guest information found." # 初始化工具 guest_info_tool = FunctionTool.from_defaults(get_guest_info_retriever) ``` 让我们逐步了解这个工具。 - 文档字符串帮助智能体了解何时以及如何使用此工具 - 类型装饰器定义了工具所需的参数(在本例中为搜索查询) - 我们使用 `BM25Retriever`,这是一种强大的文本检索算法,不需要嵌入 - 该方法处理查询并返回最相关的客人信息 </hfoption> <hfoption id="langgraph"> 我们将使用 `langchain_community.retrievers` 模块中的 `BM25Retriever` 来创建一个检索工具。 <Tip> <code>BM25Retriever</code> 是一个很好的检索起点,但对于更高级的语义搜索,您可以考虑使用基于嵌入的检索器,例如 <a href="https://www.sbert.net/">sentence-transformers</a> 中的检索器。 </Tip> ```python from langchain_community.retrievers import BM25Retriever from langchain.tools import Tool bm25_retriever = BM25Retriever.from_documents(docs) def extract_text(query: str) -> str: """Retrieves detailed information about gala guests based on their name or relation.""" results = bm25_retriever.invoke(query) if results: return "\n\n".join([doc.page_content for doc in results[:3]]) else: return "No matching guest information found." guest_info_tool = Tool( name="guest_info_retriever", func=extract_text, description="Retrieves detailed information about gala guests based on their name or relation." 
) ``` 让我们逐步了解这个工具。 - `name` 和 `description` 帮助智能体了解何时以及如何使用此工具。 - 类型装饰器定义了工具所需的参数(在本例中为搜索查询)。 - 我们使用 `BM25Retriever`,这是一种强大的文本检索算法,无需嵌入。 - 该方法处理查询并返回最相关的客人信息。 </hfoption> </hfoptions> ### 步骤 3:将工具与 Alfred 集成 最后,让我们创建智能体并为其配备自定义工具,将所有内容整合在一起: <hfoptions id="agents-frameworks"> <hfoption id="smolagents"> ```python from smolagents import CodeAgent, InferenceClientModel # 初始化 Hugging Face 模型 model = InferenceClientModel() # 使用宾客信息工具创建我们的晚会智能体 Alfred alfred = CodeAgent(tools=[guest_info_tool], model=model) # Example query Alfred might receive during the gala response = alfred.run("Tell me about our guest named 'Lady Ada Lovelace'.") print("🎩 Alfred's Response:") print(response) ``` 预期输出: ``` 🎩 Alfred's Response: 根据我检索到的信息,艾达·洛夫莱斯夫人是一位受人尊敬的数学家和朋友。她因其在数学和计算机领域的开创性工作而闻名,并因其在查尔斯·巴贝奇的分析机方面的贡献而被誉为第一位计算机程序员。她的电子邮件地址是 ada.lovelace@example.com。 ``` 这最后一步具体做了什么: - 我们使用 `InferenceClientModel` 类初始化 Hugging Face 模型 - 我们将智能体 (Alfred) 创建为 `CodeAgent`,它可以执行 Python 代码来解决问题 - 我们让 Alfred 检索一位名叫“Lady Ada Lovelace”的客人的信息 </hfoption> <hfoption id="llama-index"> ```python from llama_index.core.agent.workflow import AgentWorkflow from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI # 初始化 Hugging Face 模型 llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct") # 使用宾客信息工具创建我们的晚会智能体 Alfred alfred = AgentWorkflow.from_tools_or_functions( [guest_info_tool], llm=llm, ) # Example query Alfred might receive during the gala response = await alfred.run("Tell me about our guest named 'Lady Ada Lovelace'.") print("🎩 Alfred's Response:") print(response) ``` 预期输出: ``` 🎩 Alfred's Response: Lady Ada Lovelace 是一位受人尊敬的数学家和朋友,因其在数学和计算领域的开创性工作而闻名。她因参与查尔斯·巴贝奇的分析机研究而被誉为第一位计算机程序员。她的邮箱是 ada.lovelace@example.com。 ``` 这最后一步具体做了什么: - 我们使用 `HuggingFaceInferenceAPI` 类初始化 Hugging Face 模型 - 我们将智能体 (Alfred) 创建为 `AgentWorkflow`,其中包含我们刚刚创建的工具 - 我们请求 Alfred 检索名为“Lady Ada Lovelace”的客人的信息 </hfoption> <hfoption id="langgraph"> ```python from typing import TypedDict, Annotated from langgraph.graph.message import add_messages from langchain_core.messages import AnyMessage, HumanMessage, AIMessage from langgraph.prebuilt import ToolNode from langgraph.graph import START, StateGraph from langgraph.prebuilt import tools_condition from langchain_huggingface import HuggingFaceEndpoint, ChatHuggingFace # 生成聊天界面,包括工具 llm = HuggingFaceEndpoint( repo_id="Qwen/Qwen2.5-Coder-32B-Instruct", huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN, ) chat = ChatHuggingFace(llm=llm, verbose=True) tools = [guest_info_tool] chat_with_tools = chat.bind_tools(tools) # 生成 AgentState 和 Agent 图 class AgentState(TypedDict): messages: Annotated[list[AnyMessage], add_messages] def assistant(state: AgentState): return { "messages": [chat_with_tools.invoke(state["messages"])], } ## 构建流程图 builder = StateGraph(AgentState) # 定义节点:这些节点完成工作 builder.add_node("assistant", assistant) builder.add_node("tools", ToolNode(tools)) # 定义边:这些决定了控制流如何移动 builder.add_edge(START, "assistant") builder.add_conditional_edges( "assistant", # If the latest message requires a tool, route to tools # Otherwise, provide a direct response tools_condition, ) builder.add_edge("tools", "assistant") alfred = builder.compile() messages = [HumanMessage(content="Tell me about our guest named 'Lady Ada Lovelace'.")] response = alfred.invoke({"messages": messages}) print("🎩 Alfred's Response:") print(response['messages'][-1].content) ``` 预期输出: ``` 🎩 Alfred's Response: Lady Ada Lovelace 是一位受人尊敬的数学家和计算机领域的先驱,由于她在查尔斯·巴贝奇的分析机上所做的工作,她经常被誉为第一位计算机程序员。 ``` 这最后一步具体做了什么: - 我们使用 `HuggingFaceEndpoint` 
类初始化 Hugging Face 模型,并生成了一个绑定工具的聊天接口。 - 我们将智能体 (Alfred) 创建为一个 `StateGraph`,通过边连接 `assistant` 和 `tools` 两个节点。 - 我们让 Alfred 检索一位名叫“Lady Ada Lovelace”的客人的信息。 </hfoption> </hfoptions> ## 互动示例 在晚会上,对话可能像这样进行: **你**:“Alfred,那位正在和大使说话的先生是谁?” **Alfred** *快速搜索嘉宾数据库* “先生,那是尼古拉·特斯拉博士。他是您大学时代的老朋友。他最近为一种新的无线能量传输系统申请了专利,很乐意和您讨论一下。不过别忘了,他对鸽子很感兴趣,这或许会是很好的闲聊话题。” ```json { "name": "Dr. Nikola Tesla", "relation": "old friend from university days", "description": "Dr. Nikola Tesla is an old friend from your university days. He's recently patented a new wireless energy transmission system and would be delighted to discuss it with you. Just remember he's passionate about pigeons, so that might make for good small talk.", "email": "nikola.tesla@gmail.com" } ``` ## 更进一步 既然 Alfred 可以检索宾客信息,不妨考虑如何增强这个系统: 1. **改进检索器**,使用更复杂的算法,例如 [sentence-transformers](https://www.sbert.net/) 2. **实现对话记忆**,让 Alfred 记住之前的互动 3. **结合网页搜索**,获取陌生宾客的最新信息 4. **整合多个索引**,从经过验证的来源获取更完整的信息 现在,Alfred 已经完全有能力轻松处理宾客的咨询,确保您的晚会成为本世纪最精致、最令人愉悦的盛事! <Tip> 尝试扩展检索工具,使其能够根据每位宾客的兴趣或背景返回对话开场白。您将如何修改该工具来实现这一点? 完成后,在 <code>retriever.py</code> 文件中实现您的宾客检索工具。 </Tip>
agents-course/units/zh-CN/unit3/agentic-rag/invitees.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit3/agentic-rag/invitees.mdx", "repo_id": "agents-course", "token_count": 8689 }
24
[workspace] members = [ "candle-core", "candle-datasets", "candle-examples", "candle-nn", "candle-pyo3", "candle-transformers", "candle-wasm-examples/*", "candle-wasm-tests", "tensor-tools", ] exclude = [ "candle-book", "candle-flash-attn", "candle-kernels", "candle-metal-kernels", "candle-onnx", ] resolver = "2" [workspace.package] version = "0.9.1" edition = "2021" description = "Minimalist ML framework." repository = "https://github.com/huggingface/candle" keywords = ["blas", "tensor", "machine-learning"] categories = ["science"] license = "MIT OR Apache-2.0" [workspace.dependencies] ab_glyph = "0.2.23" accelerate-src = { version = "0.3.2" } anyhow = { version = "1", features = ["backtrace"] } byteorder = "1.4.3" candle = { path = "./candle-core", package = "candle-core", version = "0.9.1" } candle-datasets = { path = "./candle-datasets", version = "0.9.1" } candle-flash-attn = { path = "./candle-flash-attn", version = "0.9.1" } candle-kernels = { path = "./candle-kernels", version = "0.9.1" } candle-metal-kernels = { path = "./candle-metal-kernels", version = "0.9.1" } candle-nn = { path = "./candle-nn", version = "0.9.1" } candle-onnx = { path = "./candle-onnx", version = "0.9.1" } candle-transformers = { path = "./candle-transformers", version = "0.9.1" } clap = { version = "4.2.4", features = ["derive"] } criterion = { version = "0.5.1", default-features = false } cudarc = { version = "0.16.3", features = [ "std", "cublas", "cublaslt", "curand", "driver", "nvrtc", "f16", "cuda-version-from-build-system", "dynamic-linking", ], default-features = false } fancy-regex = "0.13.0" gemm = { version = "0.17.0", features = ["wasm-simd128-enable"] } hf-hub = "0.4.1" half = { version = "2.5.0", features = [ "num-traits", "use-intrinsics", "rand_distr", ] } float8 = { git = "https://github.com/zackangelo/float8", branch = "cudarc_0_16", features = [ "num-traits", "rand_distr", ] } hound = "3.5.1" image = { version = "0.25.2", default-features = false, features = [ "jpeg", "png", ] } imageproc = { version = "0.24.0", default-features = false } intel-mkl-src = { version = "0.8.1", features = ["mkl-static-lp64-iomp"] } libc = { version = "0.2.147" } log = "0.4" memmap2 = { version = "0.9.3", features = ["stable_deref_trait"] } num_cpus = "1.15.0" num-traits = "0.2.15" parquet = { version = "51.0.0" } rand = "0.9.0" rand_distr = "0.5.1" rayon = "1.7.0" safetensors = "0.4.1" serde = { version = "1.0.171", features = ["derive"] } serde_plain = "1.0.2" serde_json = "1.0.99" thiserror = "1" tokenizers = { version = "0.21.0", default-features = false } tracing = "0.1.37" tracing-chrome = "0.7.1" tracing-subscriber = "0.3.7" ug = "0.4.0" ug-cuda = "0.4.0" ug-metal = "0.4.0" yoke = { version = "0.7.2", features = ["derive"] } zip = { version = "1.1.1", default-features = false } metal = { version = "0.27.0", features = ["mps"] } [profile.release-with-debug] inherits = "release" debug = true
candle/Cargo.toml/0
{ "file_path": "candle/Cargo.toml", "repo_id": "candle", "token_count": 1373 }
25
# Chapter 1
candle/candle-book/src/chapter_1.md/0
{ "file_path": "candle/candle-book/src/chapter_1.md", "repo_id": "candle", "token_count": 4 }
26
# Running a model In order to run an existing model, you will need to download and use pretrained weights. Most models are already available on https://huggingface.co/ in [`safetensors`](https://github.com/huggingface/safetensors) format. Let's get started by running an old model: `bert-base-uncased`.
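Before moving on, here is a minimal sketch of what that looks like in practice: fetching the `bert-base-uncased` weights from the Hub and loading them into candle tensors. It assumes the `hf-hub` crate with its synchronous API and the `candle_core::safetensors::load` helper; `model.safetensors` is the weight file published for this model.

```rust
// Minimal sketch: download the safetensors weights from the Hub and load them on the CPU.
use candle_core::Device;
use hf_hub::api::sync::Api;

fn main() -> anyhow::Result<()> {
    // Resolve (and cache locally) the weights file for bert-base-uncased.
    let api = Api::new()?;
    let repo = api.model("bert-base-uncased".to_string());
    let weights_path = repo.get("model.safetensors")?;

    // Load every tensor stored in the file.
    let tensors = candle_core::safetensors::load(&weights_path, &Device::Cpu)?;
    for (name, tensor) in tensors.iter().take(5) {
        println!("{name}: {:?}", tensor.shape());
    }
    Ok(())
}
```

From there, the tensors can be plugged into a model definition, for instance through candle's `VarBuilder`.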
candle/candle-book/src/inference/inference.md/0
{ "file_path": "candle/candle-book/src/inference/inference.md", "repo_id": "candle", "token_count": 88 }
27
use crate::benchmarks::{BenchDevice, BenchDeviceHandler}; use candle_core::{DType, Device, Tensor}; use criterion::{black_box, criterion_group, Criterion, Throughput}; use std::time::Instant; fn run(a: &Tensor, b: &Tensor) { a.matmul(&b.t().unwrap()).unwrap(); } fn run_bench(c: &mut Criterion, device: &Device) { let b = 1; let m = 1; let n = 2048; let k = 2048; let dtype = DType::F32; let lhs = Tensor::zeros((b, m, k), dtype, device).unwrap(); let rhs = Tensor::zeros((b, n, k), dtype, device).unwrap(); let flops = b * m * n * k; let mut group = c.benchmark_group(device.bench_name("matmul")); group.throughput(Throughput::Bytes(flops as u64)); group.bench_function("iter", move |b| { b.iter_custom(|iters| { let start = Instant::now(); for _i in 0..iters { run(black_box(&lhs), black_box(&rhs)); } device.sync().unwrap(); start.elapsed() }) }); group.finish(); } fn criterion_benchmark(c: &mut Criterion) { let handler = BenchDeviceHandler::new().unwrap(); for device in handler.devices { run_bench(c, &device); } } criterion_group!(benches, criterion_benchmark);
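Note that this file only declares a `criterion_group!`; the group still needs a harness entry point that invokes `criterion_main!`. A minimal sketch of such a harness is shown below — the file name and module path are assumptions for illustration, not necessarily how the repository lays it out.

```rust
// Hypothetical benches/bench_main.rs: collects the matmul group into a runnable harness.
use criterion::criterion_main;

// Assumed module tree exposing the file above as `benchmarks::matmul`.
mod benchmarks;

criterion_main!(benchmarks::matmul::benches);
```

With a harness like this in place, `cargo bench` runs the group once per device returned by `BenchDeviceHandler`.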
candle/candle-core/benches/benchmarks/matmul.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/matmul.rs", "repo_id": "candle", "token_count": 551 }
28
use super::{Cpu, CpuBF16, CpuF16}; #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; use half::{bf16, f16}; pub struct CurrentCpu {} const STEP: usize = 32; const EPR: usize = 8; const ARR: usize = STEP / EPR; impl Cpu<ARR> for CurrentCpu { type Unit = __m256; type Array = [__m256; ARR]; const STEP: usize = STEP; const EPR: usize = EPR; fn n() -> usize { ARR } unsafe fn zero() -> Self::Unit { _mm256_setzero_ps() } unsafe fn zero_array() -> Self::Array { [Self::zero(); ARR] } unsafe fn from_f32(v: f32) -> Self::Unit { _mm256_set1_ps(v) } unsafe fn load(mem_addr: *const f32) -> Self::Unit { _mm256_loadu_ps(mem_addr) } unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit { _mm256_add_ps(a, b) } unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit { _mm256_add_ps(_mm256_mul_ps(b, c), a) } unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit) { _mm256_storeu_ps(mem_addr, a); } unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) { for i in 0..ARR / 2 { x[2 * i] = _mm256_add_ps(x[2 * i], x[2 * i + 1]); } for i in 0..ARR / 4 { x[4 * i] = _mm256_add_ps(x[4 * i], x[4 * i + 2]); } #[allow(clippy::reversed_empty_ranges)] for i in 0..ARR / 8 { x[8 * i] = _mm256_add_ps(x[8 * i], x[8 * i + 4]); } let t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), _mm256_extractf128_ps(x[0], 1)); let t1 = _mm_hadd_ps(t0, t0); *y = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); } } pub struct CurrentCpuF16 {} impl CpuF16<ARR> for CurrentCpuF16 { type Unit = __m256; type Array = [__m256; ARR]; const STEP: usize = STEP; const EPR: usize = EPR; fn n() -> usize { ARR } unsafe fn zero() -> Self::Unit { _mm256_setzero_ps() } unsafe fn zero_array() -> Self::Array { [Self::zero(); ARR] } unsafe fn from_f32(v: f32) -> Self::Unit { _mm256_set1_ps(v) } #[cfg(target_feature = "f16c")] unsafe fn load(mem_addr: *const f16) -> Self::Unit { _mm256_cvtph_ps(_mm_loadu_si128(mem_addr as *const __m128i)) } #[cfg(not(target_feature = "f16c"))] unsafe fn load(mem_addr: *const f16) -> Self::Unit { let mut tmp = [0.0f32; 8]; for i in 0..8 { tmp[i] = (*mem_addr.add(i)).to_f32(); } _mm256_loadu_ps(tmp.as_ptr()) } unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit { _mm256_add_ps(a, b) } unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit { _mm256_add_ps(_mm256_mul_ps(b, c), a) } #[cfg(target_feature = "f16c")] unsafe fn vec_store(mem_addr: *mut f16, a: Self::Unit) { _mm_storeu_si128(mem_addr as *mut __m128i, _mm256_cvtps_ph(a, 0)) } #[cfg(not(target_feature = "f16c"))] unsafe fn vec_store(mem_addr: *mut f16, a: Self::Unit) { let mut tmp = [0.0f32; 8]; _mm256_storeu_ps(tmp.as_mut_ptr(), a); for i in 0..8 { *mem_addr.add(i) = f16::from_f32(tmp[i]); } } unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) { let mut offset = ARR >> 1; for i in 0..offset { x[i] = _mm256_add_ps(x[i], x[offset + i]); } offset >>= 1; for i in 0..offset { x[i] = _mm256_add_ps(x[i], x[offset + i]); } offset >>= 1; for i in 0..offset { x[i] = _mm256_add_ps(x[i], x[offset + i]); } let t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), _mm256_extractf128_ps(x[0], 1)); let t1 = _mm_hadd_ps(t0, t0); *y = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); } } pub struct CurrentCpuBF16 {} impl CpuBF16<ARR> for CurrentCpuBF16 { type Unit = __m256; type Array = [__m256; ARR]; const STEP: usize = STEP; const EPR: usize = EPR; fn n() -> usize { ARR } unsafe fn zero() -> Self::Unit { _mm256_setzero_ps() } unsafe fn zero_array() -> Self::Array { [Self::zero(); ARR] } unsafe fn from_f32(v: 
f32) -> Self::Unit { _mm256_set1_ps(v) } #[cfg(target_feature = "f16c")] unsafe fn load(mem_addr: *const bf16) -> Self::Unit { _mm256_cvtph_ps(_mm_loadu_si128(mem_addr as *const __m128i)) } #[cfg(not(target_feature = "f16c"))] unsafe fn load(mem_addr: *const bf16) -> Self::Unit { let mut tmp = [0.0f32; 8]; for i in 0..8 { tmp[i] = (*mem_addr.add(i)).to_f32(); } _mm256_loadu_ps(tmp.as_ptr()) } unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit { _mm256_add_ps(a, b) } unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit { _mm256_add_ps(_mm256_mul_ps(b, c), a) } #[cfg(target_feature = "f16c")] unsafe fn vec_store(mem_addr: *mut bf16, a: Self::Unit) { _mm_storeu_si128(mem_addr as *mut __m128i, _mm256_cvtps_ph(a, 0)) } #[cfg(not(target_feature = "f16c"))] unsafe fn vec_store(mem_addr: *mut bf16, a: Self::Unit) { let mut tmp = [0.0f32; 8]; _mm256_storeu_ps(tmp.as_mut_ptr(), a); for i in 0..8 { *mem_addr.add(i) = bf16::from_f32(tmp[i]); } } unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) { let mut offset = ARR >> 1; for i in 0..offset { x[i] = _mm256_add_ps(x[i], x[offset + i]); } offset >>= 1; for i in 0..offset { x[i] = _mm256_add_ps(x[i], x[offset + i]); } offset >>= 1; for i in 0..offset { x[i] = _mm256_add_ps(x[i], x[offset + i]); } let t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), _mm256_extractf128_ps(x[0], 1)); let t1 = _mm_hadd_ps(t0, t0); *y = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); } }
candle/candle-core/src/cpu/avx.rs/0
{ "file_path": "candle/candle-core/src/cpu/avx.rs", "repo_id": "candle", "token_count": 3288 }
29
//! Types for elements that can be stored and manipulated using tensors. #![allow(clippy::redundant_closure_call)] use crate::backend::BackendStorage; use crate::cpu::kernels::VecOps; use crate::{CpuStorage, CpuStorageRef, Error, Result}; /// The different types of elements allowed in tensors. #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum DType { // Floating-point 8 bits integer (4-bit exponent, 3-bit mantissa). F8E4M3, // Unsigned 8 bits integer. U8, // Unsigned 32 bits integer. U32, // Signed 64 bits integer. I64, // Brain floating-point using half precision (16 bits). BF16, // Floating-point using half precision (16 bits). F16, // Floating-point using single precision (32 bits). F32, // Floating-point using double precision (64 bits). F64, } #[derive(Debug, PartialEq, Eq)] pub struct DTypeParseError(String); impl std::fmt::Display for DTypeParseError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "cannot parse '{}' as a dtype", self.0) } } impl std::error::Error for DTypeParseError {} impl std::str::FromStr for DType { type Err = DTypeParseError; fn from_str(s: &str) -> std::result::Result<Self, Self::Err> { match s { "u8" => Ok(Self::U8), "u32" => Ok(Self::U32), "i64" => Ok(Self::I64), "bf16" => Ok(Self::BF16), "f16" => Ok(Self::F16), "f32" => Ok(Self::F32), "f64" => Ok(Self::F64), "f8_e4m3" => Ok(Self::F8E4M3), _ => Err(DTypeParseError(s.to_string())), } } } impl DType { /// String representation for dtypes. pub fn as_str(&self) -> &'static str { match self { Self::U8 => "u8", Self::U32 => "u32", Self::I64 => "i64", Self::BF16 => "bf16", Self::F16 => "f16", Self::F32 => "f32", Self::F64 => "f64", Self::F8E4M3 => "f8_e4m3", } } /// The size used by each element in bytes, i.e. 1 for `U8`, 4 for `F32`. pub fn size_in_bytes(&self) -> usize { match self { Self::U8 => 1, Self::F8E4M3 => 1, Self::U32 => 4, Self::I64 => 8, Self::BF16 => 2, Self::F16 => 2, Self::F32 => 4, Self::F64 => 8, } } pub fn is_int(&self) -> bool { match self { Self::U8 | Self::U32 | Self::I64 => true, Self::BF16 | Self::F16 | Self::F32 | Self::F64 | Self::F8E4M3 => false, } } pub fn is_float(&self) -> bool { match self { Self::U8 | Self::U32 | Self::I64 => false, Self::BF16 | Self::F16 | Self::F32 | Self::F64 | Self::F8E4M3 => true, } } } pub trait WithDType: Sized + Copy + num_traits::NumAssign + std::cmp::PartialOrd + std::fmt::Display + 'static + Send + Sync + std::any::Any + crate::cpu::kernels::VecOps { const DTYPE: DType; fn from_f64(v: f64) -> Self; fn to_f64(self) -> f64; fn to_scalar(self) -> crate::scalar::Scalar; fn cpu_storage_ref(data: &[Self]) -> CpuStorageRef<'_>; fn to_cpu_storage_owned(data: Vec<Self>) -> CpuStorage; fn to_cpu_storage(data: &[Self]) -> CpuStorage { Self::to_cpu_storage_owned(data.to_vec()) } fn cpu_storage_as_slice(s: &CpuStorage) -> Result<&[Self]>; fn cpu_storage_data(s: CpuStorage) -> Result<Vec<Self>>; } macro_rules! 
with_dtype { ($ty:ty, $dtype:ident, $from_f64:expr, $to_f64:expr) => { impl WithDType for $ty { const DTYPE: DType = DType::$dtype; fn from_f64(v: f64) -> Self { $from_f64(v) } fn to_f64(self) -> f64 { $to_f64(self) } fn to_scalar(self) -> crate::scalar::Scalar { crate::scalar::Scalar::$dtype(self) } fn cpu_storage_ref(data: &[Self]) -> CpuStorageRef<'_> { CpuStorageRef::$dtype(data) } fn to_cpu_storage_owned(data: Vec<Self>) -> CpuStorage { CpuStorage::$dtype(data) } fn cpu_storage_data(s: CpuStorage) -> Result<Vec<Self>> { match s { CpuStorage::$dtype(data) => Ok(data), _ => Err(Error::UnexpectedDType { expected: DType::$dtype, got: s.dtype(), msg: "unexpected dtype", } .bt()), } } fn cpu_storage_as_slice(s: &CpuStorage) -> Result<&[Self]> { match s { CpuStorage::$dtype(data) => Ok(data), _ => Err(Error::UnexpectedDType { expected: DType::$dtype, got: s.dtype(), msg: "unexpected dtype", } .bt()), } } } }; } use float8::F8E4M3; use half::{bf16, f16}; with_dtype!(u8, U8, |v: f64| v as u8, |v: u8| v as f64); with_dtype!(u32, U32, |v: f64| v as u32, |v: u32| v as f64); with_dtype!(i64, I64, |v: f64| v as i64, |v: i64| v as f64); with_dtype!(f16, F16, f16::from_f64, f16::to_f64); with_dtype!(bf16, BF16, bf16::from_f64, bf16::to_f64); with_dtype!(f32, F32, |v: f64| v as f32, |v: f32| v as f64); with_dtype!(f64, F64, |v: f64| v, |v: f64| v); with_dtype!(F8E4M3, F8E4M3, |v: f64| F8E4M3::from_f64(v), |v: F8E4M3| v .to_f64()); impl VecOps for F8E4M3 { fn max(self, rhs: Self) -> Self { F8E4M3::max(self, rhs) } fn min(self, rhs: Self) -> Self { F8E4M3::min(self, rhs) } } pub trait IntDType: WithDType + num_traits::Bounded { fn is_true(&self) -> bool; fn as_usize(&self) -> usize; } impl IntDType for i64 { fn is_true(&self) -> bool { *self != 0 } fn as_usize(&self) -> usize { *self as usize } } impl IntDType for u32 { fn is_true(&self) -> bool { *self != 0 } fn as_usize(&self) -> usize { *self as usize } } impl IntDType for u8 { fn is_true(&self) -> bool { *self != 0 } fn as_usize(&self) -> usize { *self as usize } } pub trait FloatDType: WithDType {} impl FloatDType for f16 {} impl FloatDType for bf16 {} impl FloatDType for f32 {} impl FloatDType for f64 {} impl FloatDType for F8E4M3 {}
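As a quick usage illustration of the API defined above (a sketch assuming `DType` is re-exported at the crate root, matching the `candle_core::DType` path used in doc examples elsewhere in the crate):

```rust
use candle_core::DType;
use std::str::FromStr;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Parse the textual name back into a DType via the FromStr impl above.
    let dt = DType::from_str("bf16")?;
    assert_eq!(dt, DType::BF16);

    // Round-trip the string form and query element size and kind.
    assert_eq!(dt.as_str(), "bf16");
    assert_eq!(dt.size_in_bytes(), 2);
    assert!(dt.is_float() && !dt.is_int());
    Ok(())
}
```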
candle/candle-core/src/dtype.rs/0
{ "file_path": "candle/candle-core/src/dtype.rs", "repo_id": "candle", "token_count": 3549 }
30
#![allow(unused)] use super::GgmlDType; use crate::{Error, MetalDevice, MetalStorage, Result}; pub struct QMetalStorage { dtype: GgmlDType, device: MetalDevice, } impl QMetalStorage { pub fn zeros(_: &MetalDevice, _: usize, _: GgmlDType) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } pub fn dtype(&self) -> GgmlDType { self.dtype } pub fn device(&self) -> &MetalDevice { &self.device } pub fn dequantize(&self, _elem_count: usize) -> Result<MetalStorage> { Err(Error::NotCompiledWithMetalSupport) } pub fn quantize(&mut self, _src: &MetalStorage) -> Result<()> { Err(Error::NotCompiledWithMetalSupport) } pub fn storage_size_in_bytes(&self) -> usize { 0 } pub fn fwd( &self, _self_shape: &crate::Shape, _storage: &MetalStorage, _layout: &crate::Layout, ) -> Result<(MetalStorage, crate::Shape)> { Err(Error::NotCompiledWithMetalSupport) } } pub fn load_quantized<T: super::GgmlType + Send + Sync + 'static>( _device: &MetalDevice, _data: &[T], ) -> Result<super::QStorage> { Err(Error::NotCompiledWithMetalSupport) }
candle/candle-core/src/quantized/dummy_metal.rs/0
{ "file_path": "candle/candle-core/src/quantized/dummy_metal.rs", "repo_id": "candle", "token_count": 522 }
31
//! Tensors are N-dimensional matrixes of elements using a single data type. #![allow(clippy::redundant_closure_call)] use crate::backend::{BackendDevice, BackendStorage}; use crate::op::{BackpropOp, BinaryOp, CmpOp, Op, ReduceOp, UnaryOp}; use crate::scalar::TensorOrScalar; use crate::shape::{Dim, Dims, ShapeWithOneHole}; use crate::{bail, storage::Storage, DType, Device, Error, Layout, Result, Shape}; use std::sync::{Arc, RwLock}; /// Unique identifier for tensors. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct TensorId(usize); impl TensorId { fn new() -> Self { // https://users.rust-lang.org/t/idiomatic-rust-way-to-generate-unique-id/33805 use std::sync::atomic; static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1); Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed)) } } pub struct Tensor_ { id: TensorId, // As we provide inner mutability on the tensor content, the alternatives are: // - Using a mutex, this would have the highest cost when retrieving the storage but would // prevent errors when concurrent access takes place. Mutex would also be subject to // deadlocks for example using the current code if the same tensor is used twice by a single // binary op. // - Using a refcell unsafe cell would have some intermediary cost, borrow checking would be // verified dynamically, but the resulting tensors would not be send or sync. // - Using an unsafe cell would have the lowest cost but undefined behavior on concurrent // accesses. // Ideally, we would use Arc<Storage> for tensors on which we don't plan on modifying the data // and Arc<Mutex<Storage>> for tensors where the data could be modified, e.g. variables but // that's tricky to encode in the current setup. storage: Arc<RwLock<Storage>>, layout: Layout, op: BackpropOp, is_variable: bool, dtype: DType, device: Device, } impl AsRef<Tensor> for Tensor { fn as_ref(&self) -> &Tensor { self } } // Tensors are refcounted so that cloning is cheap when building the op graph. // Storages are also refcounted independently so that its possible to avoid // copying the storage for operations that only modify the shape or stride. #[derive(Clone)] /// The core struct for manipulating tensors. /// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// /// let a = Tensor::arange(0f32, 6f32, &Device::Cpu)?.reshape((2, 3))?; /// let b = Tensor::arange(0f32, 12f32, &Device::Cpu)?.reshape((3, 4))?; /// /// let c = a.matmul(&b)?; /// # Ok::<(), candle_core::Error>(()) /// ``` /// /// Tensors are reference counted with [`Arc`] so cloning them is cheap. pub struct Tensor(Arc<Tensor_>); impl std::ops::Deref for Tensor { type Target = Tensor_; fn deref(&self) -> &Self::Target { self.0.as_ref() } } macro_rules! unary_op { ($fn_name:ident, $op_name:ident) => { pub fn $fn_name(&self) -> Result<Self> { let shape = self.shape(); if shape.elem_count() == 0 { return Ok(self.clone()); } let storage = self .storage() .unary_impl::<crate::op::$op_name>(self.layout())?; let op = BackpropOp::new1(self, |s| Op::Unary(s, UnaryOp::$op_name)); Ok(from_storage(storage, shape.clone(), op, false)) } }; } macro_rules! 
binary_op { ($fn_name:ident, $op_name:ident) => { pub fn $fn_name(&self, rhs: &Self) -> Result<Self> { let shape = self.same_shape_binary_op(rhs, stringify!($fn_name))?; if shape.elem_count() == 0 { return Ok(self.clone()); } let storage = self.storage().binary_impl::<crate::op::$op_name>( &*rhs.storage(), self.layout(), rhs.layout(), )?; let op = BackpropOp::new2(self, rhs, |t1, t2| Op::Binary(t1, t2, BinaryOp::$op_name)); Ok(from_storage(storage, shape.clone(), op, false)) } }; } macro_rules! binary_op_scalar { ($fn_name:ident, $op_name:ident) => { pub fn $fn_name<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { let rhs = match rhs.to_tensor_scalar()? { crate::scalar::TensorScalar::Tensor(rhs) => rhs, crate::scalar::TensorScalar::Scalar(rhs) => rhs .to_dtype(self.dtype())? .to_device(self.device())? .broadcast_as(self.shape())?, }; let shape = self.same_shape_binary_op(&rhs, stringify!($fn_name))?; if self.elem_count() == 0 { return Ok(self.clone()); } let storage = self.storage().binary_impl::<crate::op::$op_name>( &*rhs.storage(), self.layout(), rhs.layout(), )?; let op = BackpropOp::new2(self, &rhs, |t1, t2| Op::Binary(t1, t2, BinaryOp::$op_name)); Ok(from_storage(storage, shape.clone(), op, false)) } }; } macro_rules! broadcast_binary_op { ($fn_name:ident, $inner_fn_name:ident) => { pub fn $fn_name(&self, rhs: &Self) -> Result<Self> { let lhs = self; let shape = lhs .shape() .broadcast_shape_binary_op(rhs.shape(), stringify!($fn_name))?; let l_broadcast = shape != *lhs.shape(); let r_broadcast = shape != *rhs.shape(); match (l_broadcast, r_broadcast) { (true, true) => lhs .broadcast_as(&shape)? .$inner_fn_name(&rhs.broadcast_as(&shape)?), (false, true) => lhs.$inner_fn_name(&rhs.broadcast_as(&shape)?), (true, false) => lhs.broadcast_as(&shape)?.$inner_fn_name(rhs), (false, false) => lhs.$inner_fn_name(rhs), } } }; } /// Creates a fresh tensor structure based on a storage and a shape, this uses contiguous strides. pub(crate) fn from_storage<S: Into<Shape>>( storage: Storage, shape: S, op: BackpropOp, is_variable: bool, ) -> Tensor { let dtype = storage.dtype(); let device = storage.device(); let tensor_ = Tensor_ { id: TensorId::new(), storage: Arc::new(RwLock::new(storage)), layout: Layout::contiguous(shape), op, is_variable, dtype, device, }; Tensor(Arc::new(tensor_)) } impl Tensor { pub(crate) fn ones_impl<S: Into<Shape>>( shape: S, dtype: DType, device: &Device, is_variable: bool, ) -> Result<Self> { let none = BackpropOp::none(); let shape = shape.into(); let mut storage = unsafe { device.alloc_uninit(&shape, dtype)? }; let layout = Layout::contiguous(shape.clone()); storage.const_set(crate::scalar::Scalar::one(dtype), &layout)?; Ok(from_storage(storage, shape, none, is_variable)) } /// Creates a new tensor filled with ones. 
/// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// let a = Tensor::ones((2, 3), DType::F32, &Device::Cpu)?; /// let b = Tensor::from_slice(&[1.0f32, 1.0, 1.0, 1.0, 1.0, 1.0], (2, 3), &Device::Cpu)?; /// // a == b /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn ones<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> { Self::ones_impl(shape, dtype, device, false) } pub fn const_set(&self, value: crate::scalar::Scalar) -> Result<()> { self.storage_mut().const_set(value, self.layout()) } pub fn zero_set(&self) -> Result<()> { self.const_set(crate::scalar::Scalar::zero(self.dtype())) } pub fn one_set(&self) -> Result<()> { self.const_set(crate::scalar::Scalar::one(self.dtype())) } /// Creates a new tensor filled with ones with same shape, dtype, and device as the other tensor. /// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// let b = a.ones_like()?; /// // b == a + 1 /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn ones_like(&self) -> Result<Self> { Tensor::ones(self.shape(), self.dtype(), self.device()) } // Do not expose outside of the crate, the `is_variable=true` case should only be accessed from // the variable module. pub(crate) fn zeros_impl<S: Into<Shape>>( shape: S, dtype: DType, device: &Device, is_variable: bool, ) -> Result<Self> { let none = BackpropOp::none(); let shape = shape.into(); let storage = device.zeros(&shape, dtype)?; Ok(from_storage(storage, shape, none, is_variable)) } /// Creates a new tensor filled with zeros. /// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// let b = Tensor::from_slice(&[0.0f32, 0.0, 0.0, 0.0, 0.0, 0.0], (2, 3), &Device::Cpu)?; /// // a == b /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn zeros<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> { Self::zeros_impl(shape, dtype, device, false) } /// Creates a new tensor filled with zeros with same shape, dtype, and device as the other /// tensor. /// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// let b = a.zeros_like()?; /// // b is on CPU f32. /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn zeros_like(&self) -> Result<Self> { Tensor::zeros(self.shape(), self.dtype(), self.device()) } pub(crate) fn rand_impl<S: Into<Shape>, T: crate::FloatDType>( lo: T, up: T, s: S, device: &Device, is_variable: bool, ) -> Result<Self> { let s = s.into(); let storage = device.rand_uniform(lo, up, &s)?; let none = BackpropOp::none(); Ok(from_storage(storage, s, none, is_variable)) } pub(crate) fn rand_f64_impl<S: Into<Shape>>( lo: f64, up: f64, s: S, dtype: DType, device: &Device, is_variable: bool, ) -> Result<Self> { let s = s.into(); let storage = device.rand_uniform_f64(lo, up, &s, dtype)?; let none = BackpropOp::none(); Ok(from_storage(storage, s, none, is_variable)) } /// Creates a new tensor initialized with values sampled uniformly between `lo` and `up`. 
pub fn rand<S: Into<Shape>, T: crate::FloatDType>( lo: T, up: T, s: S, device: &Device, ) -> Result<Self> { Self::rand_impl(lo, up, s, device, false) } pub fn rand_like(&self, lo: f64, up: f64) -> Result<Self> { Tensor::rand_f64_impl(lo, up, self.shape(), self.dtype(), self.device(), false) } pub(crate) fn randn_impl<S: Into<Shape>, T: crate::FloatDType>( mean: T, std: T, s: S, device: &Device, is_variable: bool, ) -> Result<Self> { let s = s.into(); let storage = device.rand_normal(mean, std, &s)?; let none = BackpropOp::none(); Ok(from_storage(storage, s, none, is_variable)) } pub(crate) fn randn_f64_impl<S: Into<Shape>>( mean: f64, std: f64, s: S, dtype: DType, device: &Device, is_variable: bool, ) -> Result<Self> { let s = s.into(); let storage = device.rand_normal_f64(mean, std, &s, dtype)?; let none = BackpropOp::none(); Ok(from_storage(storage, s, none, is_variable)) } pub fn randn_like(&self, mean: f64, stdev: f64) -> Result<Self> { Tensor::randn_f64_impl( mean, stdev, self.shape(), self.dtype(), self.device(), false, ) } /// Creates a new tensor initialized with values sampled from a normal distribution with the /// specified `mean` and standard deviation `std`. pub fn randn<S: Into<Shape>, T: crate::FloatDType>( mean: T, std: T, s: S, device: &Device, ) -> Result<Self> { Self::randn_impl(mean, std, s, device, false) } pub(crate) fn new_impl<A: crate::device::NdArray>( array: A, shape: Shape, device: &Device, is_variable: bool, ) -> Result<Self> { let n: usize = shape.elem_count(); let buffer_size: usize = array.shape()?.elem_count(); if buffer_size != n { return Err(Error::ShapeMismatch { buffer_size, shape }.bt()); } let storage = device.storage(array)?; let none = BackpropOp::none(); Ok(from_storage(storage, shape, none, is_variable)) } /// Creates a new tensor on the specified device using the content and shape of the input. pub fn new<A: crate::device::NdArray>(array: A, device: &Device) -> Result<Self> { let shape = array.shape()?; Self::new_impl(array, shape, device, false) } /// Returns a new tensor with all the elements having the same specified value. ///```rust /// use candle_core::{Tensor, Device}; /// let a = Tensor::full(3.5, (2, 4), &Device::Cpu)?; /// /// assert_eq!(a.to_vec2::<f64>()?, &[ /// [3.5, 3.5, 3.5, 3.5], /// [3.5, 3.5, 3.5, 3.5], /// ]); /// # Ok::<(), candle_core::Error>(()) pub fn full<D: crate::WithDType, S: Into<Shape>>( value: D, shape: S, device: &Device, ) -> Result<Self> { let none = BackpropOp::none(); let shape = shape.into(); let mut storage = unsafe { device.alloc_uninit(&shape, D::DTYPE)? }; let layout = Layout::contiguous(shape.clone()); storage.const_set(value.to_scalar(), &layout)?; Ok(from_storage(storage, shape, none, false)) } /// Creates a new 1D tensor from an iterator. ///```rust /// use candle_core::{Tensor, Device}; /// let a = Tensor::from_iter( [1.0, 2.0, 3.0, 4.0].into_iter(), &Device::Cpu)?; /// /// assert_eq!(a.to_vec1::<f64>()?, &[1.0, 2.0, 3.0, 4.0]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn from_iter<D: crate::WithDType>( iter: impl IntoIterator<Item = D>, device: &Device, ) -> Result<Self> { let data = iter.into_iter().collect::<Vec<_>>(); let len = data.len(); Self::from_vec_impl(data, len, device, false) } /// Creates a new 1D tensor with values from the interval `[start, end)` taken with a common /// difference `1` from `start`. 
///```rust /// use candle_core::{Tensor, Device}; /// let a = Tensor::arange(2., 5., &Device::Cpu)?; /// /// assert_eq!(a.to_vec1::<f64>()?, &[2., 3., 4.]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn arange<D: crate::WithDType>(start: D, end: D, device: &Device) -> Result<Self> { Self::arange_step(start, end, D::one(), device) } /// Creates a new 1D tensor with values from the interval `[start, end)` taken with a common /// difference `step` from `start`. ///```rust /// use candle_core::{Tensor, Device}; /// let a = Tensor::arange_step(2.0, 4.0, 0.5, &Device::Cpu)?; /// /// assert_eq!(a.to_vec1::<f64>()?, &[2.0, 2.5, 3.0, 3.5]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn arange_step<D: crate::WithDType>( start: D, end: D, step: D, device: &Device, ) -> Result<Self> { if D::is_zero(&step) { bail!("step cannot be zero") } let mut data = vec![]; let mut current = start; if step >= D::zero() { while current < end { data.push(current); current += step; } } else { while current > end { data.push(current); current += step; } } let len = data.len(); Self::from_vec_impl(data, len, device, false) } pub(crate) fn from_vec_impl<S: ShapeWithOneHole, D: crate::WithDType>( data: Vec<D>, shape: S, device: &Device, is_variable: bool, ) -> Result<Self> { let shape = shape.into_shape(data.len())?; let storage = device.storage_owned(data)?; let none = BackpropOp::none(); Ok(from_storage(storage, shape, none, is_variable)) } /// Creates a new tensor initialized with values from the input vector. The number of elements /// in this vector must be the same as the number of elements defined by the shape. /// If the device is cpu, no data copy is made. ///```rust /// use candle_core::{Tensor, Device}; /// let a = Tensor::from_vec(vec!{1., 2., 3., 4., 5., 6.}, (2, 3), &Device::Cpu)?; /// /// assert_eq!(a.to_vec2::<f64>()?, &[ /// [1., 2., 3.], /// [4., 5., 6.] /// ]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn from_vec<S: ShapeWithOneHole, D: crate::WithDType>( data: Vec<D>, shape: S, device: &Device, ) -> Result<Self> { Self::from_vec_impl(data, shape, device, false) } /// Creates a new tensor initialized with values from the input slice. The number of elements /// in this vector must be the same as the number of elements defined by the shape. ///```rust /// use candle_core::{Tensor, Device}; /// let values = vec![1., 2., 3., 4., 5., 6., 7., 8.]; /// let a = Tensor::from_slice(&values[1..7], (2, 3), &Device::Cpu)?; /// /// assert_eq!(a.to_vec2::<f64>()?, &[ /// [2., 3., 4.], /// [5., 6., 7.] /// ]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn from_slice<S: ShapeWithOneHole, D: crate::WithDType>( array: &[D], shape: S, device: &Device, ) -> Result<Self> { let shape = shape.into_shape(array.len())?; let storage = device.storage_from_slice(array)?; let none = BackpropOp::none(); Ok(from_storage(storage, shape, none, false)) } pub(crate) fn same_shape_binary_op(&self, rhs: &Self, op: &'static str) -> Result<&Shape> { let lhs = self.shape(); let rhs = rhs.shape(); if lhs != rhs { Err(Error::ShapeMismatchBinaryOp { lhs: lhs.clone(), rhs: rhs.clone(), op, } .bt()) } else { Ok(lhs) } } /// Returns true if the computation graph should track this op, that is if it is /// a variable or if it has some variable as dependencies. pub fn track_op(&self) -> bool { self.is_variable || self.op.is_some() } // TODO: Also make an inplace version or a pre-allocated? This could be tricky // if this can create cycles in the compute graph. 
binary_op!(add, Add); binary_op!(mul, Mul); binary_op!(sub, Sub); binary_op!(div, Div); binary_op_scalar!(maximum, Maximum); binary_op_scalar!(minimum, Minimum); broadcast_binary_op!(broadcast_add, add); broadcast_binary_op!(broadcast_mul, mul); broadcast_binary_op!(broadcast_sub, sub); broadcast_binary_op!(broadcast_div, div); broadcast_binary_op!(broadcast_maximum, maximum); broadcast_binary_op!(broadcast_minimum, minimum); broadcast_binary_op!(broadcast_eq, eq); broadcast_binary_op!(broadcast_ne, ne); broadcast_binary_op!(broadcast_lt, lt); broadcast_binary_op!(broadcast_le, le); broadcast_binary_op!(broadcast_gt, gt); broadcast_binary_op!(broadcast_ge, ge); unary_op!(recip, Recip); unary_op!(neg, Neg); unary_op!(exp, Exp); unary_op!(log, Log); unary_op!(sin, Sin); unary_op!(cos, Cos); unary_op!(tanh, Tanh); unary_op!(abs, Abs); unary_op!(sqr, Sqr); unary_op!(sqrt, Sqrt); unary_op!(gelu, Gelu); unary_op!(gelu_erf, GeluErf); unary_op!(erf, Erf); unary_op!(relu, Relu); unary_op!(silu, Silu); unary_op!(ceil, Ceil); unary_op!(floor, Floor); unary_op!(round, Round); unary_op!(sign, Sign); /// Round element of the input tensor to the nearest integer. /// /// If the number of decimals is negative, it specifies the number of positions to the left of /// the decimal point. pub fn round_to(&self, decimals: i32) -> Result<Self> { let mult = 10f64.powi(decimals); (self * mult)?.round()? * (1f64 / mult) } /// Retrieves the single scalar value hold in the tensor. If the tensor contains multiple /// dimensions, an error is returned instead. pub fn to_scalar<S: crate::WithDType>(&self) -> Result<S> { if self.rank() != 0 { Err(Error::UnexpectedNumberOfDims { expected: 0, got: self.rank(), shape: self.shape().clone(), } .bt())? } let from_cpu_storage = |cpu_storage: &crate::CpuStorage| { let data = S::cpu_storage_as_slice(cpu_storage)?; Ok::<_, Error>(data[self.layout().start_offset()]) }; match &*self.storage() { Storage::Cpu(cpu_storage) => from_cpu_storage(cpu_storage), Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?), Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?), } } /// An alias for `to_scalar`. pub fn to_vec0<S: crate::WithDType>(&self) -> Result<S> { self.to_scalar::<S>() } /// Repeat this tensor along the specified dimensions. pub fn repeat<S: Into<Shape>>(&self, shape: S) -> Result<Tensor> { // Similar to PyTorch, we extend the number of dimensions of self if needed. let repeats = shape.into(); let repeats = repeats.dims(); let mut inp = if self.rank() < repeats.len() { let shape = [vec![1; repeats.len() - self.rank()], self.dims().to_vec()].concat(); self.reshape(shape)? } else { self.clone() }; for (idx, &repeat) in repeats.iter().enumerate() { if repeat > 1 { inp = Tensor::cat(&vec![&inp; repeat], idx)? } } Ok(inp) } /// Creates grids of coordinates specified by the 1D inputs. /// /// # Arguments /// /// * `args` - A slice of 1D tensors. /// * `xy_indexing` - Whether to use xy indexing or ij indexing. If xy is selected, the /// first dimension corresponds to the cardinality of the second input and the second /// dimension corresponds to the cardinality of the first input. If ij is selected, the /// dimensions are in the same order as the cardinality of the inputs. 
/// /// # Examples /// /// ```rust /// use candle_core::{Tensor, Device, Shape}; /// let x = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?; /// let y = Tensor::new(&[4f32, 5., 6.], &Device::Cpu)?; /// /// let grids_xy = Tensor::meshgrid(&[&x, &y], true)?; /// /// assert_eq!(grids_xy.len(), 2); /// assert_eq!(grids_xy[0].dims(), &[3, 3]); /// /// assert_eq!(grids_xy[0].to_vec2::<f32>()?, &[[1., 2., 3.], [1., 2., 3.], [1., 2., 3.]]); /// assert_eq!(grids_xy[1].to_vec2::<f32>()?, &[[4., 4., 4.], [5., 5., 5.], [6., 6., 6.]]); /// /// let grids_ij = Tensor::meshgrid(&[&x, &y], false)?; /// /// assert_eq!(grids_ij[0].to_vec2::<f32>()?, &[[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]]); /// assert_eq!(grids_ij[1].to_vec2::<f32>()?, &[[4., 5., 6.], [4., 5., 6.], [4., 5., 6.]]); /// # Ok::<(), candle_core::Error>(()) /// ``` /// /// # Errors /// /// * Will return `Err` if `args` contains less than 2 tensors. /// pub fn meshgrid<A: AsRef<Tensor>>(args: &[A], xy_indexing: bool) -> Result<Vec<Self>> { if args.len() <= 1 { Err(Error::OpRequiresAtLeastTwoTensors { op: "meshgrid" }.bt())? } let args: Vec<_> = if xy_indexing { args.iter().rev().collect() } else { args.iter().collect() }; let mut shape = Vec::with_capacity(args.len()); for arg in args.iter() { shape.push(arg.as_ref().dims1()?) } let mut grids = Vec::with_capacity(args.len()); for idx in 0..args.len() { let mut ones = vec![1usize; args.len()]; ones[idx] = shape[idx]; let arg = args[idx].as_ref().reshape(ones)?; let mut repeats = shape.clone(); repeats[idx] = 1; let repeated_tensor = arg.repeat(repeats)?; grids.push(repeated_tensor); } if xy_indexing { grids.reverse(); } Ok(grids) } /// This operation multiplies the input tensor by `mul` then adds `add` and return the result. /// The input values `mul` and `add` are casted to the appropriate type so some rounding might /// be performed. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?; /// let a = a.affine(4., -2.)?; /// assert_eq!(a.to_vec2::<f32>()?, &[[-2.0, 2.0], [6.0, 10.0]]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn affine(&self, mul: f64, add: f64) -> Result<Self> { if self.elem_count() == 0 { return Ok(self.clone()); } let storage = self.storage().affine(self.layout(), mul, add)?; let op = BackpropOp::new1(self, |arg| Op::Affine { arg, mul, add }); Ok(from_storage(storage, self.shape(), op, false)) } /// Applies the Exponential Linear Unit (ELU) function on each element of the input tensor. pub fn elu(&self, alpha: f64) -> Result<Self> { if self.elem_count() == 0 { return Ok(self.clone()); } let storage = self.storage().elu(self.layout(), alpha)?; let op = BackpropOp::new1(self, |t| Op::Elu(t, alpha)); Ok(from_storage(storage, self.shape(), op, false)) } /// Raise the tensor to some float exponent `e`. pub fn powf(&self, e: f64) -> Result<Self> { if self.elem_count() == 0 { return Ok(self.clone()); } let storage = self.storage().powf(self.layout(), e)?; let op = BackpropOp::new1(self, |t| Op::Powf(t, e)); Ok(from_storage(storage, self.shape(), op, false)) } pub(crate) fn check_dim(&self, dim: usize, op: &'static str) -> Result<()> { if dim >= self.dims().len() { Err(Error::DimOutOfRange { shape: self.shape().clone(), dim: dim as i32, op, } .bt())? } else { Ok(()) } } /// Split a tensor into the specified number of chunks, this may return less chunks than /// specified. 
pub fn chunk<D: Dim>(&self, chunks: usize, dim: D) -> Result<Vec<Self>> { let dim = dim.to_index(self.shape(), "chunk")?; let size = self.dim(dim)?; if size < chunks { (0..size).map(|i| self.narrow(dim, i, 1)).collect() } else { let chunk_size = size / chunks; let cnt_additional = size % chunks; let mut tensors = vec![]; let mut sum_chunk_size = 0; for i in 0..chunks { let chunk_size = if i < cnt_additional { chunk_size + 1 } else { chunk_size }; let tensor = self.narrow(dim, sum_chunk_size, chunk_size)?; tensors.push(tensor); sum_chunk_size += chunk_size } Ok(tensors) } } /// Returns a new tensor that is a narrowed version of the input, the dimension `dim` /// ranges from `start` to `start + len`. /// ``` /// use candle_core::{Tensor, Device}; /// let a = Tensor::new(&[ /// [0f32, 1., 2.], /// [3. , 4., 5.], /// [6. , 7., 8.] /// ], &Device::Cpu)?; /// /// let b = a.narrow(0, 1, 2)?; /// assert_eq!(b.shape().dims(), &[2, 3]); /// assert_eq!(b.to_vec2::<f32>()?, &[ /// [3., 4., 5.], /// [6., 7., 8.] /// ]); /// /// let c = a.narrow(1, 1, 1)?; /// assert_eq!(c.shape().dims(), &[3, 1]); /// assert_eq!(c.to_vec2::<f32>()?, &[ /// [1.], /// [4.], /// [7.] /// ]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn narrow<D: Dim>(&self, dim: D, start: usize, len: usize) -> Result<Self> { let dims = self.dims(); let dim = dim.to_index(self.shape(), "narrow")?; let err = |msg| { Err::<(), _>( Error::NarrowInvalidArgs { shape: self.shape().clone(), dim, start, len, msg, } .bt(), ) }; if start > dims[dim] { err("start > dim_len")? } if start.saturating_add(len) > dims[dim] { err("start + len > dim_len")? } if start == 0 && dims[dim] == len { Ok(self.clone()) } else { let op = BackpropOp::new1(self, |t| Op::Narrow(t, dim, start, len)); let layout = self.layout().narrow(dim, start, len)?; let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout, op, is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } } fn squeeze_dims(self, dims: &[usize]) -> Result<Self> { match dims { [] => Ok(self), [i] => self.squeeze(*i), dims => { let dims = self .dims() .iter() .enumerate() .filter_map(|(dim_idx, &v)| { if dims.contains(&dim_idx) { None } else { Some(v) } }) .collect::<Vec<_>>(); self.reshape(dims) } } } fn reduce_impl<D: Dim>(&self, dim: D, keepdim: bool, op: ReduceOp) -> Result<Self> { let dim = dim.to_index(self.shape(), op.name())?; let storage = self.storage().reduce_op(op, self.layout(), &[dim])?; let mut dims = self.dims().to_vec(); dims[dim] = 1; let op = match op { ReduceOp::Sum | ReduceOp::Min | ReduceOp::Max => { BackpropOp::new1(self, |arg| Op::Reduce(arg, op, dims.to_vec())) } ReduceOp::ArgMin | ReduceOp::ArgMax => BackpropOp::none(), }; let res = from_storage(storage, dims, op, false); if keepdim { Ok(res) } else { res.squeeze_dims(&[dim]) } } fn sum_impl<D: Dims>(&self, sum_dims: D, keepdim: bool) -> Result<Self> { let sum_dims = sum_dims.to_indexes(self.shape(), "sum")?; let storage = self .storage() .reduce_op(ReduceOp::Sum, self.layout(), &sum_dims)?; let mut dims = self.dims().to_vec(); for &sum_dim in sum_dims.iter() { dims[sum_dim] = 1 } let op = BackpropOp::new1(self, |a| Op::Reduce(a, ReduceOp::Sum, dims.to_vec())); let sum = from_storage(storage, dims, op, false); if keepdim { Ok(sum) } else { sum.squeeze_dims(&sum_dims) } } /// Roll the tensor input along the given dimension. /// Elements that are shifted beyond the last position are re-introduced at the first position. 
/// /// ```rust /// # use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let tensor = tensor.roll(1, 0)?; /// assert_eq!(tensor.to_vec2::<f32>()?, &[[4., 5.], [0., 1.], [2., 3.]]); /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let tensor = tensor.roll(-1, 0)?; /// assert_eq!(tensor.to_vec2::<f32>()?, &[[2., 3.], [4., 5.], [0., 1.]]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn roll<D>(&self, shift: i32, dim: D) -> Result<Self> where D: Dim + Clone, { let dim = dim.to_index(self.shape(), "roll")?; let dim_size = self.dim(dim)?; let shift = shift.rem_euclid(dim_size as i32) as usize; if shift == 0 { Ok(self.clone()) } else { let a = self.narrow(dim, 0, dim_size - shift)?; let b = self.narrow(dim, dim_size - shift, shift)?; Tensor::cat(&[&b, &a], dim) } } /// Returns the sum of all elements in the input tensor. The sum is performed over all the /// input dimensions. /// /// The resulting tensor has a shape that is similar to the shape of the input tensor, except /// that the number of elements for each dimension index in `sum_dims` is 1. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?; /// let s = a.sum_keepdim(0)?; /// assert_eq!(s.to_vec2::<f32>()?, &[[2., 4.]]); /// let s = a.sum_keepdim(1)?; /// assert_eq!(s.to_vec2::<f32>()?, &[[1.], [5.]]); /// let s = a.sum_keepdim((0, 1))?; /// assert_eq!(s.to_vec2::<f32>()?, &[[6.]]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn sum_keepdim<D: Dims>(&self, sum_dims: D) -> Result<Self> { self.sum_impl(sum_dims, true) } /// Returns the sum of all elements in the input tensor. The sum is performed over all the /// input dimensions and compared to `sum_keepdim` these dimensions are squeezed rather than /// kept. pub fn sum<D: Dims>(&self, sum_dims: D) -> Result<Self> { self.sum_impl(sum_dims, false) } /// Returns the mean of all elements in the input tensor. The mean is performed over all the /// input dimensions. /// /// The resulting tensor has a shape that is similar to the shape of the input tensor, except /// that the number of elements for each dimension index in `mean_dims` is 1. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?; /// let s = a.mean_keepdim(0)?; /// assert_eq!(s.to_vec2::<f32>()?, &[[1., 2.]]); /// let s = a.mean_keepdim(1)?; /// assert_eq!(s.to_vec2::<f32>()?, &[[0.5], [2.5]]); /// let s = a.mean_keepdim((0, 1))?; /// assert_eq!(s.to_vec2::<f32>()?, &[[1.5]]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn mean_keepdim<D: Dims>(&self, mean_dims: D) -> Result<Self> { let mean_dims = mean_dims.to_indexes(self.shape(), "mean-keepdim")?; let reduced_dim: usize = mean_dims.iter().map(|i| self.dims()[*i]).product(); let scale = 1f64 / (reduced_dim as f64); self.sum_impl(mean_dims, true)? * scale } /// Returns the mean of all elements in the input tensor. The mean is performed over all the /// input dimensions and compared to `mean_keepdim` these dimensions are squeezed rather than /// kept. pub fn mean<D: Dims>(&self, mean_dims: D) -> Result<Self> { let mean_dims = mean_dims.to_indexes(self.shape(), "mean")?; let reduced_dim: usize = mean_dims.iter().map(|i| self.dims()[*i]).product(); let scale = 1f64 / (reduced_dim as f64); self.sum_impl(mean_dims, false)? * scale } /// Returns the unbiased variance over the selected dimension. 
pub fn var_keepdim<D: Dim>(&self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "var")?; let mean = self.mean_keepdim(dim)?; let squares = self.broadcast_sub(&mean)?.sqr()?; squares.sum_impl(dim, true)? / (self.dim(dim)? - 1) as f64 } /// Returns the unbiased variance over the selected dimension. pub fn var<D: Dim>(&self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "var")?; self.var_keepdim(dim)?.squeeze(dim) } /// Gathers the maximum value across the selected dimension. The resulting shape has the same /// number of dimensions as the original tensor and the select dimension has a single element. pub fn max_keepdim<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, true, ReduceOp::Max) } /// Similar to `max_keepdim` but the target dimension is squeezed. pub fn max<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, false, ReduceOp::Max) } /// Gathers the minimum value across the selected dimension. The resulting shape has the same /// number of dimensions as the original tensor and the select dimension has a single element. pub fn min_keepdim<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, true, ReduceOp::Min) } /// Similar to `min_keepdim` but the target dimension is squeezed. pub fn min<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, false, ReduceOp::Min) } pub fn argmax_keepdim<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, true, ReduceOp::ArgMax) } /// Similar to `argmax_keepdim` but the target dimension is squeezed. pub fn argmax<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, false, ReduceOp::ArgMax) } pub fn argmin_keepdim<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, true, ReduceOp::ArgMin) } /// Similar to `argmin_keepdim` but the target dimension is squeezed. pub fn argmin<D: Dim>(&self, dim: D) -> Result<Self> { self.reduce_impl(dim, false, ReduceOp::ArgMin) } /// Element-wise comparison between two tensors, e.g. equality, greater than, ... The actual /// comparison operation is specified by the `op` argument. /// /// The returned tensor has the same shape as the original tensors and uses `u8` elements. pub fn cmp<T: TensorOrScalar>(&self, rhs: T, op: CmpOp) -> Result<Self> { let rhs = match rhs.to_tensor_scalar()? { crate::scalar::TensorScalar::Tensor(rhs) => rhs, crate::scalar::TensorScalar::Scalar(rhs) => rhs .to_dtype(self.dtype())? .to_device(self.device())? .broadcast_as(self.shape())?, }; let shape = self.same_shape_binary_op(&rhs, "cmp")?; let storage = self .storage() .cmp(op, &rhs.storage(), self.layout(), rhs.layout())?; let op = BackpropOp::new1(self, |a| Op::Cmp(a, op)); Ok(from_storage(storage, shape.dims(), op, false)) } /// Element-wise equality. pub fn eq<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { self.cmp(rhs, CmpOp::Eq) } /// Element-wise non-equality. pub fn ne<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { self.cmp(rhs, CmpOp::Ne) } /// Element-wise comparison with lower-than, the returned tensor uses value 1 where `self < /// rhs` and 0 otherwise. pub fn lt<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { self.cmp(rhs, CmpOp::Lt) } /// Element-wise comparison with greater-than, the returned tensor uses value 1 where `self > /// rhs` and 0 otherwise. pub fn gt<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { self.cmp(rhs, CmpOp::Gt) } /// Element-wise comparison with greater-equal, the returned tensor uses value 1 where `self >= /// rhs` and 0 otherwise. 
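    ///
    /// A small sketch of the expected behavior, using illustrative values (the result dtype is
    /// `u8` as for the other comparison ops):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[1u32, 2, 3], &Device::Cpu)?;
    /// let b = Tensor::new(&[2u32, 2, 2], &Device::Cpu)?;
    /// assert_eq!(a.ge(&b)?.to_vec1::<u8>()?, &[0, 1, 1]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```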
pub fn ge<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { self.cmp(rhs, CmpOp::Ge) } /// Element-wise comparison with lower-equal, the returned tensor uses value 1 where `self <= /// rhs` and 0 otherwise. pub fn le<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { self.cmp(rhs, CmpOp::Le) } /// Clamp the tensor values to be between `min` and `max`. pub fn clamp<T1: TensorOrScalar, T2: TensorOrScalar>(&self, min: T1, max: T2) -> Result<Self> { self.maximum(min)?.minimum(max) } /// Interpolate the input tensor to the `target_size` size, taking the value of the nearest element. /// /// The input tensor should have three dimensions, `(batch, channels, l)`, the returned /// tensor also has three dimensions, `(batch, channels, target_size)`. pub fn interpolate1d(&self, target_size: usize) -> Result<Self> { let (n, c, _l) = self.dims3()?; let op = BackpropOp::new1(self, |arg| Op::UpsampleNearest1D { arg, target_size }); let storage = self .storage() .upsample_nearest1d(self.layout(), target_size)?; Ok(from_storage(storage, (n, c, target_size), op, false)) } /// Alias for `interpolate1d`. pub fn upsample_nearest1d(&self, target_size: usize) -> Result<Self> { self.interpolate1d(target_size) } /// Interpolate the input tensor to the `(target_h, target_w)` size, taking the value of the /// nearest element. /// /// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned /// tensor also has four dimensions, `(batch, channels, target_h, target_w)`. pub fn interpolate2d(&self, target_h: usize, target_w: usize) -> Result<Self> { let (n, c, _h, _w) = self.dims4()?; let op = BackpropOp::new1(self, |arg| Op::UpsampleNearest2D { arg, target_h, target_w, }); let storage = self .storage() .upsample_nearest2d(self.layout(), target_h, target_w)?; Ok(from_storage(storage, (n, c, target_h, target_w), op, false)) } /// Alias for `interpolate2d`. pub fn upsample_nearest2d(&self, target_h: usize, target_w: usize) -> Result<Self> { self.interpolate2d(target_h, target_w) } /// 2D average pooling over an input tensor with multiple channels. /// /// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned /// tensor also has four dimensions, `(batch, channels, h', w')`. The pooling is performed on /// the two last dimensions using a kernel of size `sz`. The returned element is the average /// value over the kernel window. pub fn avg_pool2d<T: crate::ToUsize2>(&self, sz: T) -> Result<Self> { let sz = sz.to_usize2(); self.avg_pool2d_with_stride(sz, sz) } /// Same as `avg_pool2d` but with a `stride` that can be set to a value different from the /// kernel size. pub fn avg_pool2d_with_stride<T: crate::ToUsize2>( &self, kernel_size: T, stride: T, ) -> Result<Self> { let kernel_size = kernel_size.to_usize2(); let stride = stride.to_usize2(); let (n, c, h, w) = self.dims4()?; if h < kernel_size.0 || w < kernel_size.1 { bail!("kernel-size {kernel_size:?} is larger than the input size {h},{w}") } // https://pytorch.org/docs/stable/generated/torch.nn.AvgPool2d.html#torch.nn.AvgPool2d let h_out = (h - kernel_size.0) / stride.0 + 1; let w_out = (w - kernel_size.1) / stride.1 + 1; let op = BackpropOp::new1(self, |arg| Op::AvgPool2D { arg, kernel_size, stride, }); let storage = self .storage() .avg_pool2d(self.layout(), kernel_size, stride)?; Ok(from_storage(storage, (n, c, h_out, w_out), op, false)) } /// 2D max pooling over an input tensor with multiple channels. 
/// /// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned /// tensor also has four dimensions, `(batch, channels, h', w')`. The pooling is performed on /// the two last dimensions using a kernel of size `sz`, the returned element is the maximum /// value over the kernel window. pub fn max_pool2d<T: crate::ToUsize2>(&self, sz: T) -> Result<Self> { let sz = sz.to_usize2(); self.max_pool2d_with_stride(sz, sz) } /// Same as `max_pool2d` but with a `stride` that can be set to a value different from the /// kernel size. pub fn max_pool2d_with_stride<T: crate::ToUsize2>( &self, kernel_size: T, stride: T, ) -> Result<Self> { let kernel_size = kernel_size.to_usize2(); let stride = stride.to_usize2(); let (n, c, h, w) = self.dims4()?; if h < kernel_size.0 || w < kernel_size.1 { bail!("kernel-size {kernel_size:?} is larger than the input size {h},{w}") } // https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html#torch.nn.MaxPool2d let h_out = (h - kernel_size.0) / stride.0 + 1; let w_out = (w - kernel_size.1) / stride.1 + 1; let op = BackpropOp::new1(self, |arg| Op::MaxPool2D { arg, kernel_size, stride, }); let storage = self .storage() .max_pool2d(self.layout(), kernel_size, stride)?; Ok(from_storage(storage, (n, c, h_out, w_out), op, false)) } /// Computes the dot product of two 1D tensors. /// /// - If inputs are 1D vectors (`[n]`), returns their scalar dot product. /// - Panics if shapes are not compatible /// - Not supported for integer dtypes /// /// # Example (vectors) /// ```rust /// use candle_core::{Tensor, Device}; /// let t1 = Tensor::new(&[1.0, 2.0, 3.0], &Device::Cpu)?; /// let t2 = Tensor::new(&[4.0, 5.0, 6.0], &Device::Cpu)?; /// let res = t1.dot(&t2)?; /// assert_eq!(res.to_scalar::<f64>()?, 32.); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn dot(&self, rhs: &Self) -> Result<Self> { if self.dims().len() != 1 || rhs.dims().len() != 1 { return Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: rhs.shape().clone(), op: "dot", }); } (self * rhs).and_then(|ret| ret.sum_all()) } /// Computes the **Frobenius norm** (L2 norm of all elements) of the tensor. /// - Output is `sqrt(sum(x^2))`. /// - Always returns a scalar (`[]` shape). /// /// # Example /// ```rust /// use candle_core::{Tensor, Device}; /// let t = Tensor::new(&[[3., 4.], [0., 0.]], &Device::Cpu)?; /// let norm = t.norm()?; /// assert_eq!(norm.to_scalar::<f64>()?, 5.); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn norm(&self) -> Result<Self> { if self.dtype().is_int() { bail!("norm not supported for integer dtypes"); } self.sqr().and_then(|x| x.sum_all()).and_then(|x| x.sqrt()) } /// Performs strict matrix-vector multiplication (`[m, n] * [n] = [m]`). /// /// - If `self` is a matrix (`[m, n]`) and `rhs` is a vector (`[n]`), returns a vector (`[m]`). /// - **No broadcasting**: Panics if `self` is not 2D or if `rhs` is not 1D with matching size. 
/// /// # Example /// ```rust /// use candle_core::{Tensor, Device}; /// let mat = Tensor::new(&[[1., 2., 3.], [4., 5., 6.]], &Device::Cpu)?; /// let vec = Tensor::new(&[1., 1., 1.], &Device::Cpu)?; /// let res = mat.mv(&vec)?; /// assert_eq!(res.to_vec1::<f64>()?, [6., 15.]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn mv(&self, rhs: &Self) -> Result<Self> { // Strict shape checks let lhs_dims = self.dims(); let rhs_dims = rhs.dims(); if lhs_dims.len() != 2 || rhs_dims.len() != 1 || lhs_dims[1] != rhs_dims[0] { return Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: rhs.shape().clone(), op: "mv", }); } // Direct matmul after ensuring rhs is column vector self.matmul(&rhs.unsqueeze(1)?)?.squeeze(1) } /// Returns the matrix-multiplication of the input tensor with the other provided tensor. /// /// # Arguments /// /// * `self` - A tensor with dimensions `b1, b2, ..., bi, m, k`. /// * `rhs` - A tensor with dimensions `b1, b2, ..., bi, k, n`. /// /// The resulting tensor has dimensions `b1, b2, ..., bi, m, n`. pub fn matmul(&self, rhs: &Self) -> Result<Self> { let a_dims = self.shape().dims(); let b_dims = rhs.shape().dims(); let dim = a_dims.len(); if dim < 2 || b_dims.len() != dim { Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: rhs.shape().clone(), op: "matmul", } .bt())? } let m = a_dims[dim - 2]; let k = a_dims[dim - 1]; let k2 = b_dims[dim - 2]; let n = b_dims[dim - 1]; let c_shape = Shape::from(&a_dims[..dim - 2]).extend(&[m, n]); if c_shape.elem_count() == 0 || k == 0 { return Tensor::zeros(c_shape, self.dtype(), self.device()); } let batching: usize = a_dims[..dim - 2].iter().product(); let batching_b: usize = b_dims[..dim - 2].iter().product(); if k != k2 || batching != batching_b { Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: rhs.shape().clone(), op: "matmul", } .bt())? } let storage = self.storage().matmul( &rhs.storage(), (batching, m, n, k), self.layout(), rhs.layout(), )?; let op = BackpropOp::new2(self, rhs, Op::Matmul); Ok(from_storage(storage, c_shape, op, false)) } /// Matrix-multiplication with broadcasting support. /// /// Compared to `matmul` the two matrixes are allowed to have different dimensions as long as /// they are compatible for broadcast. E.g. if `self` has shape `(j, 1, n, k)` and `rhs` has /// shape `(l, k, m)`, the output will have shape `(j, l, n, m)`. pub fn broadcast_matmul(&self, rhs: &Self) -> Result<Self> { let lhs = self; let (l_shape, r_shape) = lhs.shape().broadcast_shape_matmul(rhs.shape())?; let l_broadcast = l_shape != *lhs.shape(); let r_broadcast = r_shape != *rhs.shape(); // TODO: Avoid concretising the broadcasted matrixes via contiguous. match (l_broadcast, r_broadcast) { (true, true) => lhs .broadcast_as(&l_shape)? .contiguous()? .matmul(&rhs.broadcast_as(&r_shape)?.contiguous()?), (false, true) => lhs.matmul(&rhs.broadcast_as(&r_shape)?.contiguous()?), (true, false) => lhs.broadcast_as(&l_shape)?.contiguous()?.matmul(rhs), (false, false) => lhs.matmul(rhs), } } /// Returns a tensor with the same shape as the input tensor, the values are taken from /// `on_true` if the input tensor value is not zero, and `on_false` at the positions where the /// input tensor is equal to zero. 
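    ///
    /// A minimal sketch using a `u8` mask (values chosen for illustration only):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let mask = Tensor::new(&[1u8, 0, 1], &Device::Cpu)?;
    /// let on_true = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    /// let on_false = Tensor::new(&[4f32, 5., 6.], &Device::Cpu)?;
    /// let res = mask.where_cond(&on_true, &on_false)?;
    /// assert_eq!(res.to_vec1::<f32>()?, &[1., 5., 3.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```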
pub fn where_cond(&self, on_true: &Self, on_false: &Self) -> Result<Self> { let _shap = self.same_shape_binary_op(on_true, "where_cond")?; let shape = self.same_shape_binary_op(on_false, "where_cond")?; let storage = self.storage().where_cond( self.layout(), &on_true.storage(), on_true.layout(), &on_false.storage(), on_false.layout(), )?; let op = BackpropOp::new3(self, on_true, on_false, Op::WhereCond); Ok(from_storage(storage, shape, op, false)) } /// Returns a tensor with the values from the `self` tensor at the index corresponding to the /// values hold in the `ids` tensor. /// /// # Arguments /// /// * `self` - A tensor with dimensions `v, h`. /// * `ids` - A tensor with dimensions `s` and with integer values between 0 and v (exclusive). /// /// The resulting tensor has dimensions `s, h`. `s` is called the sequence length, `v` the /// vocabulary size, and `h` the hidden size. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let values = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let ids = Tensor::new(&[2u32, 1u32, 2u32], &Device::Cpu)?; /// let emb = values.embedding(&ids)?; /// assert_eq!(emb.to_vec2::<f32>()?, &[[4., 5.], [2., 3.], [4., 5.]]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn embedding(&self, ids: &Self) -> Result<Self> { if self.rank() != 2 || ids.rank() != 1 { Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: ids.shape().clone(), op: "embedding", } .bt())? } self.index_select(ids, 0) } fn scatter_checks(&self, indexes: &Self, source: &Self, dim: usize) -> Result<()> { let source_dims = source.dims(); let self_dims = self.dims(); let mismatch = if source_dims.len() != self_dims.len() { true } else { let mut mismatch = false; for (i, (&d1, &d2)) in self_dims.iter().zip(source_dims.iter()).enumerate() { if i != dim && d1 != d2 { mismatch = true; break; } } mismatch }; if mismatch { Err(Error::ShapeMismatchBinaryOp { op: "scatter (self, src)", lhs: self.shape().clone(), rhs: source.shape().clone(), } .bt())? } if indexes.dims() != source.dims() { Err(Error::ShapeMismatchBinaryOp { op: "scatter (indexes, src)", lhs: indexes.shape().clone(), rhs: source.shape().clone(), } .bt())? } Ok(()) } pub fn scatter<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "scatter")?; self.scatter_checks(indexes, source, dim)?; let shape = self.shape(); let mut storage = unsafe { self.device().alloc_uninit(shape, self.dtype())? 
}; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; let layout = Layout::contiguous(shape); storage.scatter_set( &layout, &indexes.storage(), indexes.layout(), &source.storage(), source.layout(), dim, )?; let op = BackpropOp::new3(self, indexes, source, |t1, t2, t3| { Op::Scatter(t1, t2, t3, dim) }); Ok(from_storage(storage, self.shape(), op, false)) } pub fn scatter_set<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<()> { if self.same_storage(source) { crate::bail!("cannot use slice_set when self and src share their storage") } let dim = dim.to_index(self.shape(), "scatter-set")?; self.scatter_checks(indexes, source, dim)?; self.storage_mut().scatter_set( self.layout(), &indexes.storage(), indexes.layout(), &source.storage(), source.layout(), dim, )?; Ok(()) } pub fn scatter_add<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "scatter-add")?; self.scatter_checks(indexes, source, dim)?; let shape = self.shape(); let mut storage = unsafe { self.device().alloc_uninit(shape, self.dtype())? }; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; let layout = Layout::contiguous(shape); storage.scatter_add( &layout, &indexes.storage(), indexes.layout(), &source.storage(), source.layout(), dim, )?; let op = BackpropOp::new3(self, indexes, source, |t1, t2, t3| { Op::ScatterAdd(t1, t2, t3, dim) }); Ok(from_storage(storage, self.shape(), op, false)) } pub fn scatter_add_set<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<()> { if self.same_storage(source) { crate::bail!("cannot use slice_set when self and src share their storage") } let dim = dim.to_index(self.shape(), "scatter-add-set")?; self.scatter_checks(indexes, source, dim)?; self.storage_mut().scatter_add( self.layout(), &indexes.storage(), indexes.layout(), &source.storage(), source.layout(), dim, )?; Ok(()) } /// Embeds the values of the `src` tensor into the `self` tensor on the specified dimension. pub fn slice_scatter<D: Dim>(&self, src: &Self, dim: D, start: usize) -> Result<Self> { let dim = dim.to_index(self.shape(), "slice-scatter")?; if dim == 0 { self.slice_scatter0(src, start) } else { // TODO: Maybe we want to add a more efficient implementation at some point. self.transpose(0, dim)? .slice_scatter0(&src.transpose(0, dim)?, start)? .transpose(0, dim) } } /// Embeds the values of the `src` tensor into the `self` tensor on the first dimension. pub fn slice_scatter0(&self, src: &Self, start: usize) -> Result<Self> { if self.dtype() != src.dtype() { Err(Error::DTypeMismatchBinaryOp { lhs: self.dtype(), rhs: src.dtype(), op: "slice-scatter", } .bt())? } if self.device().location() != src.device.location() { Err(Error::DeviceMismatchBinaryOp { lhs: self.device().location(), rhs: src.device().location(), op: "slice-scatter", } .bt())? } if self.rank() != src.rank() { Err(Error::UnexpectedNumberOfDims { expected: self.rank(), got: src.rank(), shape: src.shape().clone(), } .bt())? } let shape_ok = self.dims() .iter() .zip(src.dims().iter()) .enumerate() .all(|(dim_idx, (&d1, &d2))| { if 0 == dim_idx { d2 + start <= d1 } else { d1 == d2 } }); if !shape_ok { Err(Error::ShapeMismatchBinaryOp { op: "slice-scatter (self, src)", lhs: self.shape().clone(), rhs: src.shape().clone(), } .bt())? } let mut storage = unsafe { self.device().alloc_uninit(self.shape(), self.dtype())? 
}; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; let offset = start * src.dims()[1..].iter().product::<usize>(); src.storage() .copy_strided_src(&mut storage, offset, src.layout())?; let op = BackpropOp::new2(self, src, |t1, t2| Op::SliceScatter0(t1, t2, start)); Ok(from_storage(storage, self.shape(), op, false)) } /// Accumulate element from `source` at indexes `indexes` and add them to `self`. pub fn index_add<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "index-add")?; let source_dims = source.dims(); let self_dims = self.dims(); let mismatch = if source_dims.len() != self_dims.len() { true } else { let mut mismatch = false; for (i, (&d1, &d2)) in self_dims.iter().zip(source_dims.iter()).enumerate() { if i != dim && d1 != d2 { mismatch = true; break; } } mismatch }; if mismatch { Err(Error::ShapeMismatchBinaryOp { op: "index-add (self, source)", lhs: self.shape().clone(), rhs: source.shape().clone(), } .bt())? } // The number of element in indexes must match the dimension on which the add is // performed on the source tensor (and the index values from `indexes` are taken from // the target tensor self) let indexes_len = indexes.dims1()?; if source_dims[dim] != indexes_len { Err(Error::ShapeMismatchBinaryOp { op: "index-add (ids, source))", lhs: indexes.shape().clone(), rhs: source.shape().clone(), } .bt())? } let storage = self.storage().index_add( self.layout(), &indexes.storage(), indexes.layout(), &source.storage(), source.layout(), dim, )?; let op = BackpropOp::new3(self, indexes, source, |t1, t2, t3| { Op::IndexAdd(t1, t2, t3, dim) }); Ok(from_storage(storage, self.shape(), op, false)) } /// Gather values across the target dimension. /// /// # Arguments /// /// * `self` - The input tensor. /// * `indexes` - The indices of elements to gather, this should have same number of dimensions as `self` /// and indexes.dims()[d] <= self.dims()[d] for all dimensions d != dim /// * `dim` - the target dimension. /// /// The resulting tensor has the same shape as `indexes` and use values from `self` indexed on /// dimension `dim` by the values in `indexes`. pub fn gather<D: Dim>(&self, indexes: &Self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "gather")?; let self_dims = self.dims(); let indexes_dims = indexes.dims(); let mismatch = if indexes_dims.len() != self_dims.len() { true } else { let mut mismatch = false; for (i, (&d1, &d2)) in self_dims.iter().zip(indexes_dims.iter()).enumerate() { if i != dim && d1 < d2 { mismatch = true; break; } } mismatch }; if mismatch { Err(Error::ShapeMismatchBinaryOp { op: "gather", lhs: self.shape().clone(), rhs: indexes.shape().clone(), } .bt())? } let storage = self.storage() .gather(self.layout(), &indexes.storage(), indexes.layout(), dim)?; let op = BackpropOp::new2(self, indexes, |t1, t2| Op::Gather(t1, t2, dim)); Ok(from_storage(storage, indexes.shape(), op, false)) } /// Select values for the input tensor at the target indexes across the specified dimension. /// /// The `indexes` is argument is an int tensor with a single dimension. /// The output has the same number of dimension as the `self` input. The target dimension of /// the output has length the length of `indexes` and the values are taken from `self` using /// the index from `indexes`. Other dimensions have the same number of elements as the input /// tensor. 
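    ///
    /// A short sketch of selecting rows with an index tensor (illustrative values):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
    /// let ids = Tensor::new(&[2u32, 0], &Device::Cpu)?;
    /// let rows = t.index_select(&ids, 0)?;
    /// assert_eq!(rows.to_vec2::<f32>()?, &[[4., 5.], [0., 1.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```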
pub fn index_select<D: Dim>(&self, indexes: &Self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "index-select")?; let indexes_len = match indexes.dims() { [l] => *l, _ => Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: indexes.shape().clone(), op: "index-select", } .bt())?, }; let storage = self.storage().index_select( &indexes.storage(), self.layout(), indexes.layout(), dim, )?; let mut dims = self.dims().to_vec(); dims[dim] = indexes_len; let op = BackpropOp::new2(self, indexes, |t1, t2| Op::IndexSelect(t1, t2, dim)); Ok(from_storage(storage, dims, op, false)) } /// Returns an iterator over position of the elements in the storage when ranging over the /// index tuples in lexicographic order. pub fn strided_index(&self) -> crate::StridedIndex<'_> { self.layout.strided_index() } /// Similar to `strided_index` but returns the position of the start of each contiguous block /// as well as the length of the contiguous blocks. For a contiguous tensor, the index iterator /// will only return the start offset and the size would be the number of elements in the /// tensor. pub fn strided_blocks(&self) -> crate::StridedBlocks<'_> { self.layout.strided_blocks() } /// Returns the data contained in a 1D tensor as a vector of scalar values. pub fn to_vec1<S: crate::WithDType>(&self) -> Result<Vec<S>> { if self.rank() != 1 { Err(Error::UnexpectedNumberOfDims { expected: 1, got: self.rank(), shape: self.shape().clone(), } .bt())? } let from_cpu_storage = |cpu_storage: &crate::CpuStorage| { let data = S::cpu_storage_as_slice(cpu_storage)?; let data = match self.layout.contiguous_offsets() { Some((o1, o2)) => data[o1..o2].to_vec(), None => self.strided_index().map(|i| data[i]).collect(), }; Ok::<Vec<_>, Error>(data) }; match &*self.storage() { Storage::Cpu(storage) => from_cpu_storage(storage), Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?), Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?), } } /// Returns the data contained in a 2D tensor as a vector of vector of scalar values. pub fn to_vec2<S: crate::WithDType>(&self) -> Result<Vec<Vec<S>>> { let (dim1, dim2) = self.dims2()?; let from_cpu_storage = |cpu_storage: &crate::CpuStorage| { let data = S::cpu_storage_as_slice(cpu_storage)?; let mut rows = vec![]; match self.layout.contiguous_offsets() { Some((o1, o2)) => { let data = &data[o1..o2]; for idx_row in 0..dim1 { rows.push(data[idx_row * dim2..(idx_row + 1) * dim2].to_vec()) } } None => { let mut src_index = self.strided_index(); for _idx_row in 0..dim1 { let row = (0..dim2).map(|_| data[src_index.next().unwrap()]).collect(); rows.push(row) } assert!(src_index.next().is_none()); } } Ok(rows) }; match &*self.storage() { Storage::Cpu(storage) => from_cpu_storage(storage), Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?), Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?), } } /// Returns the data contained in a 3D tensor. 
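    ///
    /// A small sketch on a 2x2x2 tensor built from `arange` (illustrative values):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::arange(0u32, 8u32, &Device::Cpu)?.reshape((2, 2, 2))?;
    /// assert_eq!(t.to_vec3::<u32>()?, &[[[0, 1], [2, 3]], [[4, 5], [6, 7]]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```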
pub fn to_vec3<S: crate::WithDType>(&self) -> Result<Vec<Vec<Vec<S>>>> { let (dim1, dim2, dim3) = self.dims3()?; let from_cpu_storage = |cpu_storage: &crate::CpuStorage| { let data = S::cpu_storage_as_slice(cpu_storage)?; let mut top_rows = vec![]; match self.layout.contiguous_offsets() { Some((o1, o2)) => { let data = &data[o1..o2]; let dim23 = dim2 * dim3; for idx1 in 0..dim1 { let data = &data[idx1 * dim23..(idx1 + 1) * dim23]; let mut rows = vec![]; for idx2 in 0..dim2 { rows.push(data[idx2 * dim3..(idx2 + 1) * dim3].to_vec()) } top_rows.push(rows); } } None => { let mut src_index = self.strided_index(); for _idx in 0..dim1 { let mut rows = vec![]; for _jdx in 0..dim2 { let row = (0..dim3).map(|_| data[src_index.next().unwrap()]).collect(); rows.push(row) } top_rows.push(rows); } assert!(src_index.next().is_none()); } } Ok(top_rows) }; match &*self.storage() { Storage::Cpu(storage) => from_cpu_storage(storage), Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?), Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?), } } /// The dtype for the elements stored in the input tensor. pub fn dtype(&self) -> DType { self.dtype } /// The device on which the input tensor is located. pub fn device(&self) -> &Device { &self.device } /// The tensor shape, i.e. dimension sizes on each axis. pub fn shape(&self) -> &Shape { self.layout().shape() } /// The dimension size for this tensor on each axis. pub fn dims(&self) -> &[usize] { self.shape().dims() } /// The dimension size for a specified dimension index. pub fn dim<D: Dim>(&self, dim: D) -> Result<usize> { let dim = dim.to_index(self.shape(), "dim")?; Ok(self.dims()[dim]) } /// The layout of the input tensor, this stores both the shape of the tensor as well as the /// strides and the start offset to apply to the underlying storage. pub fn layout(&self) -> &Layout { &self.layout } pub fn stride(&self) -> &[usize] { self.layout.stride() } /// The number of dimensions for this tensor, 0 for a scalar tensor, 1 for a 1D tensor, etc. pub fn rank(&self) -> usize { self.shape().rank() } /// The number of elements stored in this tensor. pub fn elem_count(&self) -> usize { self.shape().elem_count() } /// The unique identifier for this tensor. pub fn id(&self) -> TensorId { self.id } /// Whether this tensor is a variable or not. A variable is a tensor for which gradient is /// tracked and on which backpropagation can be performed. pub fn is_variable(&self) -> bool { self.is_variable } pub(crate) fn op(&self) -> &Option<Op> { &self.op } /// Computes the max of all the elements in this tensor and returns a tensor holding this /// scalar with zero dimensions. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let tensor = tensor.max_all()?; /// assert_eq!(tensor.to_scalar::<f32>()?, 5.); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn max_all(&self) -> Result<Tensor> { if self.rank() == 0 { Ok(self.clone()) } else { self.flatten_all()?.max(0) } } /// Computes the min of all the elements in this tensor and returns a tensor holding this /// scalar with zero dimensions. 
/// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let tensor = tensor.min_all()?; /// assert_eq!(tensor.to_scalar::<f32>()?, 0.); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn min_all(&self) -> Result<Tensor> { if self.rank() == 0 { Ok(self.clone()) } else { self.flatten_all()?.min(0) } } /// Computes the sum of all the elements in this tensor and returns a tensor holding this /// scalar with zero dimensions. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let tensor = tensor.sum_all()?; /// assert_eq!(tensor.to_scalar::<f32>()?, 15.); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn sum_all(&self) -> Result<Tensor> { let dims: Vec<_> = (0..self.rank()).collect(); self.sum(dims) } pub fn mean_all(&self) -> Result<Tensor> { self.sum_all()? / self.elem_count() as f64 } fn flatten_<D1: Dim, D2: Dim>( &self, start_dim: Option<D1>, end_dim: Option<D2>, ) -> Result<Tensor> { if self.rank() == 0 { self.reshape(1) } else { let start_dim = match start_dim { None => 0, Some(dim) => dim.to_index(self.shape(), "flatten")?, }; let end_dim = match end_dim { None => self.rank() - 1, Some(dim) => dim.to_index(self.shape(), "flatten")?, }; if start_dim < end_dim { let dims = self.dims(); let mut dst_dims = dims[..start_dim].to_vec(); dst_dims.push(dims[start_dim..end_dim + 1].iter().product::<usize>()); if end_dim + 1 < dims.len() { dst_dims.extend(&dims[end_dim + 1..]); } self.reshape(dst_dims) } else { Ok(self.clone()) } } } /// Flattens the input tensor on the dimension indexes from `start_dim` to `end_dim` (both /// inclusive). pub fn flatten<D1: Dim, D2: Dim>(&self, start_dim: D1, end_dim: D2) -> Result<Tensor> { self.flatten_(Some(start_dim), Some(end_dim)) } /// Flattens the input tensor on the dimension indexes from `0` to `end_dim` (inclusive). pub fn flatten_to<D: Dim>(&self, end_dim: D) -> Result<Tensor> { self.flatten_(None::<usize>, Some(end_dim)) } /// Flattens the input tensor on the dimension indexes from `start_dim` (inclusive) to the last /// dimension. pub fn flatten_from<D: Dim>(&self, start_dim: D) -> Result<Tensor> { self.flatten_(Some(start_dim), None::<usize>) } /// Flattens the input tensor by reshaping it into a one dimension tensor. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let tensor = tensor.flatten_all()?; /// assert_eq!(tensor.to_vec1::<f32>()?, &[0., 1., 2., 3., 4., 5.]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn flatten_all(&self) -> Result<Tensor> { self.flatten_(None::<usize>, None::<usize>) } /// Returns the sub-tensor fixing the index at `i` on the first dimension. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let t = tensor.get(0)?; /// assert_eq!(t.to_vec1::<f32>()?, &[0., 1.]); /// let t = tensor.get(1)?; /// assert_eq!(t.to_vec1::<f32>()?, &[2., 3.]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn get(&self, i: usize) -> Result<Tensor> { let dims = self.dims(); if dims.is_empty() { Ok(self.clone()) } else { self.narrow(0, i, 1)?.reshape(&dims[1..]) } } /// Returns the sub-tensor fixing the index at `index` on the dimension `dim`. 
/// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let t = tensor.get_on_dim(1, 0)?; /// assert_eq!(t.to_vec1::<f32>()?, &[0., 2., 4.]); /// let t = tensor.get_on_dim(1, 1)?; /// assert_eq!(t.to_vec1::<f32>()?, &[1., 3., 5.]); /// let t = tensor.get_on_dim(0, 1)?; /// assert_eq!(t.to_vec1::<f32>()?, &[2., 3.]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn get_on_dim<D: Dim>(&self, dim: D, index: usize) -> Result<Tensor> { let dim = dim.to_index(self.shape(), "get_on_dim")?; self.narrow(dim, index, 1)?.squeeze(dim) } /// Returns a tensor that is a transposed version of the input, the two last dimensions of the /// input are swapped. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let tensor = tensor.t()?; /// assert_eq!(tensor.to_vec2::<f32>()?, &[[0.0, 2.0, 4.0], [1.0, 3.0, 5.0]]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn t(&self) -> Result<Tensor> { let rank = self.rank(); if rank < 2 { Err(Error::UnexpectedNumberOfDims { expected: 2, got: rank, shape: self.shape().clone(), } .bt())? } self.transpose(rank - 2, rank - 1) } /// Returns a tensor that is a transposed version of the input, the given dimensions are /// swapped. pub fn transpose<D1: Dim, D2: Dim>(&self, dim1: D1, dim2: D2) -> Result<Tensor> { let dim1 = dim1.to_index(self.shape(), "transpose")?; let dim2 = dim2.to_index(self.shape(), "transpose")?; if dim1 == dim2 { return Ok(self.clone()); } let op = BackpropOp::new1(self, |t| Op::Transpose(t, dim1, dim2)); let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: self.layout.transpose(dim1, dim2)?, op, is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } /// Returns a tensor with the same data as the input where the dimensions have been permuted. /// dims must be a permutation, i.e. include each dimension index exactly once. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::arange(0u32, 120u32, &Device::Cpu)?.reshape((2, 3, 4, 5))?; /// assert_eq!(tensor.dims(), &[2, 3, 4, 5]); /// let tensor = tensor.permute((2, 3, 1, 0))?; /// assert_eq!(tensor.dims(), &[4, 5, 3, 2]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn permute<D: Dims>(&self, dims: D) -> Result<Tensor> { let dims = dims.to_indexes(self.shape(), "permute")?; // O(n^2) permutation check but these arrays are small. let is_permutation = dims.len() == self.rank() && (0..dims.len()).all(|i| dims.contains(&i)); if !is_permutation { bail!( "dimension mismatch in permute, tensor {:?}, dims: {:?}", self.dims(), dims ) } let op = BackpropOp::new1(self, |t| Op::Permute(t, dims.clone())); let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: self.layout.permute(&dims)?, op, is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } /// Returns true if the data is stored in a C contiguous (aka row major) way. pub fn is_contiguous(&self) -> bool { self.layout.is_contiguous() } /// Returns true if the data is stored in a Fortran contiguous (aka column major) way. pub fn is_fortran_contiguous(&self) -> bool { self.layout.is_fortran_contiguous() } /// Compared to clone, this copies the actual storage but may fail because of running out of /// memory. 
pub fn copy(&self) -> Result<Tensor> { let op = BackpropOp::new1(self, Op::Copy); let tensor_ = Tensor_ { id: TensorId::new(), storage: Arc::new(RwLock::new(self.storage().try_clone(self.layout())?)), layout: self.layout.clone(), op, is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } /// Returns a new tensor detached from the current graph, gradient are not propagated through /// this new node. The storage of this tensor is shared with the initial tensor. /// /// If the tensor is already detached from the computation graph, the same tensor is returned. pub fn detach(&self) -> Tensor { if self.op.is_none() && !self.is_variable { self.clone() } else { let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: self.layout.clone(), op: BackpropOp::none(), is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Tensor(Arc::new(tensor_)) } } /// If the target device is the same as the tensor device, only a shallow copy is performed. pub fn to_device(&self, device: &Device) -> Result<Tensor> { if self.device().same_device(device) { Ok(self.clone()) } else { let storage = match (&*self.storage(), device) { (Storage::Cpu(storage), Device::Cuda(cuda)) => { Storage::Cuda(cuda.storage_from_cpu_storage(storage)?) } (Storage::Cpu(storage), Device::Metal(metal)) => { Storage::Metal(metal.storage_from_cpu_storage(storage)?) } (Storage::Cuda(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?), (Storage::Metal(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?), (Storage::Cuda(storage), Device::Cuda(cuda)) => { // TODO: Avoid passing through the cpu storage here, especially if the gpu ids // are the same. let cpu_storage = storage.to_cpu_storage()?; Storage::Cuda(cuda.storage_from_cpu_storage(&cpu_storage)?) } (Storage::Cpu(storage), Device::Cpu) => Storage::Cpu(storage.clone()), _ => { bail!( "not implemented yet, self.device: {:?}, device: {:?}", self.device(), device ) } }; let op = BackpropOp::new1(self, Op::ToDevice); let tensor_ = Tensor_ { id: TensorId::new(), storage: Arc::new(RwLock::new(storage)), layout: self.layout.clone(), op, is_variable: false, dtype: self.dtype, device: device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } } /// Returns a new tensor duplicating data from the original tensor. New dimensions are inserted /// on the left. pub fn broadcast_left<S: Into<Shape>>(&self, left_shape: S) -> Result<Self> { let left_shape = left_shape.into(); let mut dims = left_shape.into_dims(); dims.extend(self.dims()); self.broadcast_as(dims) } /// Broadcast the input tensor to the target shape. This returns an error if the input shape is /// not compatible with the target shape. /// /// If the input shape is `i_1, i_2, ... i_k`, the target shape has to have `k` dimensions or /// more and shape `j_1, ..., j_l, t_1, t_2, ..., t_k`. The dimensions `j_1` to `j_l` can have /// any value, the dimension `t_a` must be equal to `i_a` if `i_a` is different from 1. If /// `i_a` is equal to 1, any value can be used. pub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self> { let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: self.layout.broadcast_as(shape)?, op: BackpropOp::new1(self, Op::Broadcast), is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } /// An alias for broadcast_as. 
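    ///
    /// A brief sketch broadcasting a `(3, 1)` tensor to `(3, 4)` (shapes chosen for illustration):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[[1f32], [2.], [3.]], &Device::Cpu)?;
    /// let e = t.expand((3, 4))?;
    /// assert_eq!(e.dims(), &[3, 4]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```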
pub fn expand<S: Into<Shape>>(&self, shape: S) -> Result<Self> { self.broadcast_as(shape) } /// Casts the input tensor to the target `dtype`. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(3.14159265358979f64, &Device::Cpu)?; /// assert_eq!(tensor.to_scalar::<f64>()?, 3.14159265358979); /// let tensor = tensor.to_dtype(candle_core::DType::F32)?; /// assert_eq!(tensor.to_scalar::<f32>()?, 3.1415927); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn to_dtype(&self, dtype: DType) -> Result<Self> { if self.dtype() == dtype { Ok(self.clone()) } else { let shape = self.shape(); let storage = self.storage().to_dtype(self.layout(), dtype)?; let op = BackpropOp::new1(self, Op::ToDType); Ok(from_storage(storage, shape.clone(), op, false)) } } /// Returns a tensor that is in row major order. This is the same as the original tensor if it /// was already contiguous, otherwise a copy is triggered. pub fn contiguous(&self) -> Result<Tensor> { if self.is_contiguous() { Ok(self.clone()) } else { let shape = self.shape(); let mut storage = unsafe { self.device().alloc_uninit(shape, self.dtype())? }; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; let op = BackpropOp::new1(self, Op::Copy); Ok(from_storage(storage, shape.clone(), op, false)) } } /// Returns a tensor that is in row major order. This always makes a copy. pub fn force_contiguous(&self) -> Result<Tensor> { let shape = self.shape(); let mut storage = unsafe { self.device().alloc_uninit(shape, self.dtype())? }; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; let op = BackpropOp::new1(self, Op::Copy); Ok(from_storage(storage, shape.clone(), op, false)) } /// Create a variable based on the values currently stored in a tensor. The storage is always /// copied. pub(crate) fn make_var(&self) -> Result<Tensor> { let shape = self.shape().clone(); let mut storage = unsafe { self.device().alloc_uninit(&shape, self.dtype())? }; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; Ok(from_storage(storage, shape, BackpropOp::none(), true)) } /// Reshape returns a tensor with the target shape provided that the number of elements of the /// original tensor is the same. /// If the input tensor is contiguous, this is a view on the original data. Otherwise this uses /// a new storage and copies the data over, the returned tensor is always contiguous. /// /// The shape can be specified using a tuple of `usize` and at most one `()` in which case /// the behavior is the same as when using `-1` in PyTorch: this dimension size is adjusted so /// as to match the number of elements in the tensor. 
/// /// ```rust /// # use candle_core::{Tensor, DType, Device, D}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// /// let c = a.reshape((1, 6))?; /// assert_eq!(c.shape().dims(), &[1, 6]); /// /// let c = a.reshape((3, 2))?; /// assert_eq!(c.shape().dims(), &[3, 2]); /// /// let c = a.reshape((2, (), 1))?; /// assert_eq!(c.shape().dims(), &[2, 3, 1]); /// /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn reshape<S: ShapeWithOneHole>(&self, s: S) -> Result<Tensor> { let shape = s.into_shape(self.elem_count())?; if shape.elem_count() != self.elem_count() { return Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: shape, op: "reshape", } .bt()); } let op = BackpropOp::new1(self, Op::Reshape); if self.is_contiguous() { let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: Layout::contiguous_with_offset(shape, self.layout.start_offset()), op, is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } else { let mut storage = unsafe { self.device().alloc_uninit(&shape, self.dtype())? }; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; Ok(from_storage(storage, shape, op, false)) } } /// Creates a new tensor with the specified dimension removed if its size was one. /// /// ```rust /// # use candle_core::{Tensor, DType, Device, D}; /// let a = Tensor::zeros((2, 3, 1), DType::F32, &Device::Cpu)?; /// /// let c = a.squeeze(2)?; /// assert_eq!(c.shape().dims(), &[2, 3]); /// /// let c = a.squeeze(D::Minus1)?; /// assert_eq!(c.shape().dims(), &[2, 3]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn squeeze<D: Dim>(&self, dim: D) -> Result<Self> { // The PyTorch semantics are to return the same tensor if the target dimension // does not have a size of 1. let dims = self.dims(); let dim = dim.to_index(self.shape(), "squeeze")?; if dims[dim] == 1 { let mut dims = dims.to_vec(); let mut strides = self.stride().to_vec(); dims.remove(dim); strides.remove(dim); let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: Layout::new(dims.into(), strides, self.layout.start_offset()), op: BackpropOp::new1(self, Op::Reshape), is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } else { Ok(self.clone()) } } /// Creates a new tensor with a dimension of size one inserted at the specified position. /// /// ```rust /// # use candle_core::{Tensor, DType, Device, D}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// /// let c = a.unsqueeze(0)?; /// assert_eq!(c.shape().dims(), &[1, 2, 3]); /// /// let c = a.unsqueeze(D::Minus1)?; /// assert_eq!(c.shape().dims(), &[2, 3, 1]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn unsqueeze<D: Dim>(&self, dim: D) -> Result<Self> { let mut dims = self.dims().to_vec(); let mut strides = self.stride().to_vec(); let dim = dim.to_index_plus_one(self.shape(), "unsqueeze")?; // Cannot panic because to_index_plus_one already checks dimensions dims.insert(dim, 1); // Any stride would work here, but we pick one so as to maximize the probability to remain // C contiguous. 
let stride = if dim < strides.len() { strides[dim] } else { 1 }; strides.insert(dim, stride); let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: Layout::new(dims.into(), strides, self.layout.start_offset()), op: BackpropOp::new1(self, Op::Reshape), is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } /// Stacks two or more tensors along a particular dimension. /// /// All tensors must have the same rank, and the output has one additional rank /// /// ```rust /// # use candle_core::{Tensor, DType, Device}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// let b = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// /// let c = Tensor::stack(&[&a, &b], 0)?; /// assert_eq!(c.shape().dims(), &[2, 2, 3]); /// /// let c = Tensor::stack(&[&a, &b], 2)?; /// assert_eq!(c.shape().dims(), &[2, 3, 2]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn stack<A: AsRef<Tensor>, D: Dim>(args: &[A], dim: D) -> Result<Self> { if args.is_empty() { Err(Error::OpRequiresAtLeastOneTensor { op: "stack" }.bt())? } let dim = dim.to_index_plus_one(args[0].as_ref().shape(), "stack")?; let args = args .iter() .map(|t| t.as_ref().unsqueeze(dim)) .collect::<Result<Vec<_>>>()?; Self::cat(&args, dim) } /// Pad the input tensor using 0s along dimension `dim`. This adds `left` elements before the /// input tensor values and `right` elements after. pub fn pad_with_zeros<D: Dim>(&self, dim: D, left: usize, right: usize) -> Result<Self> { if left == 0 && right == 0 { Ok(self.clone()) } else if left == 0 { let dim = dim.to_index(self.shape(), "pad_with_zeros")?; let mut dims = self.dims().to_vec(); dims[dim] = right; let right = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?; Tensor::cat(&[self, &right], dim) } else if right == 0 { let dim = dim.to_index(self.shape(), "pad_with_zeros")?; let mut dims = self.dims().to_vec(); dims[dim] = left; let left = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?; Tensor::cat(&[&left, self], dim) } else { let dim = dim.to_index(self.shape(), "pad_with_zeros")?; let mut dims = self.dims().to_vec(); dims[dim] = left; let left = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?; dims[dim] = right; let right = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?; Tensor::cat(&[&left, self, &right], dim) } } /// Pad the input tensor using same values along dimension `dim`. This adds `left` elements before the /// input tensor values and `right` elements after. pub fn pad_with_same<D: Dim>(&self, dim: D, left: usize, right: usize) -> Result<Self> { if left == 0 && right == 0 { Ok(self.clone()) } else if self.elem_count() == 0 { bail!("cannot use pad_with_same on an empty tensor") } else if left == 0 { let dim = dim.to_index(self.shape(), "pad_with_same")?; let r = self.narrow(dim, self.dim(dim)? - 1, 1)?; let mut v = vec![self]; for _ in 0..right { v.push(&r) } Tensor::cat(&v, dim) } else if right == 0 { let dim = dim.to_index(self.shape(), "pad_with_same")?; let l = self.narrow(dim, 0, 1)?; let mut v = vec![]; for _ in 0..left { v.push(&l) } v.push(self); Tensor::cat(&v, dim) } else { let dim = dim.to_index(self.shape(), "pad_with_same")?; let l = self.narrow(dim, 0, 1)?; let r = self.narrow(dim, self.dim(dim)? - 1, 1)?; let mut v = vec![]; for _ in 0..left { v.push(&l) } v.push(self); for _ in 0..right { v.push(&r) } Tensor::cat(&v, dim) } } /// Run the `forward` method of `m` on `self`. 
pub fn apply<M: crate::Module>(&self, m: &M) -> Result<Self> { m.forward(self) } /// Run the `forward` method of `m` on `self`. pub fn apply_t<M: crate::ModuleT>(&self, m: &M, train: bool) -> Result<Self> { m.forward_t(self, train) } pub(crate) fn storage(&self) -> std::sync::RwLockReadGuard<'_, Storage> { self.storage.read().unwrap() } pub(crate) fn storage_mut(&self) -> std::sync::RwLockWriteGuard<'_, Storage> { self.storage.write().unwrap() } // If we extend the visibility of this function to be usable outside of this crate, we should // make it unsafe. pub(crate) fn storage_mut_and_layout( &self, ) -> (std::sync::RwLockWriteGuard<'_, Storage>, &Layout) { let storage = self.storage.write().unwrap(); (storage, &self.layout) } /// The storage used by this tensor, together with the layout to use to access it safely. pub fn storage_and_layout(&self) -> (std::sync::RwLockReadGuard<'_, Storage>, &Layout) { let storage = self.storage.read().unwrap(); (storage, &self.layout) } pub(crate) fn same_storage(&self, rhs: &Self) -> bool { let lhs: &RwLock<Storage> = self.storage.as_ref(); let rhs: &RwLock<Storage> = rhs.storage.as_ref(); std::ptr::eq(lhs, rhs) } /// Normalize a 'relative' axis value: positive values are kept, negative /// values means counting the dimensions from the back. pub fn normalize_axis(&self, axis: i64) -> Result<usize> { let rank = self.rank() as i64; if rank <= axis { bail!("axis {axis} is too large, tensor rank {rank}") } else if 0 <= axis { Ok(axis as usize) } else { let naxis = rank + axis; if naxis < 0 { bail!("axis {axis} is too small, tensor rank {rank}") } Ok(naxis as usize) } } /// Returns a lower triangular matrix of ones of size n by n. pub fn tril2(n: usize, dtype: DType, device: &Device) -> Result<Self> { let t = Tensor::arange(0u32, n as u32, device)?; let t1 = t.reshape((1, n))?.broadcast_as((n, n))?; let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?; t1.le(&t2)?.to_dtype(dtype) } /// Returns an upper triangular matrix of ones of size n by n. pub fn triu2(n: usize, dtype: DType, device: &Device) -> Result<Self> { let t = Tensor::arange(0u32, n as u32, device)?; let t1 = t.reshape((1, n))?.broadcast_as((n, n))?; let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?; t1.ge(&t2)?.to_dtype(dtype) } /// Returns a matrix with a diagonal of ones of size n by n. pub fn eye(n: usize, dtype: DType, device: &Device) -> Result<Self> { let t = Tensor::arange(0u32, n as u32, device)?; let t1 = t.reshape((1, n))?.broadcast_as((n, n))?; let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?; t1.eq(&t2)?.to_dtype(dtype) } /// Returns the cumulative sum of elements of the input tensor summed over the specified /// dimension. /// /// This operation is most efficient when dim is the last dimension of the tensor. pub fn cumsum<D: Dim>(&self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "cumsum")?; let rank = self.rank(); if rank == 0 { return Ok(self.clone()); } let n_axis = self.dim(dim)?; let triu = Tensor::triu2(n_axis, self.dtype(), self.device())?; if rank == 1 { self.unsqueeze(0)?.matmul(&triu)?.squeeze(0) } else { let last = rank - 1; let t = self.transpose(dim, last)?; let t = t.broadcast_matmul(&triu)?; t.transpose(dim, last) } } /// Returns a copy of `self` where the values within `ranges` have been replaced with the /// content of `src`. 
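    ///
    /// A minimal sketch overwriting a 1x2 block of a 3x3 tensor (example values only):
    ///
    /// ```rust
    /// use candle_core::{Tensor, DType, Device};
    /// let t = Tensor::zeros((3, 3), DType::F32, &Device::Cpu)?;
    /// let src = Tensor::ones((1, 2), DType::F32, &Device::Cpu)?;
    /// let out = t.slice_assign(&[0..1, 1..3], &src)?;
    /// assert_eq!(out.to_vec2::<f32>()?, &[[0., 1., 1.], [0., 0., 0.], [0., 0., 0.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```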
pub fn slice_assign<D: std::ops::RangeBounds<usize>>( &self, ranges: &[D], src: &Tensor, ) -> Result<Self> { let src_dims = src.dims(); let self_dims = self.dims(); if self_dims.len() != src_dims.len() { bail!( "slice-assign requires input with the same rank {} <> {}", self_dims.len(), src_dims.len() ) } if self_dims.len() != ranges.len() { bail!( "slice-assign requires input with the same rank as there are ranges {} <> {}", self_dims.len(), ranges.len() ) } let mut src = src.clone(); let mut mask = Self::ones(src.shape(), DType::U8, src.device())?; for (i, range) in ranges.iter().enumerate() { let start_included = match range.start_bound() { std::ops::Bound::Unbounded => 0, std::ops::Bound::Included(v) => *v, std::ops::Bound::Excluded(v) => *v + 1, }; let end_excluded = match range.end_bound() { std::ops::Bound::Unbounded => self_dims[i], std::ops::Bound::Included(v) => *v + 1, std::ops::Bound::Excluded(v) => *v, }; if end_excluded <= start_included { bail!("slice-assign: empty range for dim {i}, {start_included} {end_excluded}") } if self_dims[i] < end_excluded { bail!( "slice-assign: upper bound is out of range for dim {i}, {end_excluded} {}", self_dims[i] ) } if end_excluded - start_included != src_dims[i] { bail!( "slice-assign: the range for dim {i} ({start_included}..{end_excluded}) does not match the size of src {}", src_dims[i] ) } src = src.pad_with_zeros(i, start_included, self_dims[i] - end_excluded)?; mask = mask.pad_with_zeros(i, start_included, self_dims[i] - end_excluded)? } mask.where_cond(/* on_true= */ &src, /* on_false= */ self) } /// Returns log(sum(exp(tensor), dim)). pub fn log_sum_exp<D: Dims>(&self, sum_dims: D) -> Result<Self> { let sum_dims = sum_dims.to_indexes(self.shape(), "log-sum-exp")?; if sum_dims.is_empty() { return Ok(self.clone()); } let max = sum_dims[1..] .iter() .try_fold(self.max_keepdim(sum_dims[0])?, |max, &dim| { max.max_keepdim(dim) })?; let exp = self.broadcast_sub(&max)?.exp()?; let sum = exp.sum(sum_dims.clone())?; sum.log()? + max.squeeze_dims(&sum_dims) } /// Pointwise pow operation. pub fn pow(&self, rhs: &Tensor) -> Result<Self> { rhs.mul(&self.log()?)?.exp() } /// Broadcasting version of `pow`. pub fn broadcast_pow(&self, rhs: &Tensor) -> Result<Self> { rhs.broadcast_mul(&self.log()?)?.exp() } /// Returns a new tensor with the order of elements reversed along the specified dimensions. /// This function makes a copy of the tensor’s data. /// /// ```rust /// # use candle_core::{Tensor, Device}; /// let t = Tensor::arange(0., 6., &Device::Cpu)?.reshape((2, 3))?; /// assert_eq!(t.to_vec2::<f64>()?, &[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]); /// let t_flipped = t.flip(&[0])?; /// assert_eq!(t_flipped.to_vec2::<f64>()?, &[[3.0, 4.0, 5.0], [0.0, 1.0, 2.0]]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn flip(&self, dims: &[usize]) -> Result<Tensor> { let mut result = self.clone(); for &dim in dims.iter() { let size = result.dim(dim)?; let indices: Vec<i64> = (0..size).rev().map(|x| x as i64).collect(); let indices_tensor = Tensor::from_vec(indices, (size,), result.device())?; result = result.index_select(&indices_tensor, dim)?; } Ok(result) } } macro_rules! 
bin_trait { ($trait:ident, $fn1:ident, $mul:expr, $add:expr) => { impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<B> for Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: B) -> Self::Output { Tensor::$fn1(&self, rhs.borrow()) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<B> for &Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: B) -> Self::Output { Tensor::$fn1(&self, rhs.borrow()) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Tensor> for Result<B> { type Output = Result<Tensor>; fn $fn1(self, rhs: Tensor) -> Self::Output { Tensor::$fn1(self?.borrow(), &rhs) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<&Tensor> for Result<B> { type Output = Result<Tensor>; fn $fn1(self, rhs: &Tensor) -> Self::Output { Tensor::$fn1(self?.borrow(), rhs) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Result<B>> for Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: Result<B>) -> Self::Output { Tensor::$fn1(&self, rhs?.borrow()) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Result<B>> for &Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: Result<B>) -> Self::Output { Tensor::$fn1(&self, rhs?.borrow()) } } impl std::ops::$trait<f64> for Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: f64) -> Self::Output { self.affine($mul(rhs), $add(rhs)) } } impl std::ops::$trait<f64> for &Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: f64) -> Self::Output { self.affine($mul(rhs), $add(rhs)) } } }; } bin_trait!(Add, add, |_| 1., |v| v); bin_trait!(Sub, sub, |_| 1., |v: f64| -v); bin_trait!(Mul, mul, |v| v, |_| 0.); bin_trait!(Div, div, |v| 1. / v, |_| 0.); impl std::ops::Add<Tensor> for f64 { type Output = Result<Tensor>; fn add(self, rhs: Tensor) -> Self::Output { rhs + self } } impl std::ops::Add<&Tensor> for f64 { type Output = Result<Tensor>; fn add(self, rhs: &Tensor) -> Self::Output { rhs + self } } impl std::ops::Mul<Tensor> for f64 { type Output = Result<Tensor>; fn mul(self, rhs: Tensor) -> Self::Output { rhs * self } } impl std::ops::Mul<&Tensor> for f64 { type Output = Result<Tensor>; fn mul(self, rhs: &Tensor) -> Self::Output { rhs * self } } impl std::ops::Sub<Tensor> for f64 { type Output = Result<Tensor>; fn sub(self, rhs: Tensor) -> Self::Output { rhs.affine(-1., self) } } impl std::ops::Sub<&Tensor> for f64 { type Output = Result<Tensor>; fn sub(self, rhs: &Tensor) -> Self::Output { rhs.affine(-1., self) } } impl std::ops::Div<Tensor> for f64 { type Output = Result<Tensor>; #[allow(clippy::suspicious_arithmetic_impl)] fn div(self, rhs: Tensor) -> Self::Output { rhs.recip()? * self } } impl std::ops::Div<&Tensor> for f64 { type Output = Result<Tensor>; #[allow(clippy::suspicious_arithmetic_impl)] fn div(self, rhs: &Tensor) -> Self::Output { rhs.recip()? * self } }
candle/candle-core/src/tensor.rs/0
{ "file_path": "candle/candle-core/src/tensor.rs", "repo_id": "candle", "token_count": 52099 }
32
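The `tensor.rs` record above defines `slice_assign`, `log_sum_exp`, and `flip`. As a quick orientation (not part of the source file), here is a minimal usage sketch; it assumes the public `candle_core` API matches the signatures shown in the record, and the concrete shapes and values are illustrative only.

```rust
use candle_core::{DType, Device, Tensor};

fn main() -> candle_core::Result<()> {
    let dev = Device::Cpu;

    // slice_assign: overwrite the middle column of a 3x3 tensor with ones.
    // The ranges must cover every dimension and match the shape of `src`.
    let t = Tensor::zeros((3, 3), DType::F64, &dev)?;
    let src = Tensor::ones((3, 1), DType::F64, &dev)?;
    let t = t.slice_assign(&[0..3, 1..2], &src)?;
    println!("{t}");

    // log_sum_exp: numerically stable log(sum(exp(x), dim)) over dim 1.
    let x = Tensor::new(&[[1f64, 2., 3.], [4., 5., 6.]], &dev)?;
    let lse = x.log_sum_exp(1)?;
    println!("{lse}");

    // flip: reverse the order of the rows (dimension 0).
    let flipped = x.flip(&[0])?;
    println!("{flipped}");
    Ok(())
}
```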
/// Regression test for pth files not loading on Windows.
#[test]
fn test_pth() {
    let tensors = candle_core::pickle::PthTensors::new("tests/test.pt", None).unwrap();
    tensors.get("test").unwrap().unwrap();
}

#[test]
fn test_pth_with_key() {
    let tensors =
        candle_core::pickle::PthTensors::new("tests/test_with_key.pt", Some("model_state_dict"))
            .unwrap();
    tensors.get("test").unwrap().unwrap();
}

#[test]
fn test_pth_fortran_contiguous() {
    let tensors =
        candle_core::pickle::PthTensors::new("tests/fortran_tensor_3d.pth", None).unwrap();
    let tensor = tensors.get("tensor_fortran").unwrap().unwrap();

    assert_eq!(tensor.dims3().unwrap(), (2, 3, 4));

    assert_eq!(
        tensor.to_vec3::<i64>().unwrap(),
        [
            [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
            [[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]
        ]
    );
}
candle/candle-core/tests/pth_tests.rs/0
{ "file_path": "candle/candle-core/tests/pth_tests.rs", "repo_id": "candle", "token_count": 440 }
33
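The tests above exercise `candle_core::pickle::PthTensors` against checked-in fixtures. For context, a hedged sketch of how the same reader might be used outside of tests; the file name `model.pt`, the key `model_state_dict`, and the tensor name `linear.weight` are placeholders for illustration, not fixtures from the repository.

```rust
use candle_core::pickle::PthTensors;

fn main() -> candle_core::Result<()> {
    // Open a PyTorch checkpoint; the second argument selects a nested dict
    // (e.g. a state dict saved under a key), or None for a flat file.
    let tensors = PthTensors::new("model.pt", Some("model_state_dict"))?;

    // `get` returns Ok(None) when the name is absent rather than erroring.
    match tensors.get("linear.weight")? {
        Some(w) => println!("loaded tensor with shape {:?}", w.shape()),
        None => println!("no tensor named linear.weight in this checkpoint"),
    }
    Ok(())
}
```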
//! Zalando Fashion MNIST dataset.
//! A slightly more difficult dataset that is drop-in compatible with MNIST.
//!
//! Taken from here: https://huggingface.co/datasets/zalando-datasets/fashion_mnist

use candle::Result;

pub fn load() -> Result<crate::vision::Dataset> {
    crate::vision::mnist::load_mnist_like(
        "zalando-datasets/fashion_mnist",
        "refs/convert/parquet",
        "fashion_mnist/test/0000.parquet",
        "fashion_mnist/train/0000.parquet",
    )
}
candle/candle-datasets/src/vision/fashion_mnist.rs/0
{ "file_path": "candle/candle-datasets/src/vision/fashion_mnist.rs", "repo_id": "candle", "token_count": 196 }
34
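A small usage sketch for the loader above (not part of the source file); it assumes the `crate::vision::Dataset` struct exposes the usual candle-datasets fields (`train_images`, `train_labels`, `test_images`, `test_labels`, `labels`) and that the Hugging Face Hub is reachable so the parquet files can be downloaded.

```rust
fn main() -> candle::Result<()> {
    // Downloads the Fashion-MNIST parquet files from the Hub on first use.
    let ds = candle_datasets::vision::fashion_mnist::load()?;
    println!("train images: {:?}", ds.train_images.shape());
    println!("train labels: {:?}", ds.train_labels.shape());
    println!("test images:  {:?}", ds.test_images.shape());
    println!("classes:      {}", ds.labels);
    Ok(())
}
```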
# candle-chatglm

Uses `THUDM/chatglm3-6b` to generate Chinese text. It will usually not generate English text.

## Text Generation

```bash
cargo run --example chatglm --release -- --prompt "部署门槛较低等众多优秀特 "

> 部署门槛较低等众多优秀特 点,使得其成为了一款备受欢迎的AI助手。
>
> 作为一款人工智能助手,ChatGLM3-6B
```
candle/candle-examples/examples/chatglm/README.md/0
{ "file_path": "candle/candle-examples/examples/chatglm/README.md", "repo_id": "candle", "token_count": 204 }
35
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;

#[cfg(feature = "accelerate")]
extern crate accelerate_src;

use clap::{Parser, ValueEnum};

use candle::{DType, IndexOp, D};
use candle_nn::{Module, VarBuilder};
use candle_transformers::models::hiera;

#[derive(Clone, Copy, Debug, ValueEnum)]
enum Which {
    Tiny,
    Small,
    Base,
    BasePlus,
    Large,
    Huge,
}

impl Which {
    fn model_filename(&self) -> String {
        let name = match self {
            Self::Tiny => "tiny",
            Self::Small => "small",
            Self::Base => "base",
            Self::BasePlus => "base_plus",
            Self::Large => "large",
            Self::Huge => "huge",
        };
        format!("timm/hiera_{name}_224.mae_in1k_ft_in1k")
    }

    fn config(&self) -> hiera::Config {
        match self {
            Self::Tiny => hiera::Config::tiny(),
            Self::Small => hiera::Config::small(),
            Self::Base => hiera::Config::base(),
            Self::BasePlus => hiera::Config::base_plus(),
            Self::Large => hiera::Config::large(),
            Self::Huge => hiera::Config::huge(),
        }
    }
}

#[derive(Parser)]
struct Args {
    #[arg(long)]
    model: Option<String>,

    #[arg(long)]
    image: String,

    /// Run on CPU rather than on GPU.
    #[arg(long)]
    cpu: bool,

    #[arg(value_enum, long, default_value_t=Which::Tiny)]
    which: Which,
}

pub fn main() -> anyhow::Result<()> {
    let args = Args::parse();
    let device = candle_examples::device(args.cpu)?;
    let image = candle_examples::imagenet::load_image224(args.image)?.to_device(&device)?;
    println!("loaded image {image:?}");

    let model_file = match args.model {
        None => {
            let model_name = args.which.model_filename();
            let api = hf_hub::api::sync::Api::new()?;
            let api = api.model(model_name);
            api.get("model.safetensors")?
        }
        Some(model) => model.into(),
    };
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? };
    let model = hiera::hiera(&args.which.config(), 1000, vb)?;
    println!("model built");

    let logits = model.forward(&image.unsqueeze(0)?)?;
    let prs = candle_nn::ops::softmax(&logits, D::Minus1)?
        .i(0)?
        .to_vec1::<f32>()?;
    let mut prs = prs.iter().enumerate().collect::<Vec<_>>();
    prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1));
    for &(category_idx, pr) in prs.iter().take(5) {
        println!(
            "{:24}: {:.2}%",
            candle_examples::imagenet::CLASSES[category_idx],
            100. * pr
        );
    }
    Ok(())
}
candle/candle-examples/examples/hiera/main.rs/0
{ "file_path": "candle/candle-examples/examples/hiera/main.rs", "repo_id": "candle", "token_count": 1256 }
36
/// This follows the lines of: /// https://github.com/johnma2006/mamba-minimal/blob/master/model.py /// Simple, minimal implementation of Mamba in one file of PyTorch. use candle::{IndexOp, Module, Result, Tensor, D}; use candle_nn::{RmsNorm, VarBuilder}; use candle_transformers::models::with_tracing::{linear, linear_no_bias, Linear}; #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { d_model: usize, n_layer: usize, vocab_size: usize, pad_vocab_size_multiple: usize, } impl Config { fn vocab_size(&self) -> usize { let pad = self.pad_vocab_size_multiple; self.vocab_size.div_ceil(pad) * pad } fn dt_rank(&self) -> usize { self.d_model.div_ceil(16) } fn d_conv(&self) -> usize { 4 } fn d_state(&self) -> usize { 16 } fn d_inner(&self) -> usize { self.d_model * 2 } } // https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L177 #[derive(Clone, Debug)] pub struct MambaBlock { in_proj: Linear, conv1d: candle_nn::Conv1d, x_proj: Linear, dt_proj: Linear, a_log: Tensor, d: Tensor, out_proj: Linear, dt_rank: usize, } impl MambaBlock { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let d_inner = cfg.d_inner(); let d_conv = cfg.d_conv(); let d_state = cfg.d_state(); let dt_rank = cfg.dt_rank(); let in_proj = linear_no_bias(cfg.d_model, d_inner * 2, vb.pp("in_proj"))?; let conv_cfg = candle_nn::Conv1dConfig { groups: d_inner, padding: d_conv - 1, ..Default::default() }; let conv1d = candle_nn::conv1d(d_inner, d_inner, d_conv, conv_cfg, vb.pp("conv1d"))?; let x_proj = linear_no_bias(d_inner, dt_rank + d_state * 2, vb.pp("x_proj"))?; let dt_proj = linear(dt_rank, d_inner, vb.pp("dt_proj"))?; let a_log = vb.get((d_inner, d_state), "A_log")?; let d = vb.get(d_inner, "D")?; let out_proj = linear_no_bias(d_inner, cfg.d_model, vb.pp("out_proj"))?; Ok(Self { in_proj, conv1d, x_proj, dt_proj, a_log, d, out_proj, dt_rank, }) } fn ssm(&self, xs: &Tensor) -> Result<Tensor> { let (_d_in, n) = self.a_log.dims2()?; let a = self.a_log.to_dtype(candle::DType::F32)?.exp()?.neg()?; let d = self.d.to_dtype(candle::DType::F32)?; let x_dbl = xs.apply(&self.x_proj)?; let delta = x_dbl.narrow(D::Minus1, 0, self.dt_rank)?; let b = x_dbl.narrow(D::Minus1, self.dt_rank, n)?; let c = x_dbl.narrow(D::Minus1, self.dt_rank + n, n)?; let delta = delta.contiguous()?.apply(&self.dt_proj)?; // softplus without threshold let delta = (delta.exp()? + 1.)?.log()?; let ss = selective_scan(xs, &delta, &a, &b, &c, &d)?; Ok(ss) } } // https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L275 fn selective_scan( u: &Tensor, delta: &Tensor, a: &Tensor, b: &Tensor, c: &Tensor, d: &Tensor, ) -> Result<Tensor> { let (b_sz, l, d_in) = u.dims3()?; let n = a.dim(1)?; let delta = delta.t()?.reshape((b_sz, d_in, l, 1))?; // b d_in l 1 let delta_a = delta.broadcast_mul(&a.reshape((1, d_in, 1, n))?)?.exp()?; let delta_b_u = delta .broadcast_mul(&b.reshape((b_sz, 1, l, n))?)? .broadcast_mul(&u.t()?.reshape((b_sz, d_in, l, 1))?)?; let mut xs = Tensor::zeros((b_sz, d_in, n), delta_a.dtype(), delta_a.device())?; let mut ys = Vec::with_capacity(l); for i in 0..l { xs = ((delta_a.i((.., .., i))? * xs)? 
+ delta_b_u.i((.., .., i))?)?; let y = xs.matmul(&c.i((.., i, ..))?.unsqueeze(2)?)?.squeeze(2)?; ys.push(y) } let ys = Tensor::stack(ys.as_slice(), 1)?; ys + u.broadcast_mul(d) } impl Module for MambaBlock { // https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L206 fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_b_sz, seq_len, _dim) = xs.dims3()?; let xs_and_res = xs.apply(&self.in_proj)?.chunk(2, D::Minus1)?; let (xs, res) = (&xs_and_res[0], &xs_and_res[1]); let xs = xs .t()? .apply(&self.conv1d)? .narrow(D::Minus1, 0, seq_len)? .t()?; let xs = candle_nn::ops::silu(&xs)?; let ys = (self.ssm(&xs)? * candle_nn::ops::silu(res))?; ys.apply(&self.out_proj) } } // https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L143 #[derive(Clone, Debug)] pub struct ResidualBlock { mixer: MambaBlock, norm: RmsNorm, } impl ResidualBlock { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let norm = candle_nn::rms_norm(cfg.d_model, 1e-5, vb.pp("norm"))?; let mixer = MambaBlock::new(cfg, vb.pp("mixer"))?; Ok(Self { mixer, norm }) } } impl Module for ResidualBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.norm)?.apply(&self.mixer)? + xs } } // https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L56 #[derive(Clone, Debug)] pub struct Model { embedding: candle_nn::Embedding, layers: Vec<ResidualBlock>, norm_f: RmsNorm, lm_head: Linear, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let embedding = candle_nn::embedding(cfg.vocab_size(), cfg.d_model, vb.pp("embedding"))?; let mut layers = Vec::with_capacity(cfg.n_layer); let vb_l = vb.pp("layers"); for layer_idx in 0..cfg.n_layer { let layer = ResidualBlock::new(cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm_f = candle_nn::rms_norm(cfg.d_model, 1e-5, vb.pp("norm_f"))?; let lm_head = Linear::from_weights(embedding.embeddings().clone(), None); Ok(Self { embedding, layers, norm_f, lm_head, }) } } impl Module for Model { fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let (_b_size, seq_len) = input_ids.dims2()?; let mut xs = self.embedding.forward(input_ids)?; for layer in self.layers.iter() { xs = layer.forward(&xs)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm_f)? .apply(&self.lm_head) } }
candle/candle-examples/examples/mamba-minimal/model.rs/0
{ "file_path": "candle/candle-examples/examples/mamba-minimal/model.rs", "repo_id": "candle", "token_count": 3490 }
37
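For reference, the `selective_scan` function in the record above implements the discretized state-space recurrence from the Mamba formulation. Below is a sketch of the update it computes, written with the same symbols as the code (`u` input, `delta` step size, `A`, `B`, `C`, `D` state-space parameters); this is a reading aid, not part of the source file.

```latex
% Discretization, as computed in selective_scan:
%   delta_a   = exp(delta * A)
%   delta_b_u = delta * B * u
\bar{A}_t = \exp(\Delta_t A), \qquad \bar{B}_t u_t = \Delta_t B_t u_t
% Recurrence over the sequence dimension, then readout with skip term D:
x_t = \bar{A}_t \odot x_{t-1} + \bar{B}_t u_t, \qquad y_t = C_t x_t + D \odot u_t
```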