PEFT
Safetensors
English
cybersecurity
malware-analysis
att&ck
threat-intelligence
mixtral
lora
expert-adapters
cape-sandbox
digital-forensics
Instructions for using umer07/fathom-mixtral with libraries, inference providers, notebooks, and local apps.
- Libraries
- PEFT
How to use umer07/fathom-mixtral with PEFT:
```python
from peft import PeftModel
from transformers import AutoModelForCausalLM

# Load the base model first, then attach the fathom-mixtral LoRA adapter on top of it.
base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
model = PeftModel.from_pretrained(base_model, "umer07/fathom-mixtral")
```

A fuller inference sketch follows the notebook links below.

- Notebooks
- Google Colab
- Kaggle
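
A minimal end-to-end inference sketch. It assumes the adapter reuses the base model's tokenizer and that the standard Mixtral-Instruct `[INST]` prompt format applies (the card does not document a required template); the example prompt is illustrative only:

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Assumption: the adapter added no new tokens, so the base tokenizer is reused.
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-Instruct-v0.1")
base_model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    torch_dtype=torch.float16,  # half precision to fit the 8x7B MoE
    device_map="auto",          # shard across available GPUs (requires accelerate)
)
model = PeftModel.from_pretrained(base_model, "umer07/fathom-mixtral")

# Illustrative prompt in the Mixtral-Instruct format (an assumption, see above).
prompt = "[INST] Summarize the likely behavior of a sample that spawns cmd.exe and writes to the Run registry key. [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=256, do_sample=False)
# Decode only the newly generated tokens, skipping the echoed prompt.
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```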
Training log, recorded every 10 steps over one epoch (600 steps):

| Step | Epoch | Loss | Grad norm | Learning rate | Entropy | Tokens seen | Mean token accuracy |
|------|-------|------|-----------|---------------|---------|-------------|---------------------|
| 10 | 0.0167 | 1.9463 | 0.9043 | 1.8e-05 | 1.4898 | 239592 | 0.5727 |
| 20 | 0.0334 | 1.8033 | 0.6490 | 3.8e-05 | 1.7221 | 480339 | 0.5811 |
| 30 | 0.0500 | 1.5177 | 1.1160 | 5.8e-05 | 1.5653 | 719994 | 0.6268 |
| 40 | 0.0667 | 1.0739 | 0.4417 | 7.8e-05 | 1.0531 | 957912 | 0.7257 |
| 50 | 0.0834 | 0.9505 | 0.3340 | 9.8e-05 | 0.9433 | 1197740 | 0.7573 |
| 60 | 0.1001 | 0.9094 | 0.3062 | 9.993e-05 | 0.9021 | 1434364 | 0.7618 |
| 70 | 0.1168 | 0.8602 | 0.3027 | 9.971e-05 | 0.8552 | 1673721 | 0.7724 |
| 80 | 0.1334 | 0.8386 | 0.3573 | 9.932e-05 | 0.8345 | 1921434 | 0.7766 |
| 90 | 0.1501 | 0.8519 | 0.3715 | 9.876e-05 | 0.8489 | 2162407 | 0.7723 |
| 100 | 0.1668 | 0.7660 | 0.3652 | 9.805e-05 | 0.7693 | 2399946 | 0.7895 |
| 110 | 0.1835 | 0.7927 | 0.3489 | 9.719e-05 | 0.7880 | 2643290 | 0.7859 |
| 120 | 0.2002 | 0.7939 | 0.3456 | 9.617e-05 | 0.8026 | 2890007 | 0.7866 |
| 130 | 0.2168 | 0.7367 | 0.3907 | 9.500e-05 | 0.7423 | 3134139 | 0.7972 |
| 140 | 0.2335 | 0.7339 | 0.3556 | 9.368e-05 | 0.7409 | 3382098 | 0.7976 |
| 150 | 0.2502 | 0.7405 | 0.3681 | 9.222e-05 | 0.7448 | 3622551 | 0.7954 |
| 160 | 0.2669 | 0.7253 | 0.3277 | 9.062e-05 | 0.7334 | 3865667 | 0.7991 |
| 170 | 0.2836 | 0.7110 | 0.3297 | 8.889e-05 | 0.7232 | 4101163 | 0.8030 |
| 180 | 0.3003 | 0.7172 | 0.3492 | 8.703e-05 | 0.7242 | 4344116 | 0.8021 |
| 190 | 0.3169 | 0.7291 | 0.3343 | 8.505e-05 | 0.7334 | 4581952 | 0.7989 |
| 200 | 0.3336 | 0.7138 | 0.3301 | 8.296e-05 | 0.7184 | 4829123 | 0.8040 |
| 210 | 0.3503 | 0.6850 | 0.3576 | 8.076e-05 | 0.6890 | 5067338 | 0.8092 |
| 220 | 0.3670 | 0.6954 | 0.3246 | 7.846e-05 | 0.6972 | 5311215 | 0.8068 |
| 230 | 0.3837 | 0.7073 | 0.3495 | 7.606e-05 | 0.7148 | 5554999 | 0.8037 |
| 240 | 0.4003 | 0.6760 | 0.3801 | 7.359e-05 | 0.6754 | 5802860 | 0.8101 |
| 250 | 0.4170 | 0.6820 | 0.3695 | 7.103e-05 | 0.6829 | 6043669 | 0.8099 |
| 260 | 0.4337 | 0.6754 | 0.3472 | 6.841e-05 | 0.6824 | 6279154 | 0.8119 |
| 270 | 0.4504 | 0.6567 | 0.3316 | 6.572e-05 | 0.6585 | 6514535 | 0.8146 |
| 280 | 0.4671 | 0.6816 | 0.3631 | 6.299e-05 | 0.6815 | 6757908 | 0.8090 |
| 290 | 0.4837 | 0.6897 | 0.3340 | 6.021e-05 | 0.6964 | 6996464 | 0.8083 |
| 300 | 0.5004 | 0.6921 | 0.3465 | 5.740e-05 | 0.6913 | 7234102 | 0.8069 |
| 310 | 0.5171 | 0.6802 | 0.3735 | 5.456e-05 | 0.6853 | 7479635 | 0.8108 |
| 320 | 0.5338 | 0.6676 | 0.3854 | 5.171e-05 | 0.6696 | 7718364 | 0.8131 |
| 330 | 0.5505 | 0.6537 | 0.3380 | 4.886e-05 | 0.6589 | 7958054 | 0.8149 |
| 340 | 0.5671 | 0.6902 | 0.3034 | 4.601e-05 | 0.6922 | 8209298 | 0.8085 |
| 350 | 0.5838 | 0.6659 | 0.3871 | 4.317e-05 | 0.6669 | 8455256 | 0.8132 |
| 360 | 0.6005 | 0.6579 | 0.3557 | 4.035e-05 | 0.6596 | 8695771 | 0.8158 |
| 370 | 0.6172 | 0.6641 | 0.3794 | 3.757e-05 | 0.6649 | 8944844 | 0.8132 |
| 380 | 0.6339 | 0.6577 | 0.3610 | 3.482e-05 | 0.6610 | 9191203 | 0.8145 |
| 390 | 0.6505 | 0.6590 | 0.3606 | 3.213e-05 | 0.6607 | 9426243 | 0.8144 |
| 400 | 0.6672 | 0.6568 | 0.3522 | 2.949e-05 | 0.6625 | 9664795 | 0.8151 |
| 410 | 0.6839 | 0.6536 | 0.3643 | 2.692e-05 | 0.6571 | 9904436 | 0.8149 |
| 420 | 0.7006 | 0.6704 | 0.3429 | 2.443e-05 | 0.6674 | 10150234 | 0.8119 |
| 430 | 0.7173 | 0.7034 | 0.3803 | 2.201e-05 | 0.7082 | 10391148 | 0.8065 |
| 440 | 0.7339 | 0.6943 | 0.3086 | 1.969e-05 | 0.7003 | 10636950 | 0.8064 |
| 450 | 0.7506 | 0.6803 | 0.3550 | 1.747e-05 | 0.6814 | 10879243 | 0.8102 |
| 460 | 0.7673 | 0.6421 | 0.3636 | 1.536e-05 | 0.6463 | 11114856 | 0.8183 |
| 470 | 0.7840 | 0.6766 | 0.3340 | 1.336e-05 | 0.6827 | 11360827 | 0.8113 |
| 480 | 0.8007 | 0.6470 | 0.3601 | 1.147e-05 | 0.6471 | 11601682 | 0.8175 |
| 490 | 0.8173 | 0.6936 | 0.3633 | 9.718e-06 | 0.7031 | 11844375 | 0.8059 |
| 500 | 0.8340 | 0.6743 | 0.3475 | 8.092e-06 | 0.6719 | 12085115 | 0.8132 |
| 510 | 0.8507 | 0.6344 | 0.3599 | 6.604e-06 | 0.6371 | 12332095 | 0.8191 |
| 520 | 0.8674 | 0.6349 | 0.3463 | 5.257e-06 | 0.6385 | 12575942 | 0.8205 |
| 530 | 0.8841 | 0.6527 | 0.3441 | 4.056e-06 | 0.6544 | 12817224 | 0.8159 |
| 540 | 0.9008 | 0.6387 | 0.3211 | 3.005e-06 | 0.6461 | 13055918 | 0.8181 |
| 550 | 0.9174 | 0.6749 | 0.3873 | 2.107e-06 | 0.6730 | 13296781 | 0.8107 |
| 560 | 0.9341 | 0.6300 | 0.3420 | 1.365e-06 | 0.6306 | 13536924 | 0.8213 |
| 570 | 0.9508 | 0.6410 | 0.3531 | 7.818e-07 | 0.6478 | 13773817 | 0.8177 |
| 580 | 0.9675 | 0.6724 | 0.3384 | 3.593e-07 | 0.6756 | 14012569 | 0.8117 |
| 590 | 0.9842 | 0.6523 | 0.3589 | 9.866e-08 | 0.6570 | 14254389 | 0.8173 |
| 600 | 1.0000 | 0.6664 | 0.5415 | 8.157e-10 | 0.6749 | 14485000 | 0.8126 |

Final training summary:

| Metric | Value |
|--------|-------|
| Train runtime | 6029.57 s |
| Train samples per second | 3.181 |
| Train steps per second | 0.1 |
| Total FLOPs | 6.621e+18 |
| Mean train loss | 0.7607 |
| Epochs | 1.0 |
| Steps | 600 |
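
The entries above have the shape of the `log_history` array that the Hugging Face `Trainer` writes to `trainer_state.json`. A minimal sketch for recovering the loss curve from such a file, assuming a local copy named `trainer_state.json` alongside the checkpoint:

```python
import json

import matplotlib.pyplot as plt

# Assumption: a local trainer_state.json containing the log shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step records carry a "loss" key; the final summary record does not.
records = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in records]
losses = [e["loss"] for e in records]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("fathom-mixtral LoRA fine-tune")
plt.savefig("loss_curve.png")
```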