{
  "best_metric": 0.4863013698630137,
  "best_model_checkpoint": "deepfake-video-model-balanced/checkpoint-100",
  "epoch": 1.3793103448275863,
  "eval_steps": 50,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06896551724137931,
      "grad_norm": 11.416295051574707,
      "learning_rate": 6.896551724137932e-06,
      "loss": 0.7193,
      "step": 10
    },
    {
      "epoch": 0.13793103448275862,
      "grad_norm": 8.326750755310059,
      "learning_rate": 1.3793103448275863e-05,
      "loss": 0.6911,
      "step": 20
    },
    {
      "epoch": 0.20689655172413793,
      "grad_norm": 7.880892753601074,
      "learning_rate": 1.9999275591576767e-05,
      "loss": 0.7207,
      "step": 30
    },
    {
      "epoch": 0.27586206896551724,
      "grad_norm": 7.71298885345459,
      "learning_rate": 1.991247350280358e-05,
      "loss": 0.7034,
      "step": 40
    },
    {
      "epoch": 0.3448275862068966,
      "grad_norm": 14.676814079284668,
      "learning_rate": 1.9682229406025635e-05,
      "loss": 0.697,
      "step": 50
    },
    {
      "epoch": 0.3448275862068966,
      "eval_accuracy": 0.4726027397260274,
      "eval_loss": 0.7181810140609741,
      "eval_runtime": 96.4229,
      "eval_samples_per_second": 1.514,
      "eval_steps_per_second": 0.384,
      "step": 50
    },
    {
      "epoch": 0.41379310344827586,
      "grad_norm": 19.170063018798828,
      "learning_rate": 1.931187513114025e-05,
      "loss": 0.7374,
      "step": 60
    },
    {
      "epoch": 0.4827586206896552,
      "grad_norm": 12.284372329711914,
      "learning_rate": 1.880677002269928e-05,
      "loss": 0.7261,
      "step": 70
    },
    {
      "epoch": 0.5517241379310345,
      "grad_norm": 10.028695106506348,
      "learning_rate": 1.817422338558892e-05,
      "loss": 0.7085,
      "step": 80
    },
    {
      "epoch": 0.6206896551724138,
      "grad_norm": 8.536717414855957,
      "learning_rate": 1.74233887131081e-05,
      "loss": 0.7193,
      "step": 90
    },
    {
      "epoch": 0.6896551724137931,
      "grad_norm": 11.210798263549805,
      "learning_rate": 1.6565131228056136e-05,
      "loss": 0.7254,
      "step": 100
    },
    {
      "epoch": 0.6896551724137931,
      "eval_accuracy": 0.4863013698630137,
      "eval_loss": 0.7144510746002197,
      "eval_runtime": 95.1394,
      "eval_samples_per_second": 1.535,
      "eval_steps_per_second": 0.389,
      "step": 100
    },
    {
      "epoch": 0.7586206896551724,
      "grad_norm": 15.714051246643066,
      "learning_rate": 1.5611870653623826e-05,
      "loss": 0.6968,
      "step": 110
    },
    {
      "epoch": 0.8275862068965517,
      "grad_norm": 17.525775909423828,
      "learning_rate": 1.4577401489328335e-05,
      "loss": 0.7214,
      "step": 120
    },
    {
      "epoch": 0.896551724137931,
      "grad_norm": 8.84438705444336,
      "learning_rate": 1.3476693392753477e-05,
      "loss": 0.7145,
      "step": 130
    },
    {
      "epoch": 0.9655172413793104,
      "grad_norm": 17.513769149780273,
      "learning_rate": 1.2325674555743106e-05,
      "loss": 0.7781,
      "step": 140
    },
    {
      "epoch": 1.0344827586206897,
      "grad_norm": 7.130011081695557,
      "learning_rate": 1.1141001209780249e-05,
      "loss": 0.6942,
      "step": 150
    },
    {
      "epoch": 1.0344827586206897,
      "eval_accuracy": 0.4794520547945205,
      "eval_loss": 0.7006052136421204,
      "eval_runtime": 94.9661,
      "eval_samples_per_second": 1.537,
      "eval_steps_per_second": 0.39,
      "step": 150
    },
    {
      "epoch": 1.103448275862069,
      "grad_norm": 11.245115280151367,
      "learning_rate": 9.939816596007147e-06,
      "loss": 0.7334,
      "step": 160
    },
    {
      "epoch": 1.1724137931034484,
      "grad_norm": 17.212377548217773,
      "learning_rate": 8.739502887797108e-06,
      "loss": 0.648,
      "step": 170
    },
    {
      "epoch": 1.2413793103448276,
      "grad_norm": 7.2269606590271,
      "learning_rate": 7.557429655771691e-06,
      "loss": 0.7214,
      "step": 180
    },
    {
      "epoch": 1.3103448275862069,
      "grad_norm": 16.559890747070312,
      "learning_rate": 6.410702515190544e-06,
      "loss": 0.7197,
      "step": 190
    },
    {
      "epoch": 1.3793103448275863,
      "grad_norm": 7.4253387451171875,
      "learning_rate": 5.3159155930021e-06,
      "loss": 0.6836,
      "step": 200
    },
    {
      "epoch": 1.3793103448275863,
      "eval_accuracy": 0.4178082191780822,
      "eval_loss": 0.7102890014648438,
      "eval_runtime": 97.3566,
      "eval_samples_per_second": 1.5,
      "eval_steps_per_second": 0.38,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 290,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9.968516715773952e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}