Training in progress, step 1000, checkpoint
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:061dc4d2658410b6acb6b73f235d87b3f5265cef287f6769738bad18b4c0a56d
 size 2384234968
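These are Git LFS pointer files: the repository itself stores only the blob's SHA-256 (`oid`) and byte `size`, while the actual checkpoint bytes live in LFS storage. A minimal sketch (the helper name is hypothetical, not part of this repo) for checking a downloaded blob against its pointer:

```python
import hashlib
import os

def verify_lfs_pointer(blob_path: str, expected_oid: str, expected_size: int) -> bool:
    # Cheap check first: the pointer records the exact byte size.
    if os.path.getsize(blob_path) != expected_size:
        return False
    # Stream the blob so a multi-GB checkpoint never sits in memory at once.
    digest = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Values copied from the pointer diff above.
print(verify_lfs_pointer(
    "last-checkpoint/model.safetensors",
    "061dc4d2658410b6acb6b73f235d87b3f5265cef287f6769738bad18b4c0a56d",
    2384234968,
))
```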
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:42fe8edb8b2246c1d5a01ee698f646d3a2bb595570993cb3e2a0c0fd442749b7
 size 4768663315
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:2534e434cd5abbb8f7668d3eab0549db0ef95d6a797a3efa86b712e8e32266a7
 size 14645
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:89fc96babd031398dc4f49db529af14042a214d2e774c56f2d58620c95114b55
 size 1465
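Together, these four blobs are the per-checkpoint state that `transformers.Trainer` writes: model weights (`model.safetensors`), optimizer moments (`optimizer.pt`), LR scheduler state (`scheduler.pt`), and RNG state (`rng_state.pth`); `trainer.train(resume_from_checkpoint=...)` restores all of them together with `trainer_state.json` below. A sketch for inspecting the pieces directly, assuming the checkpoint has been downloaded locally:

```python
import torch
from safetensors.torch import load_file

# Weights are stored as safetensors; the rest are regular torch pickles.
state_dict = load_file("last-checkpoint/model.safetensors")
# weights_only=False because these are our own trusted checkpoint files.
optimizer = torch.load("last-checkpoint/optimizer.pt", map_location="cpu", weights_only=False)
scheduler = torch.load("last-checkpoint/scheduler.pt", map_location="cpu", weights_only=False)
rng_state = torch.load("last-checkpoint/rng_state.pth", map_location="cpu", weights_only=False)

print(f"{len(state_dict)} weight tensors")
print("optimizer keys:", list(optimizer.keys()))
print("scheduler state:", scheduler)
```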
last-checkpoint/trainer_state.json CHANGED
@@ -2,9 +2,9 @@
 "best_global_step": null,
 "best_metric": null,
 "best_model_checkpoint": null,
-"epoch":
+"epoch": 2.779672815871911,
 "eval_steps": 100,
-"global_step":
+"global_step": 1000,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -458,6 +458,456 @@
 "mean_token_accuracy": 0.9125856146216392,
 "num_tokens": 4088832.0,
 "step": 500
+},
+{
+"epoch": 1.4176818656456667,
+"grad_norm": 1.0593403577804565,
+"learning_rate": 5.87448559670782e-06,
+"loss": 0.0926,
+"mean_token_accuracy": 0.9135763190686703,
+"num_tokens": 4170752.0,
+"step": 510
+},
+{
+"epoch": 1.4455273233553776,
+"grad_norm": 0.865151584148407,
+"learning_rate": 5.771604938271605e-06,
+"loss": 0.096,
+"mean_token_accuracy": 0.9089041098952293,
+"num_tokens": 4252672.0,
+"step": 520
+},
+{
+"epoch": 1.4733727810650887,
+"grad_norm": 0.823634684085846,
+"learning_rate": 5.6687242798353915e-06,
+"loss": 0.0826,
+"mean_token_accuracy": 0.9167196683585643,
+"num_tokens": 4334592.0,
+"step": 530
+},
+{
+"epoch": 1.5012182387747999,
+"grad_norm": 1.3224678039550781,
+"learning_rate": 5.565843621399178e-06,
+"loss": 0.0948,
+"mean_token_accuracy": 0.909222112596035,
+"num_tokens": 4416512.0,
+"step": 540
+},
+{
+"epoch": 1.529063696484511,
+"grad_norm": 1.1047239303588867,
+"learning_rate": 5.462962962962963e-06,
+"loss": 0.0908,
+"mean_token_accuracy": 0.911081214249134,
+"num_tokens": 4498432.0,
+"step": 550
+},
+{
+"epoch": 1.556909154194222,
+"grad_norm": 1.124988317489624,
+"learning_rate": 5.360082304526749e-06,
+"loss": 0.0944,
+"mean_token_accuracy": 0.9126834630966186,
+"num_tokens": 4580352.0,
+"step": 560
+},
+{
+"epoch": 1.584754611903933,
+"grad_norm": 1.1450597047805786,
+"learning_rate": 5.257201646090535e-06,
+"loss": 0.09,
+"mean_token_accuracy": 0.9106775917112827,
+"num_tokens": 4662272.0,
+"step": 570
+},
+{
+"epoch": 1.6126000696136442,
+"grad_norm": 0.9272586107254028,
+"learning_rate": 5.154320987654321e-06,
+"loss": 0.0864,
+"mean_token_accuracy": 0.9094911932945251,
+"num_tokens": 4744192.0,
+"step": 580
+},
+{
+"epoch": 1.6404455273233554,
+"grad_norm": 1.3119956254959106,
+"learning_rate": 5.051440329218107e-06,
+"loss": 0.0887,
+"mean_token_accuracy": 0.9084026426076889,
+"num_tokens": 4826112.0,
+"step": 590
+},
+{
+"epoch": 1.6682909850330665,
+"grad_norm": 0.9722729325294495,
+"learning_rate": 4.9485596707818935e-06,
+"loss": 0.0863,
+"mean_token_accuracy": 0.9236545950174332,
+"num_tokens": 4908032.0,
+"step": 600
+},
+{
+"epoch": 1.6961364427427776,
+"grad_norm": 0.9531897902488708,
+"learning_rate": 4.845679012345679e-06,
+"loss": 0.0851,
+"mean_token_accuracy": 0.9154476508498192,
+"num_tokens": 4989952.0,
+"step": 610
+},
+{
+"epoch": 1.7239819004524888,
+"grad_norm": 1.1761149168014526,
+"learning_rate": 4.742798353909465e-06,
+"loss": 0.0889,
+"mean_token_accuracy": 0.9128913849592208,
+"num_tokens": 5071872.0,
+"step": 620
+},
+{
+"epoch": 1.7518273581622,
+"grad_norm": 0.8723099231719971,
+"learning_rate": 4.6399176954732515e-06,
+"loss": 0.0923,
+"mean_token_accuracy": 0.9085738733410835,
+"num_tokens": 5153792.0,
+"step": 630
+},
+{
+"epoch": 1.7796728158719108,
+"grad_norm": 1.3783780336380005,
+"learning_rate": 4.537037037037038e-06,
+"loss": 0.0921,
+"mean_token_accuracy": 0.9185420729219913,
+"num_tokens": 5235712.0,
+"step": 640
+},
+{
+"epoch": 1.807518273581622,
+"grad_norm": 0.9514628648757935,
+"learning_rate": 4.434156378600823e-06,
+"loss": 0.0914,
+"mean_token_accuracy": 0.918224073201418,
+"num_tokens": 5317632.0,
+"step": 650
+},
+{
+"epoch": 1.8353637312913331,
+"grad_norm": 1.5927624702453613,
+"learning_rate": 4.331275720164609e-06,
+"loss": 0.0948,
+"mean_token_accuracy": 0.9134784743189812,
+"num_tokens": 5399552.0,
+"step": 660
+},
+{
+"epoch": 1.863209189001044,
+"grad_norm": 1.1536865234375,
+"learning_rate": 4.228395061728396e-06,
+"loss": 0.0963,
+"mean_token_accuracy": 0.9069471605122089,
+"num_tokens": 5481472.0,
+"step": 670
+},
+{
+"epoch": 1.8910546467107552,
+"grad_norm": 0.9462175965309143,
+"learning_rate": 4.125514403292181e-06,
+"loss": 0.0898,
+"mean_token_accuracy": 0.9125489234924317,
+"num_tokens": 5563392.0,
+"step": 680
+},
+{
+"epoch": 1.9189001044204663,
+"grad_norm": 0.940758228302002,
+"learning_rate": 4.022633744855967e-06,
+"loss": 0.0949,
+"mean_token_accuracy": 0.9125366877764464,
+"num_tokens": 5645312.0,
+"step": 690
+},
+{
+"epoch": 1.9467455621301775,
+"grad_norm": 0.9007344245910645,
+"learning_rate": 3.9197530864197535e-06,
+"loss": 0.082,
+"mean_token_accuracy": 0.9042441304773092,
+"num_tokens": 5727232.0,
+"step": 700
+},
+{
+"epoch": 1.9745910198398886,
+"grad_norm": 0.880010187625885,
+"learning_rate": 3.81687242798354e-06,
+"loss": 0.0882,
+"mean_token_accuracy": 0.9144691735506058,
+"num_tokens": 5809152.0,
+"step": 710
+},
+{
+"epoch": 2.0,
+"grad_norm": 3.2822415828704834,
+"learning_rate": 3.7139917695473256e-06,
+"loss": 0.0894,
+"mean_token_accuracy": 0.9187732877796644,
+"num_tokens": 5883904.0,
+"step": 720
+},
+{
+"epoch": 2.027845457709711,
+"grad_norm": 0.8666273951530457,
+"learning_rate": 3.6111111111111115e-06,
+"loss": 0.0783,
+"mean_token_accuracy": 0.9194716215133667,
+"num_tokens": 5965824.0,
+"step": 730
+},
+{
+"epoch": 2.0556909154194223,
+"grad_norm": 1.257120132446289,
+"learning_rate": 3.5082304526748973e-06,
+"loss": 0.0792,
+"mean_token_accuracy": 0.9152030311524868,
+"num_tokens": 6047744.0,
+"step": 740
+},
+{
+"epoch": 2.0835363731291334,
+"grad_norm": 1.344621181488037,
+"learning_rate": 3.405349794238683e-06,
+"loss": 0.0868,
+"mean_token_accuracy": 0.9004647728055716,
+"num_tokens": 6129664.0,
+"step": 750
+},
+{
+"epoch": 2.1113818308388446,
+"grad_norm": 1.0627198219299316,
+"learning_rate": 3.30246913580247e-06,
+"loss": 0.0804,
+"mean_token_accuracy": 0.915875731408596,
+"num_tokens": 6211584.0,
+"step": 760
+},
+{
+"epoch": 2.1392272885485557,
+"grad_norm": 1.065942406654358,
+"learning_rate": 3.1995884773662556e-06,
+"loss": 0.0769,
+"mean_token_accuracy": 0.9183219157159328,
+"num_tokens": 6293504.0,
+"step": 770
+},
+{
+"epoch": 2.167072746258267,
+"grad_norm": 1.218607783317566,
+"learning_rate": 3.0967078189300415e-06,
+"loss": 0.0847,
+"mean_token_accuracy": 0.9176369860768319,
+"num_tokens": 6375424.0,
+"step": 780
+},
+{
+"epoch": 2.1949182039679775,
+"grad_norm": 0.8840998411178589,
+"learning_rate": 2.9938271604938273e-06,
+"loss": 0.076,
+"mean_token_accuracy": 0.9088307194411754,
+"num_tokens": 6457344.0,
+"step": 790
+},
+{
+"epoch": 2.2227636616776887,
+"grad_norm": 1.3679556846618652,
+"learning_rate": 2.890946502057613e-06,
+"loss": 0.0804,
+"mean_token_accuracy": 0.9142979431897402,
+"num_tokens": 6539264.0,
+"step": 800
+},
+{
+"epoch": 2.2506091193874,
+"grad_norm": 1.7247203588485718,
+"learning_rate": 2.7880658436213994e-06,
+"loss": 0.085,
+"mean_token_accuracy": 0.9157534211874008,
+"num_tokens": 6621184.0,
+"step": 810
+},
+{
+"epoch": 2.278454577097111,
+"grad_norm": 1.2949681282043457,
+"learning_rate": 2.6851851851851856e-06,
+"loss": 0.0792,
+"mean_token_accuracy": 0.9105797454714775,
+"num_tokens": 6703104.0,
+"step": 820
+},
+{
+"epoch": 2.306300034806822,
+"grad_norm": 1.1340421438217163,
+"learning_rate": 2.5823045267489715e-06,
+"loss": 0.0909,
+"mean_token_accuracy": 0.9104696653783322,
+"num_tokens": 6785024.0,
+"step": 830
+},
+{
+"epoch": 2.3341454925165332,
+"grad_norm": 1.4311587810516357,
+"learning_rate": 2.4794238683127577e-06,
+"loss": 0.07,
+"mean_token_accuracy": 0.9179427556693553,
+"num_tokens": 6866944.0,
+"step": 840
+},
+{
+"epoch": 2.3619909502262444,
+"grad_norm": 1.2658636569976807,
+"learning_rate": 2.3765432098765435e-06,
+"loss": 0.078,
+"mean_token_accuracy": 0.9194593898952007,
+"num_tokens": 6948864.0,
+"step": 850
+},
+{
+"epoch": 2.3898364079359555,
+"grad_norm": 1.1735376119613647,
+"learning_rate": 2.2736625514403294e-06,
+"loss": 0.0771,
+"mean_token_accuracy": 0.9146893344819546,
+"num_tokens": 7030784.0,
+"step": 860
+},
+{
+"epoch": 2.4176818656456667,
+"grad_norm": 1.3436040878295898,
+"learning_rate": 2.1707818930041156e-06,
+"loss": 0.0761,
+"mean_token_accuracy": 0.9215264186263085,
+"num_tokens": 7112704.0,
+"step": 870
+},
+{
+"epoch": 2.445527323355378,
+"grad_norm": 1.7530394792556763,
+"learning_rate": 2.0679012345679015e-06,
+"loss": 0.0809,
+"mean_token_accuracy": 0.9206335641443729,
+"num_tokens": 7194624.0,
+"step": 880
+},
+{
+"epoch": 2.4733727810650885,
+"grad_norm": 1.2825167179107666,
+"learning_rate": 1.9650205761316873e-06,
+"loss": 0.0763,
+"mean_token_accuracy": 0.915472112596035,
+"num_tokens": 7276544.0,
+"step": 890
+},
+{
+"epoch": 2.5012182387747997,
+"grad_norm": 1.0933923721313477,
+"learning_rate": 1.8621399176954735e-06,
+"loss": 0.0824,
+"mean_token_accuracy": 0.91106898188591,
+"num_tokens": 7358464.0,
+"step": 900
+},
+{
+"epoch": 2.529063696484511,
+"grad_norm": 1.5624420642852783,
+"learning_rate": 1.7592592592592594e-06,
+"loss": 0.0788,
+"mean_token_accuracy": 0.9117661438882351,
+"num_tokens": 7440384.0,
+"step": 910
+},
+{
+"epoch": 2.556909154194222,
+"grad_norm": 0.8869456052780151,
+"learning_rate": 1.6563786008230454e-06,
+"loss": 0.0744,
+"mean_token_accuracy": 0.9128913849592208,
+"num_tokens": 7522304.0,
+"step": 920
+},
+{
+"epoch": 2.584754611903933,
+"grad_norm": 1.7279531955718994,
+"learning_rate": 1.5534979423868312e-06,
+"loss": 0.0879,
+"mean_token_accuracy": 0.9147627178579569,
+"num_tokens": 7604224.0,
+"step": 930
+},
+{
+"epoch": 2.612600069613644,
+"grad_norm": 0.9950515031814575,
+"learning_rate": 1.4506172839506175e-06,
+"loss": 0.0785,
+"mean_token_accuracy": 0.9133194699883461,
+"num_tokens": 7686144.0,
+"step": 940
+},
+{
+"epoch": 2.6404455273233554,
+"grad_norm": 1.2250232696533203,
+"learning_rate": 1.3477366255144033e-06,
+"loss": 0.0781,
+"mean_token_accuracy": 0.9132705468684434,
+"num_tokens": 7768064.0,
+"step": 950
+},
+{
+"epoch": 2.6682909850330665,
+"grad_norm": 1.1228382587432861,
+"learning_rate": 1.2448559670781894e-06,
+"loss": 0.0858,
+"mean_token_accuracy": 0.9010029338300228,
+"num_tokens": 7849984.0,
+"step": 960
+},
+{
+"epoch": 2.6961364427427776,
+"grad_norm": 1.256042718887329,
+"learning_rate": 1.1419753086419754e-06,
+"loss": 0.079,
+"mean_token_accuracy": 0.9115582153201103,
+"num_tokens": 7931904.0,
+"step": 970
+},
+{
+"epoch": 2.723981900452489,
+"grad_norm": 0.9604980945587158,
+"learning_rate": 1.0390946502057615e-06,
+"loss": 0.0757,
+"mean_token_accuracy": 0.9120229929685593,
+"num_tokens": 8013824.0,
+"step": 980
+},
+{
+"epoch": 2.7518273581622,
+"grad_norm": 1.2086925506591797,
+"learning_rate": 9.362139917695474e-07,
+"loss": 0.082,
+"mean_token_accuracy": 0.9099559649825096,
+"num_tokens": 8095744.0,
+"step": 990
+},
+{
+"epoch": 2.779672815871911,
+"grad_norm": 0.9486561417579651,
+"learning_rate": 8.333333333333333e-07,
+"loss": 0.0888,
+"mean_token_accuracy": 0.910922210663557,
+"num_tokens": 8177664.0,
+"step": 1000
 }
 ],
 "logging_steps": 10,
@@ -477,7 +927,7 @@
 "attributes": {}
 }
 },
-"total_flos":
+"total_flos": 2.1611969148616704e+16,
 "train_batch_size": 2,
 "trial_name": null,
 "trial_params": null
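The `log_history` entries above double as a training log and are easy to read back for sanity checks. For example, `num_tokens` grows by 81,920 every 10 steps, i.e. 8,192 tokens per optimizer step, which is consistent with `"train_batch_size": 2` if each of the two sequences is packed to roughly 4,096 tokens (an inference from the numbers, not something the checkpoint states). A small sketch:

```python
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

# Keep only the training-loss entries (eval entries lack the "loss" key).
logs = [e for e in state["log_history"] if "loss" in e]
print(f"global_step={state['global_step']}, epoch={state['epoch']:.3f}")
print(f"latest loss={logs[-1]['loss']}, lr={logs[-1]['learning_rate']:.2e}")

# Tokens processed per optimizer step, from consecutive log entries:
# (8,177,664 - 8,095,744) / 10 = 8,192.
per_step = (logs[-1]["num_tokens"] - logs[-2]["num_tokens"]) / (logs[-1]["step"] - logs[-2]["step"])
print(f"tokens/step ~ {per_step:.0f}")
```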