Praha-Labs committed on
Commit f131c8c · verified · 1 parent: 4e2abbc

Upload folder using huggingface_hub

Files changed (48)
  1. checkpoint-74000/config.json +58 -0
  2. checkpoint-74000/generation_config.json +8 -0
  3. checkpoint-74000/model.safetensors +3 -0
  4. checkpoint-74000/optimizer.pt +3 -0
  5. checkpoint-74000/rng_state.pth +3 -0
  6. checkpoint-74000/scheduler.pt +3 -0
  7. checkpoint-74000/trainer_state.json +0 -0
  8. checkpoint-74000/training_args.bin +3 -0
  9. checkpoint-74500/config.json +58 -0
  10. checkpoint-74500/generation_config.json +8 -0
  11. checkpoint-74500/model.safetensors +3 -0
  12. checkpoint-74500/optimizer.pt +3 -0
  13. checkpoint-74500/rng_state.pth +3 -0
  14. checkpoint-74500/scheduler.pt +3 -0
  15. checkpoint-74500/trainer_state.json +0 -0
  16. checkpoint-74500/training_args.bin +3 -0
  17. checkpoint-75000/config.json +58 -0
  18. checkpoint-75000/generation_config.json +8 -0
  19. checkpoint-75000/model.safetensors +3 -0
  20. checkpoint-75000/optimizer.pt +3 -0
  21. checkpoint-75000/rng_state.pth +3 -0
  22. checkpoint-75000/scheduler.pt +3 -0
  23. checkpoint-75000/trainer_state.json +0 -0
  24. checkpoint-75000/training_args.bin +3 -0
  25. checkpoint-75500/config.json +58 -0
  26. checkpoint-75500/generation_config.json +8 -0
  27. checkpoint-75500/model.safetensors +3 -0
  28. checkpoint-75500/optimizer.pt +3 -0
  29. checkpoint-75500/rng_state.pth +3 -0
  30. checkpoint-75500/scheduler.pt +3 -0
  31. checkpoint-75500/trainer_state.json +0 -0
  32. checkpoint-75500/training_args.bin +3 -0
  33. checkpoint-76000/config.json +58 -0
  34. checkpoint-76000/generation_config.json +8 -0
  35. checkpoint-76000/model.safetensors +3 -0
  36. checkpoint-76000/optimizer.pt +3 -0
  37. checkpoint-76000/rng_state.pth +3 -0
  38. checkpoint-76000/scheduler.pt +3 -0
  39. checkpoint-76000/trainer_state.json +0 -0
  40. checkpoint-76000/training_args.bin +3 -0
  41. checkpoint-76160/config.json +58 -0
  42. checkpoint-76160/generation_config.json +8 -0
  43. checkpoint-76160/model.safetensors +3 -0
  44. checkpoint-76160/optimizer.pt +3 -0
  45. checkpoint-76160/rng_state.pth +3 -0
  46. checkpoint-76160/scheduler.pt +3 -0
  47. checkpoint-76160/trainer_state.json +0 -0
  48. checkpoint-76160/training_args.bin +3 -0
checkpoint-74000/config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "architectures": [
+     "Lfm2ForCausalLM"
+   ],
+   "block_auto_adjust_ff_dim": true,
+   "block_dim": 1024,
+   "block_ff_dim": 6656,
+   "block_ffn_dim_multiplier": 1.0,
+   "block_mlp_init_scale": 1.0,
+   "block_multiple_of": 256,
+   "block_norm_eps": 1e-05,
+   "block_out_init_scale": 1.0,
+   "block_use_swiglu": true,
+   "block_use_xavier_init": true,
+   "bos_token_id": 1,
+   "conv_L_cache": 3,
+   "conv_bias": false,
+   "conv_dim": 1024,
+   "conv_dim_out": 1024,
+   "conv_use_xavier_init": true,
+   "dtype": "bfloat16",
+   "eos_token_id": 7,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 6656,
+   "layer_types": [
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv"
+   ],
+   "max_position_embeddings": 128000,
+   "model_type": "lfm2",
+   "norm_eps": 1e-05,
+   "num_attention_heads": 16,
+   "num_heads": 16,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 0,
+   "rope_theta": 1000000.0,
+   "transformers_version": "4.56.0",
+   "unsloth_version": "2025.8.10",
+   "use_cache": true,
+   "use_pos_enc": true,
+   "vocab_size": 93083
+ }
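
This config.json (identical across all six checkpoints) describes an LFM2 hybrid stack: 16 layers, of which 10 are gated short-convolution blocks and 6 are full-attention layers (per layer_types), hidden size 1024, GQA with 16 query and 8 key/value heads, and a 128k context. A minimal sketch of inspecting and loading it, assuming transformers >= 4.56 (which includes Lfm2ForCausalLM) and a local copy of the checkpoint folder; note the commit contains no tokenizer files, so a tokenizer must come from elsewhere:

from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("checkpoint-74000")
print(config.layer_types.count("conv"))            # 10 short-conv blocks
print(config.layer_types.count("full_attention"))  # 6 attention layers

# Reads the bfloat16 weights from model.safetensors (~765 MB).
model = AutoModelForCausalLM.from_pretrained("checkpoint-74000")
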
checkpoint-74000/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 7,
+   "max_length": 128000,
+   "pad_token_id": 0,
+   "transformers_version": "4.56.0"
+ }
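
The matching generation_config.json only pins the special-token ids and a default max_length; "_from_model_config" marks it as derived from config.json. A short sketch of reading it (same hypothetical local path as above):

from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("checkpoint-74000")
assert gen_cfg.eos_token_id == 7 and gen_cfg.pad_token_id == 0
# These act as defaults at call time:
#   model.generate(**inputs, generation_config=gen_cfg, max_new_tokens=64)
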
checkpoint-74000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e7b95b7063f2deba50c3ab2b37abb5a8b40b497444e0f00a4bef3e7aff86106
+ size 765400720
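
The binary files are committed as Git LFS pointers: the repo tracks only a blob's SHA-256 and byte size, and the content itself is fetched by `git lfs pull` or a Hub download. A small sketch, assuming the blob has already been downloaded to the pointer's path, that re-checks a file against its pointer:

import hashlib

def sha256_of(path: str, chunk: int = 1 << 20) -> str:
    # Stream the file so large blobs don't have to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk):
            h.update(block)
    return h.hexdigest()

# oid taken from the pointer above.
assert sha256_of("checkpoint-74000/model.safetensors") == \
    "0e7b95b7063f2deba50c3ab2b37abb5a8b40b497444e0f00a4bef3e7aff86106"
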
checkpoint-74000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a183afc198e6bb5deb1b93696e759eacbbc8073cd6430dc617489b05e2b67a95
+ size 1347037259
checkpoint-74000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55618d72fdc98ce531c1d13612b817a14a50cd3ee258c44dec6a79dee2ab54ab
+ size 14645
checkpoint-74000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:243bf476157f66398e13327825b68cfdeccc6f1288c69c9c1dcd577da62097ce
+ size 1465
checkpoint-74000/trainer_state.json ADDED
The diff for this file is too large to render.
checkpoint-74000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed1f03fe8882aba24ecf0fbe14052a7861e4bb2f188ffd692e2417bea6fe2e8f
+ size 5777
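
Beyond the weights, each checkpoint folder carries the full Trainer resume state: optimizer.pt (AdamW moments, hence ~1.35 GB next to 765 MB of weights), scheduler.pt, rng_state.pth, trainer_state.json (step and loss history), and the pickled training_args.bin. A sketch of inspecting the state file, which is plain JSON; the keys shown are standard Trainer fields:

import json

with open("checkpoint-74000/trainer_state.json") as f:
    state = json.load(f)
print(state["global_step"])       # 74000 -- checkpoints are named after this
print(state["log_history"][-1])   # most recently logged metrics

# To continue the run, a Trainer rebuilt with the original model/dataset
# setup (not part of this commit) would call:
#   trainer.train(resume_from_checkpoint="checkpoint-74000")
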
checkpoint-74500/config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "architectures": [
+     "Lfm2ForCausalLM"
+   ],
+   "block_auto_adjust_ff_dim": true,
+   "block_dim": 1024,
+   "block_ff_dim": 6656,
+   "block_ffn_dim_multiplier": 1.0,
+   "block_mlp_init_scale": 1.0,
+   "block_multiple_of": 256,
+   "block_norm_eps": 1e-05,
+   "block_out_init_scale": 1.0,
+   "block_use_swiglu": true,
+   "block_use_xavier_init": true,
+   "bos_token_id": 1,
+   "conv_L_cache": 3,
+   "conv_bias": false,
+   "conv_dim": 1024,
+   "conv_dim_out": 1024,
+   "conv_use_xavier_init": true,
+   "dtype": "bfloat16",
+   "eos_token_id": 7,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 6656,
+   "layer_types": [
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv"
+   ],
+   "max_position_embeddings": 128000,
+   "model_type": "lfm2",
+   "norm_eps": 1e-05,
+   "num_attention_heads": 16,
+   "num_heads": 16,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 0,
+   "rope_theta": 1000000.0,
+   "transformers_version": "4.56.0",
+   "unsloth_version": "2025.8.10",
+   "use_cache": true,
+   "use_pos_enc": true,
+   "vocab_size": 93083
+ }
checkpoint-74500/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 7,
+   "max_length": 128000,
+   "pad_token_id": 0,
+   "transformers_version": "4.56.0"
+ }
checkpoint-74500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a32b56f368cfd3462bc0d9ab9e6405df45eb8f217845960bb577306881897b2
+ size 765400720
checkpoint-74500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbb54ce4646caeed4a774031c18d312a53df9cbace4d69ba90d753e2b0770c28
+ size 1347037259
checkpoint-74500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55618d72fdc98ce531c1d13612b817a14a50cd3ee258c44dec6a79dee2ab54ab
+ size 14645
checkpoint-74500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:384ca2fa032510e23a3c04727d5593cbcc9e32cea93f5409ff750dd572da4d3e
+ size 1465
checkpoint-74500/trainer_state.json ADDED
The diff for this file is too large to render.
checkpoint-74500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed1f03fe8882aba24ecf0fbe14052a7861e4bb2f188ffd692e2417bea6fe2e8f
+ size 5777
checkpoint-75000/config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "architectures": [
+     "Lfm2ForCausalLM"
+   ],
+   "block_auto_adjust_ff_dim": true,
+   "block_dim": 1024,
+   "block_ff_dim": 6656,
+   "block_ffn_dim_multiplier": 1.0,
+   "block_mlp_init_scale": 1.0,
+   "block_multiple_of": 256,
+   "block_norm_eps": 1e-05,
+   "block_out_init_scale": 1.0,
+   "block_use_swiglu": true,
+   "block_use_xavier_init": true,
+   "bos_token_id": 1,
+   "conv_L_cache": 3,
+   "conv_bias": false,
+   "conv_dim": 1024,
+   "conv_dim_out": 1024,
+   "conv_use_xavier_init": true,
+   "dtype": "bfloat16",
+   "eos_token_id": 7,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 6656,
+   "layer_types": [
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv"
+   ],
+   "max_position_embeddings": 128000,
+   "model_type": "lfm2",
+   "norm_eps": 1e-05,
+   "num_attention_heads": 16,
+   "num_heads": 16,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 0,
+   "rope_theta": 1000000.0,
+   "transformers_version": "4.56.0",
+   "unsloth_version": "2025.8.10",
+   "use_cache": true,
+   "use_pos_enc": true,
+   "vocab_size": 93083
+ }
checkpoint-75000/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 7,
+   "max_length": 128000,
+   "pad_token_id": 0,
+   "transformers_version": "4.56.0"
+ }
checkpoint-75000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:069852be1cdb1c29d5fca8959c2c843248dd5aa0cb713ff00a01ec8705744c72
+ size 765400720
checkpoint-75000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5b4e2f61894ef87f06d5ce3ba2cb8cd81bd718227edb977c9c09fcd36a7d514
+ size 1347037259
checkpoint-75000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55618d72fdc98ce531c1d13612b817a14a50cd3ee258c44dec6a79dee2ab54ab
+ size 14645
checkpoint-75000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fa929656755bd52981b3e8c46f3daed040bf06ef9e7d0940ac922edb5faa37e
+ size 1465
checkpoint-75000/trainer_state.json ADDED
The diff for this file is too large to render.
checkpoint-75000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed1f03fe8882aba24ecf0fbe14052a7861e4bb2f188ffd692e2417bea6fe2e8f
+ size 5777
checkpoint-75500/config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "architectures": [
+     "Lfm2ForCausalLM"
+   ],
+   "block_auto_adjust_ff_dim": true,
+   "block_dim": 1024,
+   "block_ff_dim": 6656,
+   "block_ffn_dim_multiplier": 1.0,
+   "block_mlp_init_scale": 1.0,
+   "block_multiple_of": 256,
+   "block_norm_eps": 1e-05,
+   "block_out_init_scale": 1.0,
+   "block_use_swiglu": true,
+   "block_use_xavier_init": true,
+   "bos_token_id": 1,
+   "conv_L_cache": 3,
+   "conv_bias": false,
+   "conv_dim": 1024,
+   "conv_dim_out": 1024,
+   "conv_use_xavier_init": true,
+   "dtype": "bfloat16",
+   "eos_token_id": 7,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 6656,
+   "layer_types": [
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv"
+   ],
+   "max_position_embeddings": 128000,
+   "model_type": "lfm2",
+   "norm_eps": 1e-05,
+   "num_attention_heads": 16,
+   "num_heads": 16,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 0,
+   "rope_theta": 1000000.0,
+   "transformers_version": "4.56.0",
+   "unsloth_version": "2025.8.10",
+   "use_cache": true,
+   "use_pos_enc": true,
+   "vocab_size": 93083
+ }
checkpoint-75500/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 7,
+   "max_length": 128000,
+   "pad_token_id": 0,
+   "transformers_version": "4.56.0"
+ }
checkpoint-75500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ee97a6beefd28d9aa37f57947ad6bc1c38e4199658f93f9cfbda0e0862a1238d
+ size 765400720
checkpoint-75500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1e18dc713efe638795dea863e13c172d4c3ad4c82f6d5e111d456712a4882c02
+ size 1347037259
checkpoint-75500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55618d72fdc98ce531c1d13612b817a14a50cd3ee258c44dec6a79dee2ab54ab
+ size 14645
checkpoint-75500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:40fc75ae7519c29edd6583dd123ddc48a52e7bd03032ff9847d98fec59e769f3
+ size 1465
checkpoint-75500/trainer_state.json ADDED
The diff for this file is too large to render.
checkpoint-75500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed1f03fe8882aba24ecf0fbe14052a7861e4bb2f188ffd692e2417bea6fe2e8f
+ size 5777
checkpoint-76000/config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "architectures": [
+     "Lfm2ForCausalLM"
+   ],
+   "block_auto_adjust_ff_dim": true,
+   "block_dim": 1024,
+   "block_ff_dim": 6656,
+   "block_ffn_dim_multiplier": 1.0,
+   "block_mlp_init_scale": 1.0,
+   "block_multiple_of": 256,
+   "block_norm_eps": 1e-05,
+   "block_out_init_scale": 1.0,
+   "block_use_swiglu": true,
+   "block_use_xavier_init": true,
+   "bos_token_id": 1,
+   "conv_L_cache": 3,
+   "conv_bias": false,
+   "conv_dim": 1024,
+   "conv_dim_out": 1024,
+   "conv_use_xavier_init": true,
+   "dtype": "bfloat16",
+   "eos_token_id": 7,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 6656,
+   "layer_types": [
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv"
+   ],
+   "max_position_embeddings": 128000,
+   "model_type": "lfm2",
+   "norm_eps": 1e-05,
+   "num_attention_heads": 16,
+   "num_heads": 16,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 0,
+   "rope_theta": 1000000.0,
+   "transformers_version": "4.56.0",
+   "unsloth_version": "2025.8.10",
+   "use_cache": true,
+   "use_pos_enc": true,
+   "vocab_size": 93083
+ }
checkpoint-76000/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 7,
+   "max_length": 128000,
+   "pad_token_id": 0,
+   "transformers_version": "4.56.0"
+ }
checkpoint-76000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a89709c77f397f0ebcc3358b26d469e410318534d9c9c3ff43a5af9dbecabe37
+ size 765400720
checkpoint-76000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:11513907f0fa7a3e961e645609e4b81065b3475a323f29026fc1328672389399
+ size 1347037259
checkpoint-76000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55618d72fdc98ce531c1d13612b817a14a50cd3ee258c44dec6a79dee2ab54ab
+ size 14645
checkpoint-76000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a8266fbc35f4491be03f90aab6e8c9ba4e1c952d736f96da8e6315191d13278
+ size 1465
checkpoint-76000/trainer_state.json ADDED
The diff for this file is too large to render.
checkpoint-76000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed1f03fe8882aba24ecf0fbe14052a7861e4bb2f188ffd692e2417bea6fe2e8f
+ size 5777
checkpoint-76160/config.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "architectures": [
+     "Lfm2ForCausalLM"
+   ],
+   "block_auto_adjust_ff_dim": true,
+   "block_dim": 1024,
+   "block_ff_dim": 6656,
+   "block_ffn_dim_multiplier": 1.0,
+   "block_mlp_init_scale": 1.0,
+   "block_multiple_of": 256,
+   "block_norm_eps": 1e-05,
+   "block_out_init_scale": 1.0,
+   "block_use_swiglu": true,
+   "block_use_xavier_init": true,
+   "bos_token_id": 1,
+   "conv_L_cache": 3,
+   "conv_bias": false,
+   "conv_dim": 1024,
+   "conv_dim_out": 1024,
+   "conv_use_xavier_init": true,
+   "dtype": "bfloat16",
+   "eos_token_id": 7,
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 6656,
+   "layer_types": [
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv",
+     "full_attention",
+     "conv"
+   ],
+   "max_position_embeddings": 128000,
+   "model_type": "lfm2",
+   "norm_eps": 1e-05,
+   "num_attention_heads": 16,
+   "num_heads": 16,
+   "num_hidden_layers": 16,
+   "num_key_value_heads": 8,
+   "pad_token_id": 0,
+   "rope_theta": 1000000.0,
+   "transformers_version": "4.56.0",
+   "unsloth_version": "2025.8.10",
+   "use_cache": true,
+   "use_pos_enc": true,
+   "vocab_size": 93083
+ }
checkpoint-76160/generation_config.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 1,
+   "eos_token_id": 7,
+   "max_length": 128000,
+   "pad_token_id": 0,
+   "transformers_version": "4.56.0"
+ }
checkpoint-76160/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc59931b7e1440f5270d7ad0ac5b8c71c3eda658726316b80638e818833f568c
+ size 765400720
checkpoint-76160/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35a030ab28cbc32ccf50be2ad8c8ff576da076af81690b7f4d734fe95e86f3db
+ size 1347037259
checkpoint-76160/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:55618d72fdc98ce531c1d13612b817a14a50cd3ee258c44dec6a79dee2ab54ab
+ size 14645
checkpoint-76160/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7c07986256907f7aa2d3f67e3d23e7b1fce4d9433b23fe023304c7a842ddef3b
+ size 1465
checkpoint-76160/trainer_state.json ADDED
The diff for this file is too large to render.
checkpoint-76160/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed1f03fe8882aba24ecf0fbe14052a7861e4bb2f188ffd692e2417bea6fe2e8f
+ size 5777
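
Since the commit message says the folders were pushed with huggingface_hub, a single checkpoint can be fetched back selectively rather than cloning all six folders (~12 GB of LFS blobs in total). A sketch, with the repo id left as a placeholder because it is not shown in this diff:

from huggingface_hub import snapshot_download

path = snapshot_download(
    repo_id="Praha-Labs/...",                # placeholder -- repo name not in this diff
    allow_patterns=["checkpoint-76160/*"],   # fetch only the final checkpoint
)
print(path)  # local directory containing the resolved LFS files
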