Alwaly committed on
Commit
debd363
·
verified ·
1 Parent(s): ad9aff1

Training in progress, step 500

Browse files
config.json CHANGED
@@ -1,5 +1,4 @@
1
  {
2
- "_name_or_path": "facebook/m2m100_418M",
3
  "activation_dropout": 0.0,
4
  "activation_function": "relu",
5
  "architectures": [
@@ -14,7 +13,7 @@
14
  "decoder_layers": 12,
15
  "decoder_start_token_id": 2,
16
  "dropout": 0.1,
17
- "early_stopping": true,
18
  "encoder_attention_heads": 16,
19
  "encoder_ffn_dim": 4096,
20
  "encoder_layerdrop": 0.05,
@@ -23,15 +22,15 @@
23
  "gradient_checkpointing": false,
24
  "init_std": 0.02,
25
  "is_encoder_decoder": true,
26
- "max_length": 200,
27
  "max_position_embeddings": 1024,
28
  "model_type": "m2m_100",
29
- "num_beams": 5,
30
  "num_hidden_layers": 12,
31
  "pad_token_id": 1,
32
  "scale_embedding": true,
33
  "torch_dtype": "float32",
34
- "transformers_version": "4.44.2",
35
  "use_cache": true,
36
  "vocab_size": 128112
37
  }
 
1
  {
 
2
  "activation_dropout": 0.0,
3
  "activation_function": "relu",
4
  "architectures": [
 
13
  "decoder_layers": 12,
14
  "decoder_start_token_id": 2,
15
  "dropout": 0.1,
16
+ "early_stopping": null,
17
  "encoder_attention_heads": 16,
18
  "encoder_ffn_dim": 4096,
19
  "encoder_layerdrop": 0.05,
 
22
  "gradient_checkpointing": false,
23
  "init_std": 0.02,
24
  "is_encoder_decoder": true,
25
+ "max_length": null,
26
  "max_position_embeddings": 1024,
27
  "model_type": "m2m_100",
28
+ "num_beams": null,
29
  "num_hidden_layers": 12,
30
  "pad_token_id": 1,
31
  "scale_embedding": true,
32
  "torch_dtype": "float32",
33
+ "transformers_version": "4.52.4",
34
  "use_cache": true,
35
  "vocab_size": 128112
36
  }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b73978afc58a5a9e0076cd830517436baea323285adcd929b1fb5d6c3ab28cf6
3
  size 1935681888
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ee49321ce91d781660a8f2e7ce76d1a058751f05cce4a023a48979a3cf6254d
3
  size 1935681888
runs/Jun23_18-51-01_a9c45816e4b8/events.out.tfevents.1750704682.a9c45816e4b8.1362.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:320553b9da509238ed0a26cb6f87bed42020dee54074141a828d49c0601a07ba
3
+ size 5559
tokenizer_config.json CHANGED
@@ -936,8 +936,9 @@
936
  "__zu__"
937
  ],
938
  "bos_token": "<s>",
939
- "clean_up_tokenization_spaces": true,
940
  "eos_token": "</s>",
 
941
  "language_codes": "m2m100",
942
  "model_max_length": 1024,
943
  "num_madeup_words": 8,
 
936
  "__zu__"
937
  ],
938
  "bos_token": "<s>",
939
+ "clean_up_tokenization_spaces": false,
940
  "eos_token": "</s>",
941
+ "extra_special_tokens": {},
942
  "language_codes": "m2m100",
943
  "model_max_length": 1024,
944
  "num_madeup_words": 8,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:737662fe97f56b6a2a82ff756ac3d931ef5c182e19178f30481200bdb687739d
3
- size 5368
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3248cc69d476a75b43dfdf4b8127b9685b788a88b61f6b85b3f00049b4478d5
3
+ size 5496