huyisme-005 committed (verified)
Commit 6bbc71d · Parent: 0a81b8d

Upload folder using huggingface_hub

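The commit message indicates the files were pushed with `huggingface_hub`. A minimal sketch of the kind of call that produces such a commit; the local folder path and `repo_id` are assumptions, not taken from this page:

```python
# Hypothetical upload call; folder_path and repo_id are assumed.
from huggingface_hub import upload_folder

upload_folder(
    folder_path="./gemma_noisy_zarma_finetune",        # assumed local output dir
    repo_id="huyisme-005/gemma_noisy_zarma_finetune",  # assumed repo id
    commit_message="Upload folder using huggingface_hub",
)
```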
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,61 @@
+---
+library_name: transformers
+license: gemma
+base_model: google/gemma-3-270m
+tags:
+- generated_from_trainer
+model-index:
+- name: gemma_noisy_zarma_finetune
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+# gemma_noisy_zarma_finetune
+
+This model is a fine-tuned version of [google/gemma-3-270m](https://huggingface.co/google/gemma-3-270m) on the None dataset.
+It achieves the following results on the evaluation set:
+- Loss: 2.0806
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 2e-05
+- train_batch_size: 28
+- eval_batch_size: 28
+- seed: 42
+- optimizer: ADAMW_TORCH_FUSED with betas=(0.9,0.999), epsilon=1e-08, and no additional optimizer arguments
+- lr_scheduler_type: linear
+- num_epochs: 3
+- mixed_precision_training: Native AMP
+
+### Training results
+
+| Training Loss | Epoch | Step  | Validation Loss |
+|:-------------:|:-----:|:-----:|:---------------:|
+| 1.2439        | 1.0   | 9643  | 1.6537          |
+| 0.9003        | 2.0   | 19286 | 1.8666          |
+| 0.6960        | 3.0   | 28929 | 2.0806          |
+
+
+### Framework versions
+
+- Transformers 4.57.0
+- Pytorch 2.8.0+cu128
+- Datasets 4.1.1
+- Tokenizers 0.22.1
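For reference, a minimal inference sketch against this checkpoint; the `repo_id` is assumed from the committer and model name, and the prompt is a stand-in, not evaluation data:

```python
# Minimal inference sketch; repo_id and prompt are assumptions.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "huyisme-005/gemma_noisy_zarma_finetune"  # assumed
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

inputs = tokenizer("some noisy Zarma text here", return_tensors="pt")  # placeholder prompt
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```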
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<image_soft_token>": 262144
+}
config.json ADDED
@@ -0,0 +1,54 @@
+{
+  "_sliding_window_pattern": 6,
+  "architectures": [
+    "Gemma3ForCausalLM"
+  ],
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "attn_logit_softcapping": null,
+  "bos_token_id": 2,
+  "dtype": "float32",
+  "eos_token_id": 1,
+  "final_logit_softcapping": null,
+  "head_dim": 256,
+  "hidden_activation": "gelu_pytorch_tanh",
+  "hidden_size": 640,
+  "initializer_range": 0.02,
+  "intermediate_size": 2048,
+  "layer_types": [
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention"
+  ],
+  "max_position_embeddings": 32768,
+  "model_type": "gemma3_text",
+  "num_attention_heads": 4,
+  "num_hidden_layers": 18,
+  "num_key_value_heads": 1,
+  "pad_token_id": 0,
+  "query_pre_attn_scalar": 256,
+  "rms_norm_eps": 1e-06,
+  "rope_local_base_freq": 10000.0,
+  "rope_scaling": null,
+  "rope_theta": 1000000.0,
+  "sliding_window": 512,
+  "transformers_version": "4.57.0",
+  "use_bidirectional_attention": false,
+  "use_cache": true,
+  "vocab_size": 262144
+}
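The `layer_types` array follows directly from `_sliding_window_pattern: 6` over 18 layers: every sixth layer uses full attention, the rest use sliding-window attention with a 512-token window. A quick sketch that reconstructs the list:

```python
# Rebuild layer_types from the pattern fields in config.json above.
pattern, num_layers = 6, 18
layer_types = [
    "full_attention" if (i + 1) % pattern == 0 else "sliding_attention"
    for i in range(num_layers)
]
assert layer_types.count("full_attention") == 3    # layers 6, 12, 18
assert layer_types.count("sliding_attention") == 15
```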
generation_config.json ADDED
@@ -0,0 +1,11 @@
+{
+  "bos_token_id": 2,
+  "do_sample": true,
+  "eos_token_id": [
+    1
+  ],
+  "pad_token_id": 0,
+  "top_k": 64,
+  "top_p": 0.95,
+  "transformers_version": "4.57.0"
+}
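These defaults mean `generate()` samples with top-k 64 and nucleus 0.95 out of the box. `transformers` picks them up automatically from `generation_config.json`; spelled out explicitly, the equivalent object would be:

```python
# Equivalent of the shipped generation_config.json, built by hand.
from transformers import GenerationConfig

gen_config = GenerationConfig(
    do_sample=True,
    top_k=64,
    top_p=0.95,
    bos_token_id=2,
    eos_token_id=[1],
    pad_token_id=0,
)
# model.generate(**inputs, generation_config=gen_config)
```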
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:021eaa4dfac2a735365503d2f78e6fd7cbd5da92f64eaac7a5170e70bcbc13bd
+size 1072419256
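The 1,072,419,256-byte size is consistent with the float32 `dtype` in `config.json`: roughly 268M parameters at 4 bytes each. A back-of-envelope check from the config values (norm weights and the safetensors header are left out, so it slightly undershoots):

```python
# Rough parameter count from config.json; norm weights omitted.
vocab, hidden, layers = 262_144, 640, 18
heads, kv_heads, head_dim, inter = 4, 1, 256, 2_048

embed = vocab * hidden                      # tied input/output embeddings
attn = 2 * hidden * heads * head_dim \
     + 2 * hidden * kv_heads * head_dim     # q/o plus k/v projections
mlp = 3 * hidden * inter                    # gate, up, down projections
params = embed + layers * (attn + mlp)

print(params)          # 268_042_240
print(params * 4)      # 1_072_168_960 bytes, close to the 1_072_419_256 on disk
```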
noisy_zarma_metrics.json ADDED
@@ -0,0 +1,68 @@
+{
+  "model": "google/gemma-3-270m",
+  "train_rows": 45000,
+  "eval_rows": 5000,
+  "epochs": 3,
+  "learning_rate": 2e-05,
+  "final_eval_loss": 2.080573,
+  "final_perplexity": 8.009057,
+  "noisy_columns": [
+    "char_swap",
+    "random_char_insertion",
+    "char_delete",
+    "char_substitute",
+    "word_masking",
+    "word_swap"
+  ],
+  "overall": {
+    "BLEU": 46.8891,
+    "ChrF++": 51.4311,
+    "COMET": null,
+    "n_samples": 30000,
+    "COMET_error": "COMET scoring unavailable or failed."
+  },
+  "per_column": {
+    "char_swap": {
+      "BLEU": 50.1065,
+      "ChrF++": 54.3406,
+      "COMET": null,
+      "n_samples": 5000,
+      "COMET_error": "COMET scoring unavailable or failed."
+    },
+    "random_char_insertion": {
+      "BLEU": 49.0539,
+      "ChrF++": 53.9819,
+      "COMET": null,
+      "n_samples": 5000,
+      "COMET_error": "COMET scoring unavailable or failed."
+    },
+    "char_delete": {
+      "BLEU": 46.5467,
+      "ChrF++": 52.1023,
+      "COMET": null,
+      "n_samples": 5000,
+      "COMET_error": "COMET scoring unavailable or failed."
+    },
+    "char_substitute": {
+      "BLEU": 48.4838,
+      "ChrF++": 52.7162,
+      "COMET": null,
+      "n_samples": 5000,
+      "COMET_error": "COMET scoring unavailable or failed."
+    },
+    "word_masking": {
+      "BLEU": 40.6802,
+      "ChrF++": 43.6655,
+      "COMET": null,
+      "n_samples": 5000,
+      "COMET_error": "COMET scoring unavailable or failed."
+    },
+    "word_swap": {
+      "BLEU": 44.524,
+      "ChrF++": 51.7803,
+      "COMET": null,
+      "n_samples": 5000,
+      "COMET_error": "COMET scoring unavailable or failed."
+    }
+  }
+}
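`final_perplexity` is simply `exp(final_eval_loss)` (exp(2.080573) ≈ 8.009057), and the BLEU/ChrF++ figures are on the usual 0–100 corpus scale. A sketch of how such scores are typically computed with `sacrebleu`; the hypothesis/reference strings are stand-ins, not the evaluation data:

```python
import math
import sacrebleu

# Perplexity is the exponential of the eval loss.
assert abs(math.exp(2.080573) - 8.009057) < 1e-4

hypotheses = ["model output one", "model output two"]  # stand-in data
references = [["reference one", "reference two"]]      # one reference per hypothesis

bleu = sacrebleu.corpus_bleu(hypotheses, references)
chrf = sacrebleu.corpus_chrf(hypotheses, references, word_order=2)  # word_order=2 gives chrF++
print(bleu.score, chrf.score)
```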
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+{
+  "boi_token": "<start_of_image>",
+  "bos_token": {
+    "content": "<bos>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eoi_token": "<end_of_image>",
+  "eos_token": {
+    "content": "<eos>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "image_token": "<image_soft_token>",
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
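When the tokenizer is loaded, these entries surface as the usual named attributes. A quick check, with the same assumed `repo_id` as above:

```python
# Special tokens as seen through the loaded tokenizer (repo_id assumed).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("huyisme-005/gemma_noisy_zarma_finetune")
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)
# expected: <bos> <eos> <pad> <unk>
```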
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81dff017751f3628fd40ceb598dea6e37bce914e3ba98843f65dac72abe71eda
+size 33384819
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+size 4689074
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ea4231a12ad03603309e5bb0916d2e7e0e78cdf05a8370e1fd302f34177b107
+size 5841