End of training
- README.md +141 -0
- config.json +29 -0
- generation_config.json +6 -0
- model.safetensors +3 -0
- special_tokens_map.json +24 -0
- tokenizer.json +0 -0
- tokenizer.model +3 -0
- tokenizer_config.json +42 -0
- training_args.bin +3 -0
README.md
ADDED
@@ -0,0 +1,141 @@
---
library_name: transformers
license: apache-2.0
base_model: reflex-ai/AMD-Llama-350M-Upgraded
tags:
- generated_from_trainer
model-index:
- name: amdchess
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# amdchess

This model is a fine-tuned version of [reflex-ai/AMD-Llama-350M-Upgraded](https://huggingface.co/reflex-ai/AMD-Llama-350M-Upgraded) on an unknown dataset.
It achieves the following results on the evaluation set:
- Loss: 1.6347

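A minimal usage sketch, assuming the checkpoint is loaded from this repository; the repo id below is a placeholder, and the prompt is only illustrative since the card does not document the expected chess input format:

```python
# Sketch only: load the fine-tuned checkpoint and sample a continuation.
# "user/amdchess" is a placeholder repo id; substitute the real repo id or a
# local path to the files added in this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "user/amdchess"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

prompt = "1. e4 e5 2. Nf3"  # illustrative prompt; the training data format is undocumented
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
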
## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 3e-05
- train_batch_size: 4
- eval_batch_size: 4
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: cosine
- num_epochs: 0.1

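The Adam betas and epsilon above match the transformers defaults. A rough sketch of how these reported values map onto `TrainingArguments` (the output path and every option not listed in the card are assumptions):

```python
# Rough reconstruction of the reported hyperparameters, not the exact
# configuration stored in training_args.bin.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="amdchess",            # placeholder output path
    learning_rate=3e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    seed=42,
    lr_scheduler_type="cosine",
    num_train_epochs=0.1,
    adam_beta1=0.9,                   # as reported; also the library defaults
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
```
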
### Training results

| Training Loss | Epoch | Step | Validation Loss |
|:-------------:|:------:|:----:|:---------------:|
| 8.019 | 0.0012 | 4 | 7.6135 |
| 7.7094 | 0.0024 | 8 | 7.0826 |
| 6.8737 | 0.0035 | 12 | 6.8392 |
| 6.6426 | 0.0047 | 16 | 6.6142 |
| 6.3563 | 0.0059 | 20 | 6.2879 |
| 6.0826 | 0.0071 | 24 | 5.9688 |
| 5.8464 | 0.0083 | 28 | 5.5885 |
| 5.3209 | 0.0094 | 32 | 5.4342 |
| 5.2345 | 0.0106 | 36 | 5.2125 |
| 4.9003 | 0.0118 | 40 | 4.9282 |
| 4.6779 | 0.0130 | 44 | 4.7029 |
| 4.3778 | 0.0142 | 48 | 4.3920 |
| 4.3256 | 0.0154 | 52 | 4.1814 |
| 3.9975 | 0.0165 | 56 | 4.0072 |
| 3.73 | 0.0177 | 60 | 3.8358 |
| 4.0483 | 0.0189 | 64 | 3.7093 |
| 3.7907 | 0.0201 | 68 | 3.5874 |
| 3.3881 | 0.0213 | 72 | 3.4606 |
| 3.5066 | 0.0224 | 76 | 3.4071 |
| 3.3845 | 0.0236 | 80 | 3.2889 |
| 3.2318 | 0.0248 | 84 | 3.1932 |
| 3.5897 | 0.0260 | 88 | 3.1209 |
| 3.0362 | 0.0272 | 92 | 3.0123 |
| 2.7973 | 0.0283 | 96 | 2.9055 |
| 2.8976 | 0.0295 | 100 | 2.8210 |
| 2.8188 | 0.0307 | 104 | 2.7422 |
| 2.5149 | 0.0319 | 108 | 2.6395 |
| 2.495 | 0.0331 | 112 | 2.5714 |
| 2.5654 | 0.0342 | 116 | 2.4863 |
| 2.4205 | 0.0354 | 120 | 2.4448 |
| 2.3487 | 0.0366 | 124 | 2.3561 |
| 2.413 | 0.0378 | 128 | 2.3265 |
| 2.2713 | 0.0390 | 132 | 2.2814 |
| 2.2293 | 0.0402 | 136 | 2.2361 |
| 2.2793 | 0.0413 | 140 | 2.1745 |
| 2.185 | 0.0425 | 144 | 2.1444 |
| 2.0137 | 0.0437 | 148 | 2.1245 |
| 2.1408 | 0.0449 | 152 | 2.0849 |
| 2.1539 | 0.0461 | 156 | 2.0650 |
| 2.0592 | 0.0472 | 160 | 2.0345 |
| 1.9849 | 0.0484 | 164 | 2.0390 |
| 1.8796 | 0.0496 | 168 | 1.9978 |
| 1.9646 | 0.0508 | 172 | 1.9860 |
| 1.9913 | 0.0520 | 176 | 1.9388 |
| 1.967 | 0.0531 | 180 | 1.9121 |
| 1.9141 | 0.0543 | 184 | 1.9085 |
| 1.9513 | 0.0555 | 188 | 1.9040 |
| 1.9123 | 0.0567 | 192 | 1.8606 |
| 1.8204 | 0.0579 | 196 | 1.8556 |
| 1.9311 | 0.0590 | 200 | 1.8390 |
| 1.8425 | 0.0602 | 204 | 1.8162 |
| 1.7932 | 0.0614 | 208 | 1.7914 |
| 1.591 | 0.0626 | 212 | 1.7749 |
| 1.7899 | 0.0638 | 216 | 1.7667 |
| 1.7094 | 0.0650 | 220 | 1.7637 |
| 1.8023 | 0.0661 | 224 | 1.7458 |
| 1.7368 | 0.0673 | 228 | 1.7339 |
| 1.5679 | 0.0685 | 232 | 1.7281 |
| 1.7265 | 0.0697 | 236 | 1.7221 |
| 1.7034 | 0.0709 | 240 | 1.7093 |
| 1.5902 | 0.0720 | 244 | 1.7086 |
| 1.6903 | 0.0732 | 248 | 1.6976 |
| 1.7581 | 0.0744 | 252 | 1.6944 |
| 1.656 | 0.0756 | 256 | 1.6899 |
| 1.4287 | 0.0768 | 260 | 1.6858 |
| 1.6527 | 0.0779 | 264 | 1.6754 |
| 1.7206 | 0.0791 | 268 | 1.6787 |
| 1.8268 | 0.0803 | 272 | 1.6673 |
| 1.538 | 0.0815 | 276 | 1.6590 |
| 1.7374 | 0.0827 | 280 | 1.6711 |
| 1.7255 | 0.0839 | 284 | 1.6513 |
| 1.6032 | 0.0850 | 288 | 1.6552 |
| 1.5297 | 0.0862 | 292 | 1.6458 |
| 1.7639 | 0.0874 | 296 | 1.6488 |
| 1.8029 | 0.0886 | 300 | 1.6441 |
| 1.665 | 0.0898 | 304 | 1.6425 |
| 1.6854 | 0.0909 | 308 | 1.6425 |
| 1.5418 | 0.0921 | 312 | 1.6396 |
| 1.6943 | 0.0933 | 316 | 1.6373 |
| 1.6758 | 0.0945 | 320 | 1.6359 |
| 1.9994 | 0.0957 | 324 | 1.6352 |
| 1.6326 | 0.0968 | 328 | 1.6349 |
| 1.6935 | 0.0980 | 332 | 1.6348 |
| 1.6358 | 0.0992 | 336 | 1.6347 |

### Framework versions

- Transformers 4.44.2
- Pytorch 2.5.0+cu121
- Datasets 3.0.2
- Tokenizers 0.19.1

config.json
ADDED
@@ -0,0 +1,29 @@
{
  "_name_or_path": "reflex-ai/AMD-Llama-350M-Upgraded",
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "hidden_act": "silu",
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "max_position_embeddings": 2048,
  "mlp_bias": false,
  "model_type": "llama",
  "num_attention_heads": 12,
  "num_hidden_layers": 24,
  "num_key_value_heads": 12,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_scaling": null,
  "rope_theta": 10000.0,
  "tie_word_embeddings": false,
  "torch_dtype": "float32",
  "transformers_version": "4.44.2",
  "use_cache": true,
  "vocab_size": 32000
}
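As a quick sanity check on the dimensions above, a sketch that estimates the dense parameter count implied by this config (values hard-coded from the file; the layout assumed is the standard untied-embedding LLaMA architecture it declares):

```python
# Sketch: parameter count implied by config.json.
hidden, layers, inter, vocab = 768, 24, 4096, 32000

embed = vocab * hidden            # input embeddings
lm_head = vocab * hidden          # untied output head (tie_word_embeddings: false)
attn = 4 * hidden * hidden        # q, k, v, o projections per layer
mlp = 3 * hidden * inter          # gate, up, down projections per layer
total = embed + lm_head + layers * (attn + mlp)

print(f"~{total / 1e6:.0f}M parameters")  # ~332M, consistent with the ~1.33 GB
                                          # float32 model.safetensors below
```
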
generation_config.json
ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "transformers_version": "4.44.2"
}
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:725dd4e1d0f61d69609f24e34ddbc6f03d12644d82c386856b5dfbfc0be4e93c
size 1329245144
special_tokens_map.json
ADDED
@@ -0,0 +1,24 @@
{
  "bos_token": {
    "content": "<s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "</s>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": "<unk>",
  "unk_token": {
    "content": "<unk>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
The diff for this file is too large to render.
tokenizer.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
size 499723
tokenizer_config.json
ADDED
@@ -0,0 +1,42 @@
{
  "add_bos_token": true,
  "add_eos_token": false,
  "add_prefix_space": null,
  "added_tokens_decoder": {
    "0": {
      "content": "<unk>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "</s>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<s>",
  "clean_up_tokenization_spaces": false,
  "eos_token": "</s>",
  "legacy": true,
  "model_max_length": 1000000000000000019884624838656,
  "pad_token": "<unk>",
  "sp_model_kwargs": {},
  "spaces_between_special_tokens": false,
  "tokenizer_class": "LlamaTokenizer",
  "unk_token": "<unk>",
  "use_default_system_prompt": true
}
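A small sketch of what these tokenizer settings mean in practice (placeholder repo id again): with "add_bos_token": true and "add_eos_token": false, every encoded sequence starts with <s> (id 1) and no </s> is appended.

```python
# Sketch, assuming the tokenizer files above are available under a placeholder
# repo id or a local path.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("user/amdchess")

ids = tokenizer("1. e4 e5").input_ids
assert ids[0] == tokenizer.bos_token_id      # <s> prepended (add_bos_token: true)
assert ids[-1] != tokenizer.eos_token_id     # no </s> appended (add_eos_token: false)
print(tokenizer.convert_ids_to_tokens(ids))
```
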
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:49cbe6b908078fcd2b959f042facbf06a4d644b21aa3fcf82cd3117ca00346db
size 5176
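training_args.bin is the pickled TrainingArguments object saved by the Trainer; a sketch of inspecting it to recover the full training configuration (transformers must be importable for unpickling, and recent PyTorch releases require weights_only=False):

```python
# Sketch: load the pickled TrainingArguments and print a few fields.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # expected: TrainingArguments
print(args.learning_rate, args.lr_scheduler_type, args.num_train_epochs)
```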