Commit d2d1b6d
Parent(s): ee64ebc

Training in progress, step 100

Files changed:
- adapter_config.json  +16 -0
- adapter_model.bin    +3 -0
- training_args.bin    +3 -0
adapter_config.json ADDED
@@ -0,0 +1,16 @@
+{
+  "auto_mapping": null,
+  "base_model_name_or_path": "meta-llama/Llama-2-7b-hf",
+  "inference_mode": true,
+  "num_attention_heads": 32,
+  "num_layers": 32,
+  "num_transformer_submodules": 1,
+  "num_virtual_tokens": 20,
+  "peft_type": "PROMPT_TUNING",
+  "prompt_tuning_init": "TEXT",
+  "prompt_tuning_init_text": "tr\u1ea3 l\u1eddi c\u00e2u h\u1ecfi v\u1edbi n\u1ed9i dung \u0111\u01b0\u1ee3c cho d\u01b0\u1edbi \u0111\u00e2y",
+  "revision": null,
+  "task_type": "CAUSAL_LM",
+  "token_dim": 4096,
+  "tokenizer_name_or_path": "meta-llama/Llama-2-7b-hf"
+}
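This config describes a PEFT prompt-tuning adapter for Llama-2-7b: 20 trainable virtual-token embeddings (token_dim 4096, the model's hidden size) are prepended to every input and initialized from the embeddings of an instruction string. The escaped prompt_tuning_init_text decodes to the Vietnamese "trả lời câu hỏi với nội dung được cho dưới đây", roughly "answer the question using the content given below". As a minimal sketch (not necessarily the author's actual training script), a config like this is typically produced with the peft library:

from peft import PromptTuningConfig, PromptTuningInit, TaskType, get_peft_model
from transformers import AutoModelForCausalLM

# Prompt tuning: learn 20 "virtual token" embeddings (20 x 4096 floats
# for Llama-2-7b), initialized from the tokens of the init text.
config = PromptTuningConfig(
    task_type=TaskType.CAUSAL_LM,
    prompt_tuning_init=PromptTuningInit.TEXT,
    prompt_tuning_init_text="trả lời câu hỏi với nội dung được cho dưới đây",
    num_virtual_tokens=20,
    tokenizer_name_or_path="meta-llama/Llama-2-7b-hf",
)

base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
model = get_peft_model(base, config)
model.print_trainable_parameters()  # only the prompt embeddings train

Fields such as num_attention_heads, num_layers, and token_dim are filled in automatically from the base model's config when the adapter is saved.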
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a0364243e1915797fd7cd91d8e6d7adfc7e8c2cd36912c7b7a439ecdb361f83
+size 328509
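adapter_model.bin is stored with Git LFS, so the commit records only this pointer (spec version, SHA-256 object id, byte size) rather than the weights themselves. The 328,509-byte size is consistent with 20 virtual tokens × 4096 dims of float32 prompt embeddings (327,680 bytes) plus serialization overhead. A minimal sketch for checking a downloaded object against its pointer (the local file path is hypothetical):

import hashlib
from pathlib import Path

def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    # Compare a local file's byte size and SHA-256 digest with the
    # oid/size fields of its Git LFS pointer.
    data = Path(path).read_bytes()
    return (len(data) == expected_size
            and hashlib.sha256(data).hexdigest() == expected_oid)

# Values taken from the pointer above.
print(verify_lfs_object(
    "adapter_model.bin",
    "8a0364243e1915797fd7cd91d8e6d7adfc7e8c2cd36912c7b7a439ecdb361f83",
    328509,
))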
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cbb986581ef71182f6fced43babda9c0ea44c51d1029b4f3c3c7ec2315e6fd5e
+size 4155
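training_args.bin is likewise an LFS pointer; the underlying 4,155-byte file is the pickled transformers TrainingArguments for the run. A hedged sketch of how this checkpoint could be consumed, assuming a local clone of the repo at a hypothetical path:

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM

# Attach the saved prompt-tuning adapter to the base model.
base = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
model = PeftModel.from_pretrained(base, "path/to/this-repo")

# Inspect the pickled training arguments (weights_only=False is needed
# on torch >= 2.6, where weights-only loading became the default).
args = torch.load("path/to/this-repo/training_args.bin", weights_only=False)
print(args.learning_rate, args.max_steps)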