SystemAdmin123 committed on
Commit 812391a · verified · 1 parent: b529dbb

Training in progress, step 40

adapter_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "unsloth/tinyllama",
+   "bias": "none",
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 256,
+   "lora_bias": false,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 128,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "gate_proj",
+     "v_proj",
+     "o_proj",
+     "up_proj",
+     "q_proj",
+     "down_proj",
+     "k_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
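
Below is a minimal sketch of consuming this adapter config at inference time with PEFT, assuming the adapter was pushed to the hub_model_id named in axolotl_config.yaml further down (swap in a local checkpoint directory if you have one):

import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

# Load the frozen base model, then attach the r=128 LoRA weights on top.
base = AutoModelForCausalLM.from_pretrained("unsloth/tinyllama", torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, "SystemAdmin123/f9f404f8-0d42-4e2e-892f-c51231bfc8a4")

# Optional: fold the adapters back into the base weights for plain inference.
model = model.merge_and_unload()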
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:be4b9db42bafc0797619853c54867f0657d42c174c280399042523803335f875
+ size 403743472
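
This entry is a Git LFS pointer, not the weights themselves: only the hash and size (about 403 MB) live in the repo, and the safetensors blob is stored out of band. A sketch of fetching the real file, assuming you have access (the training config below marks the repo private):

from huggingface_hub import hf_hub_download

# Downloads the adapter weights that the LFS pointer above references.
path = hf_hub_download(
    repo_id="SystemAdmin123/f9f404f8-0d42-4e2e-892f-c51231bfc8a4",
    filename="adapter_model.safetensors",
)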
axolotl_config.yaml ADDED
@@ -0,0 +1,57 @@
+ adapter: lora
+ base_model: unsloth/tinyllama
+ batch_size: 64
+ bf16: true
+ chat_template: tokenizer_default_fallback_alpaca
+ datasets:
+ - format: custom
+   path: https://gradients.s3.eu-north-1.amazonaws.com/1619d5c4fda918de_train_data.json?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVVZOOA7SA4UOFLPI%2F20250209%2Feu-north-1%2Fs3%2Faws4_request&X-Amz-Date=20250209T105617Z&X-Amz-Expires=604800&X-Amz-SignedHeaders=host&X-Amz-Signature=02ee79ed43b8e249b18535c8d2b439c262793ee8a3ac4a93e676f23db64fcee9
+   type:
+     field_instruction: input
+     field_output: output
+     format: '{instruction}'
+     no_input_format: '{instruction}'
+     system_format: '{system}'
+     system_prompt: ''
+ device_map: auto
+ eval_sample_packing: false
+ eval_steps: 0.1
+ flash_attention: true
+ gradient_checkpointing: true
+ group_by_length: true
+ hub_model_id: SystemAdmin123/f9f404f8-0d42-4e2e-892f-c51231bfc8a4
+ hub_strategy: checkpoint
+ learning_rate: 0.0001
+ logging_steps: 10
+ lora_alpha: 256
+ lora_dropout: 0.1
+ lora_r: 128
+ lora_target_linear: true
+ lr_scheduler: cosine
+ max_steps: 11200
+ micro_batch_size: 5
+ model_type: AutoModelForCausalLM
+ num_epochs: 10000
+ optimizer: adamw_bnb_8bit
+ output_dir: /root/.sn56/axolotl/tmp/f9f404f8-0d42-4e2e-892f-c51231bfc8a4
+ pad_to_sequence_len: true
+ resize_token_embeddings_to_32x: false
+ sample_packing: true
+ save_steps: 40
+ save_total_limit: 1
+ sequence_len: 2048
+ tokenizer_type: LlamaTokenizerFast
+ torch_dtype: bf16
+ training_args_kwargs:
+   disable_tqdm: true
+   hub_private_repo: true
+   save_only_model: true
+   trust_remote_code: true
+ val_set_size: 0.01
+ wandb_entity: ''
+ wandb_mode: online
+ wandb_name: unsloth/tinyllama-https://gradients.s3.eu-north-1.amazonaws.com/1619d5c4fda918de_train_data.json?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVVZOOA7SA4UOFLPI%2F20250209%2Feu-north-1%2Fs3%2Faws4_request&X-Amz-Date=20250209T105617Z&X-Amz-Expires=604800&X-Amz-SignedHeaders=host&X-Amz-Signature=02ee79ed43b8e249b18535c8d2b439c262793ee8a3ac4a93e676f23db64fcee9
+ wandb_project: Gradients-On-Demand
+ wandb_run: your_name
+ wandb_runid: default
+ warmup_ratio: 0.05
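
Two details worth noting: save_steps: 40 together with hub_strategy: checkpoint is what produced this "step 40" push, and lora_target_linear: true expands to every linear projection in the Llama blocks, which is why adapter_config.json above lists all seven of them. A sketch of the PEFT LoraConfig these settings resolve to (field names per the peft library; the expansion itself is axolotl's):

from peft import LoraConfig

lora_config = LoraConfig(
    r=128,            # lora_r
    lora_alpha=256,   # effective scaling alpha/r = 2
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",  # attention projections
        "gate_proj", "up_proj", "down_proj",     # MLP projections
    ],
)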
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "_attn_implementation_autoset": true,
+   "_name_or_path": "unsloth/tinyllama",
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "head_dim": 64,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 5632,
+   "max_position_embeddings": 2048,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 22,
+   "num_key_value_heads": 4,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.47.1",
+   "unsloth_version": "2024.9",
+   "use_cache": false,
+   "vocab_size": 32000
+ }
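
This is the stock TinyLlama architecture: 22 layers, hidden size 2048, and grouped-query attention with 32 query heads over 4 key/value heads. A sketch of rebuilding it from the config alone (random weights; use from_pretrained for the real checkpoint):

from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("unsloth/tinyllama")
model = AutoModelForCausalLM.from_config(config)  # LlamaForCausalLM, untrained

# 32 attention heads over 4 KV heads: 8 query heads share each KV head.
assert config.num_attention_heads // config.num_key_value_heads == 8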
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '### Instruction: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ '### Response: ' + message['content'] + eos_token}}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "extra_special_tokens": {},
+   "legacy": false,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<unk>",
+   "padding_side": "left",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false,
+   "use_fast": true
+ }
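
The chat_template above renders Alpaca-style "### Instruction:" / "### Response:" turns, and pad_token falls back to <unk> (as in special_tokens_map.json) because the Llama tokenizer ships no dedicated pad token. A sketch of rendering the template, assuming the hub tokenizer matches the files in this commit:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("unsloth/tinyllama")
messages = [
    {"role": "user", "content": "Summarize LoRA in one sentence."},
    {"role": "assistant", "content": "LoRA trains small low-rank matrices instead of the full weights."},
]
# Produces "### Instruction: ...\n\n### Response: ...</s>"
print(tokenizer.apply_chat_template(messages, tokenize=False))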
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfc543989776dbe8bb3a126b1df4dadb0d0cf94c38d0461f13565424f500a752
+ size 7096
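
training_args.bin is a pickled transformers TrainingArguments object rather than model data. A sketch of inspecting it (weights_only=False is required on recent torch versions, and unpickling executes code, so only do this for files you trust):

import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.save_steps, args.max_steps)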