SystemAdmin123 committed
Commit bea82fe · verified · 1 Parent(s): f1d16e8

Training in progress, step 1640

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
adapter_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "unsloth/Mistral-Nemo-Instruct-2407",
+   "bias": "none",
+   "eva_config": null,
+   "exclude_modules": null,
+   "fan_in_fan_out": null,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 256,
+   "lora_bias": false,
+   "lora_dropout": 0.1,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 128,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "o_proj",
+     "up_proj",
+     "q_proj",
+     "gate_proj",
+     "k_proj",
+     "down_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
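The adapter_config.json above is a standard PEFT LoRA configuration (r=128, alpha=256, dropout 0.1, targeting all linear projections of the Mistral-Nemo base model). As a minimal sketch (not part of this commit), the adapter can be attached to the base model with the peft library; the adapter repo id below is the hub_model_id from axolotl_config.yaml further down in this diff:

```python
# Minimal sketch: load the base model and attach this LoRA adapter with peft.
# Assumes transformers and peft are installed and you have access to the repos.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "unsloth/Mistral-Nemo-Instruct-2407"
adapter_id = "SystemAdmin123/3cdbdb76-a3d3-4c38-abe0-1eb63d76d0b0"

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.bfloat16, device_map="auto"
)
model = PeftModel.from_pretrained(model, adapter_id)  # applies the r=128 LoRA weights
model.eval()
```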
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5c133cbae1bf24ff38ab2204204c17ce35bf25e5f51ddc26ab53af3ce673b47
+ size 1824599104
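The ~1.8 GB pointer size is consistent with an r=128 adapter over all seven target projections stored in float32. A back-of-the-envelope check (sketch only, using the dimensions from config.json later in this diff):

```python
# Rough size estimate for the LoRA adapter: sum of r * (d_in + d_out) over all
# target modules and layers, times 4 bytes/param (assuming float32 storage).
# The result should land close to the 1,824,599,104-byte pointer above; the
# small remainder is the safetensors header/metadata.
hidden, inter, layers, r = 5120, 14336, 40, 128
attn_dim = 32 * 128   # num_attention_heads * head_dim
kv_dim = 8 * 128      # num_key_value_heads * head_dim

per_layer = (
    r * (hidden + attn_dim)     # q_proj
    + 2 * r * (hidden + kv_dim) # k_proj, v_proj
    + r * (attn_dim + hidden)   # o_proj
    + 2 * r * (hidden + inter)  # gate_proj, up_proj
    + r * (inter + hidden)      # down_proj
)
total_params = per_layer * layers
print(total_params, total_params * 4)  # ~456M adapter params, ~1.82 GB in fp32
```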
axolotl_config.yaml ADDED
@@ -0,0 +1,58 @@
+ adapter: lora
+ base_model: unsloth/Mistral-Nemo-Instruct-2407
+ batch_size: 64
+ bf16: true
+ chat_template: tokenizer_default_fallback_alpaca
+ datasets:
+ - format: custom
+   path: https://gradients.s3.eu-north-1.amazonaws.com/770e7d1149f051d4_train_data.json?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVVZOOA7SA4UOFLPI%2F20250210%2Feu-north-1%2Fs3%2Faws4_request&X-Amz-Date=20250210T085315Z&X-Amz-Expires=604800&X-Amz-SignedHeaders=host&X-Amz-Signature=5f8e3f412d280dc0a512928601968f5a64f92fcb369fa357d24c2803630144c2
+   type:
+     field_input: context
+     field_instruction: instruction
+     field_output: response
+     format: '{instruction} {input}'
+     no_input_format: '{instruction}'
+     system_format: '{system}'
+     system_prompt: ''
+ device_map: auto
+ eval_sample_packing: false
+ eval_steps: 0.1
+ flash_attention: true
+ gradient_checkpointing: true
+ group_by_length: true
+ hub_model_id: SystemAdmin123/3cdbdb76-a3d3-4c38-abe0-1eb63d76d0b0
+ hub_strategy: checkpoint
+ learning_rate: 0.0001
+ logging_steps: 10
+ lora_alpha: 256
+ lora_dropout: 0.1
+ lora_r: 128
+ lora_target_linear: true
+ lr_scheduler: cosine
+ max_steps: 11200
+ micro_batch_size: 1
+ model_type: AutoModelForCausalLM
+ num_epochs: 10000
+ optimizer: adamw_bnb_8bit
+ output_dir: /root/.sn56/axolotl/tmp/3cdbdb76-a3d3-4c38-abe0-1eb63d76d0b0
+ pad_to_sequence_len: true
+ resize_token_embeddings_to_32x: false
+ sample_packing: true
+ save_steps: 40
+ save_total_limit: 1
+ sequence_len: 2048
+ tokenizer_type: PreTrainedTokenizerFast
+ torch_dtype: bf16
+ training_args_kwargs:
+   disable_tqdm: true
+   hub_private_repo: true
+   save_only_model: true
+ trust_remote_code: true
+ val_set_size: 0.01
+ wandb_entity: ''
+ wandb_mode: online
+ wandb_name: unsloth/Mistral-Nemo-Instruct-2407-https://gradients.s3.eu-north-1.amazonaws.com/770e7d1149f051d4_train_data.json?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVVZOOA7SA4UOFLPI%2F20250210%2Feu-north-1%2Fs3%2Faws4_request&X-Amz-Date=20250210T085315Z&X-Amz-Expires=604800&X-Amz-SignedHeaders=host&X-Amz-Signature=5f8e3f412d280dc0a512928601968f5a64f92fcb369fa357d24c2803630144c2
+ wandb_project: Gradients-On-Demand
+ wandb_run: your_name
+ wandb_runid: default
+ warmup_ratio: 0.05
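With batch_size: 64 and micro_batch_size: 1, axolotl derives the gradient-accumulation steps from the per-device micro batch and the number of devices. A rough sketch of that arithmetic (a simplification of what axolotl computes internally, assuming a single GPU):

```python
# Sketch of the effective-batch arithmetic implied by axolotl_config.yaml.
# This follows the usual convention batch_size = micro_batch_size *
# gradient_accumulation_steps * world_size; exact behavior may vary by version.
batch_size = 64        # global batch size from the config
micro_batch_size = 1   # per-device batch size from the config
sequence_len = 2048    # from the config
world_size = 1         # assumption: single GPU

grad_accum_steps = batch_size // (micro_batch_size * world_size)
tokens_per_step = batch_size * sequence_len  # with sample packing filling each sequence
print(grad_accum_steps, tokens_per_step)     # 64 accumulation steps, ~131k tokens per optimizer step
```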
config.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "_attn_implementation_autoset": true,
+   "_name_or_path": "unsloth/Mistral-Nemo-Instruct-2407",
+   "architectures": [
+     "MistralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 5120,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 131072,
+   "model_type": "mistral",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 40,
+   "num_key_value_heads": 8,
+   "pad_token_id": 10,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.47.1",
+   "unsloth_fixed": true,
+   "use_cache": false,
+   "vocab_size": 131072
+ }
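For reference, these dimensions put the base model at roughly 12.2B parameters. A back-of-the-envelope count from the values above (sketch only; minor terms such as RMSNorm weights are ignored):

```python
# Approximate parameter count of the base model from config.json values.
hidden, inter, layers = 5120, 14336, 40
heads, kv_heads, head_dim = 32, 8, 128
vocab = 131072

attn = 2 * hidden * heads * head_dim + 2 * hidden * kv_heads * head_dim  # q/o + k/v projections
mlp = 3 * hidden * inter                                                 # gate, up, down projections
embeddings = 2 * vocab * hidden                                          # untied input + output embeddings

total = layers * (attn + mlp) + embeddings
print(f"{total / 1e9:.1f}B parameters")  # ~12.2B
```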
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
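These special tokens should resolve to the ids declared in config.json (bos_token_id: 1, eos_token_id: 2, pad_token_id: 10). A quick cross-check sketch with transformers (requires access to this repo, which was pushed with hub_private_repo: true; the base model id can be substituted):

```python
# Sketch: confirm the special tokens map onto the ids used in config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("SystemAdmin123/3cdbdb76-a3d3-4c38-abe0-1eb63d76d0b0")
for name in ("<s>", "</s>", "<pad>", "<unk>"):
    print(name, tok.convert_tokens_to_ids(name))
# Expected per config.json: <s> -> 1, </s> -> 2, <pad> -> 10
```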
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0240ce510f08e6c2041724e9043e33be9d251d1e4a4d94eb68cd47b954b61d2
+ size 17078292
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35bd6400315cdf815af66aa5981001f1f24845cf8dd063dd9fdcfe3f05024997
+ size 10808
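adapter_model.safetensors, tokenizer.json, and training_args.bin are committed as Git LFS pointers; each pointer records the SHA-256 of the real object. A small sketch for verifying a downloaded file against its pointer (the file name and expected digest below are taken from the training_args.bin pointer above):

```python
# Sketch: verify a downloaded LFS object against the sha256 oid in its pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "35bd6400315cdf815af66aa5981001f1f24845cf8dd063dd9fdcfe3f05024997"  # training_args.bin
assert sha256_of("training_args.bin") == expected
```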