impossibleexchange committed on
Commit
e255a49
·
verified ·
1 Parent(s): 565bbfa

Delete training_config.yml

Browse files
Files changed (1) hide show
  1. training_config.yml +0 -75
training_config.yml DELETED
@@ -1,75 +0,0 @@
---
# LoRA finetuning configuration for the omega-a2a multimodal Llama-3-8B recipe
# (torchtune-style: each `_component_` key names the class/function to build).
#
# NOTE(review): this config was reconstructed from a flattened deletion diff;
# the key nesting follows the standard torchtune recipe layout — verify against
# the recipe loader before relying on it.

# --- Model: Llama-3-8B with LoRA on attention projections only ---
model:
  _component_: models.lora_mmllama3_8b
  lora_attn_modules:
    - q_proj
    - v_proj
  apply_lora_to_mlp: false
  apply_lora_to_output: false
  lora_rank: 16
  lora_alpha: 32
  perception_tokens: 2
  use_clip: false

tokenizer:
  _component_: models.a2a_tokenizer
  path: models/tokenizer.model

# --- Checkpointing: load Meta-format weights, write adapters to output_dir ---
checkpointer:
  _component_: torchtune.utils.FullModelMetaCheckpointer
  checkpoint_dir: checkpoints/Meta-Llama-3-8B-Instruct/original
  checkpoint_files:
    - consolidated.00.pth
  adapter_checkpoint: null
  recipe_checkpoint: null
  output_dir: output_checkpoints/omega_a2a
  model_type: LLAMA3
resume_from_checkpoint: false
interim_checkpoint_steps: 5000
interim_gen_steps: null

# --- Generation parameters used during interim sampling ---
max_new_tokens: 100
temperature: 0.6
top_k: 225

# --- Dataset: round-robin over caption-instruct data, evenly batched ---
dataset:
  _component_: ds.EvenBatcher
  buffer_size: 64
  dataset:
    _component_: ds.RoundRobinDataset
    datasets:
      - _component_: ds.CaptionInstructDataset
        dataset_path: ds/sam_llava/output.parquet
        train_on_input: false
seed: null
shuffle: true
batch_size: 1

# --- Optimization ---
optimizer:
  _component_: torch.optim.AdamW
  weight_decay: 0.001
  lr: 1.0e-05
lr_scheduler:
  _component_: torchtune.modules.get_cosine_schedule_with_warmup
  num_warmup_steps: 100
loss:
  _component_: torch.nn.CrossEntropyLoss
epochs: 4
max_steps_per_epoch: null
gradient_accumulation_steps: 32
compile: false

# --- Logging ---
output_dir: /tmp/lora_finetune_output
metric_logger:
  _component_: torchtune.utils.metric_logging.StdoutLogger
  log_dir: /dev/stdout
log_every_n_steps: 8

# --- Runtime ---
device: cuda
dtype: bf16
enable_activation_checkpointing: false
profiler:
  _component_: torchtune.utils.profiler
  enabled: false

# --- Inference-time prompting ---
inference:
  # Literal block scalar: value is "Video:\n{video}\nCaption the previous video."
  # (identical to the original single-quoted scalar after blank-line folding).
  prompt_template: |-
    Video:
    {video}
    Caption the previous video.
  max_new_tokens: 300
  temperature: 0.6
  top_k: 300
quantizer: null