Upload folder using huggingface_hub
Browse files
recipes/orpo-fixer-replanner-0.5b-iter2.yaml
ADDED
---
# ORPO iter-2 RE-PLANNER fixer on 0.5B base (per user 0.5B preference).
# Teaches fixer to PRODUCE a correct alternative when given a failed planner attempt.
# Starts from existing 0.5B ORPO fixer iter-1.

# Model
model_name_or_path: ./output/qwen-coder0.5b-bird-fixer-orpo
torch_dtype: bfloat16
use_flash_attention_2: false

# Data — preference pairs (prompt/chosen/rejected) for ORPO.
dataset_mixer:
  ../data/llm_alignment/scaleup_iter2_v3/hf_fixer_replanner: 1.0

dataset_splits:
  - train_dpo
  - test_dpo
preprocessing_num_workers: 12
chat_template: "{{'<|im_start|>user\n' + messages['prompt'] + '<|im_end|>\n'}}{{'<|im_start|>assistant\n' + messages['completion'] + '<|im_end|>\n'}}"
report_to: ["tensorboard"]

# ORPO / trainer hyperparameters
bf16: true
beta: 0.5
do_eval: true
eval_strategy: "steps"
eval_steps: 100
gradient_accumulation_steps: 8
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
learning_rate: 1.0e-6
log_level: info
logging_steps: 10
lr_scheduler_type: inverse_sqrt
max_length: 6144
max_prompt_length: 5500
num_train_epochs: -1  # run is bounded by max_steps instead
max_steps: 400
optim: adamw_torch
output_dir: output/qwen-coder0.5b-scaleup-fixer-replanner-orpo-iter2
overwrite_output_dir: true
per_device_train_batch_size: 1
per_device_eval_batch_size: 1
push_to_hub: false
remove_unused_columns: false
save_strategy: "steps"
save_steps: 400
save_total_limit: 1
seed: 42
# NOTE(review): HF Trainer uses warmup_steps when > 0, which overrides
# warmup_ratio — confirm both being set here is intended.
warmup_ratio: 0.05
warmup_steps: 40
max_grad_norm: 0.5
recipes/orpo-planner-collab-iter2.yaml
ADDED
---
# ORPO iter-2 on planner-COLLAB-iter1 — starts from prior ORPO checkpoint, uses new rollouts.

# Model
model_name_or_path: ./output/qwen-coder3b-scaleup-planner-COLLAB-orpo
torch_dtype: bfloat16
use_flash_attention_2: false

# Data — preference pairs (prompt/chosen/rejected) for ORPO.
dataset_mixer:
  ../data/llm_alignment/scaleup_iter2/hf_planner_collaborative: 1.0

dataset_splits:
  - train_dpo
  - test_dpo
preprocessing_num_workers: 12
chat_template: "{{'<|im_start|>user\n' + messages['prompt'] + '<|im_end|>\n'}}{{'<|im_start|>assistant\n' + messages['completion'] + '<|im_end|>\n'}}"
report_to: ["tensorboard"]

# ORPO / trainer hyperparameters
bf16: true
beta: 1.0
do_eval: true
eval_strategy: "steps"
eval_steps: 100
gradient_accumulation_steps: 16
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
learning_rate: 1.0e-6
log_level: info
logging_steps: 10
lr_scheduler_type: inverse_sqrt
max_length: 4000
max_prompt_length: 3500
num_train_epochs: -1  # run is bounded by max_steps instead
max_steps: 200
optim: adamw_torch
output_dir: output/qwen-coder3b-scaleup-planner-COLLAB-orpo-iter2
overwrite_output_dir: true
per_device_train_batch_size: 1
per_device_eval_batch_size: 1
push_to_hub: false
remove_unused_columns: false
save_strategy: "steps"
save_steps: 200
save_total_limit: 1
seed: 42
# NOTE(review): HF Trainer uses warmup_steps when > 0, which overrides
# warmup_ratio — confirm both being set here is intended.
warmup_ratio: 0.1
warmup_steps: 50
max_grad_norm: 0.5
recipes/validator-condition-fft-0.5b-v3.yaml
ADDED
---
# SFT Qwen2.5-Coder-0.5B Validator-Condition (v_c) — 0.5B base per user constraint.

# Model
model_name_or_path: /home/datht/huggingface/Qwen/Qwen2.5-Coder-0.5B-Instruct
model_revision: main
torch_dtype: bfloat16
use_flash_attention_2: false
response_template: "<|im_start|>assistant"

# Data
dataset_mixer:
  ../data/multi-agents/fixed/sft-validator-condition-v3: 1.0
dataset_splits:
  - train
  - test
preprocessing_num_workers: 24
chat_template: "{{'<|im_start|>user\n' + messages['prompt'] + '<|im_end|>\n'}}{{'<|im_start|>assistant\n' + messages['completion'] + '<|im_end|>\n'}}"

# SFT trainer hyperparameters
bf16: true
do_eval: false
eval_strategy: "no"
gradient_accumulation_steps: 4
gradient_checkpointing: true
learning_rate: 2.0e-05
log_level: info
logging_steps: 10
logging_strategy: steps
lr_scheduler_type: cosine
max_seq_length: 5120
truncation_side: left
max_steps: -1  # run is bounded by num_train_epochs instead
num_train_epochs: 2
optim: adamw_torch
output_dir: output/qwen-coder0.5b-bird-validator-condition-sft-v3
overwrite_output_dir: true
per_device_train_batch_size: 8
push_to_hub: false
remove_unused_columns: true
report_to:
  - tensorboard
save_strategy: "epoch"
save_total_limit: 1
seed: 42
tf32: true
recipes/validator-condition-fft-v3.yaml
ADDED
---
# SFT Qwen3-0.6B Validator-Condition (v_c) on section-specific data.
# Per paper §Combined Validator, v_c critiques only the WHERE/HAVING/CASE conditions.

# Model
model_name_or_path: /home/datht/huggingface/Qwen/Qwen3-0.6B
model_revision: main
torch_dtype: bfloat16
use_flash_attention_2: false
response_template: "<|im_start|>assistant"

# Data
dataset_mixer:
  ../data/multi-agents/fixed/sft-validator-condition-v3: 1.0
dataset_splits:
  - train
  - test
preprocessing_num_workers: 24
chat_template: "{{'<|im_start|>user\n' + messages['prompt'] + '<|im_end|>\n'}}{{'<|im_start|>assistant\n' + messages['completion'] + '<|im_end|>\n'}}"

# SFT trainer hyperparameters
bf16: true
do_eval: false
eval_strategy: "no"
gradient_accumulation_steps: 8
gradient_checkpointing: true
learning_rate: 2.0e-05
log_level: info
logging_steps: 10
logging_strategy: steps
lr_scheduler_type: cosine
max_seq_length: 5120
truncation_side: left
max_steps: -1  # run is bounded by num_train_epochs instead
num_train_epochs: 2
optim: adamw_torch
output_dir: output/qwen3-0.6b-bird-validator-condition-sft-v3
overwrite_output_dir: true
per_device_train_batch_size: 4
push_to_hub: false
remove_unused_columns: true
report_to:
  - tensorboard
save_strategy: "epoch"
save_total_limit: 1
seed: 42
tf32: true
recipes/validator-selection-fft-0.5b-v3.yaml
ADDED
---
# SFT Qwen2.5-Coder-0.5B Validator-Selection (v_s) — 0.5B base per user constraint.

# Model
model_name_or_path: /home/datht/huggingface/Qwen/Qwen2.5-Coder-0.5B-Instruct
model_revision: main
torch_dtype: bfloat16
use_flash_attention_2: false
response_template: "<|im_start|>assistant"

# Data
dataset_mixer:
  ../data/multi-agents/fixed/sft-validator-selection-v3: 1.0
dataset_splits:
  - train
  - test
preprocessing_num_workers: 24
chat_template: "{{'<|im_start|>user\n' + messages['prompt'] + '<|im_end|>\n'}}{{'<|im_start|>assistant\n' + messages['completion'] + '<|im_end|>\n'}}"

# SFT trainer hyperparameters
bf16: true
do_eval: false
eval_strategy: "no"
gradient_accumulation_steps: 4
gradient_checkpointing: true
learning_rate: 2.0e-05
log_level: info
logging_steps: 10
logging_strategy: steps
lr_scheduler_type: cosine
max_seq_length: 5120
truncation_side: left
max_steps: -1  # run is bounded by num_train_epochs instead
num_train_epochs: 2
optim: adamw_torch
output_dir: output/qwen-coder0.5b-bird-validator-selection-sft-v3
overwrite_output_dir: true
per_device_train_batch_size: 8
push_to_hub: false
remove_unused_columns: true
report_to:
  - tensorboard
save_strategy: "epoch"
save_total_limit: 1
seed: 42
tf32: true