| | """ |
| | Obsidian Bases SLM Training Script |
| | Fine-tunes Qwen 3 0.6B to generate .base files from natural language. |
| | """ |

from datasets import load_dataset
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig
import trackio  # required by report_to="trackio" below; fails fast if not installed
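
# push_to_hub=True below requires Hugging Face Hub credentials. A minimal
# sketch, assuming you have a write token; skip if you are already logged
# in via `huggingface-cli login`:
# from huggingface_hub import login
# login()  # prompts for (or reads) your HF token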

# Load the natural-language -> .base training pairs from the Hub.
dataset = load_dataset("ssdavid/obsidian-bases-query-v1", split="train")


# Convert each row into the conversational "messages" format that
# SFTTrainer expects for chat-style fine-tuning.
def format_example(example):
    return {
        "messages": [
            {"role": "user", "content": example["instruction"]},
            {"role": "assistant", "content": example["output"]},
        ]
    }


dataset = dataset.map(format_example)
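
# Optional sanity check: the first row should now hold a two-turn
# conversation (user instruction, assistant .base output).
print(dataset[0]["messages"])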

# Hold out 10% of the data for evaluation; the fixed seed keeps the split reproducible.
dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
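
# Optional: confirm the split sizes before committing to a training run.
print(f"train: {len(dataset_split['train'])} examples, eval: {len(dataset_split['test'])} examples")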

# LoRA adapter config: rank-16 adapters on the attention projections keep
# the trainable parameter count to a small fraction of the 0.6B weights.
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)

# Training hyperparameters. Effective batch size is 16 (4 per device x 4
# gradient-accumulation steps); metrics are logged to trackio under the
# "obsidian-bases-slm" project.
training_args = SFTConfig(
    output_dir="obsidian-bases-slm",
    push_to_hub=True,
    hub_model_id="ssdavid/obsidian-bases-slm",
    num_train_epochs=3,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=2e-4,
    warmup_ratio=0.1,
    logging_steps=10,
    eval_strategy="steps",
    eval_steps=50,
    save_strategy="steps",
    save_steps=100,
    max_length=512,
    report_to="trackio",
    project="obsidian-bases-slm",
    run_name="qwen3-0.6b-bases-v1",
)

# Passing a model ID string lets SFTTrainer load the base model itself;
# supplying peft_config wraps it with the LoRA adapters.
trainer = SFTTrainer(
    model="Qwen/Qwen3-0.6B",
    train_dataset=dataset_split["train"],
    eval_dataset=dataset_split["test"],
    peft_config=peft_config,
    args=training_args,
)
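
# Optional: trainer.model is a PeftModel once peft_config is applied, so we
# can report how few parameters the adapters actually train.
trainer.model.print_trainable_parameters()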

trainer.train()

# Upload the final adapter weights (and tokenizer) to the Hub repo.
trainer.push_to_hub()
print("Training complete! Model pushed to ssdavid/obsidian-bases-slm")
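
# --- Quick smoke test of the pushed model ---
# A minimal inference sketch, assuming the push above succeeded and that
# AutoPeftModelForCausalLM can resolve the base model from the adapter
# config. The prompt is a made-up example query.
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

model = AutoPeftModelForCausalLM.from_pretrained("ssdavid/obsidian-bases-slm")
tokenizer = AutoTokenizer.from_pretrained("ssdavid/obsidian-bases-slm")

input_ids = tokenizer.apply_chat_template(
    [{"role": "user", "content": "Show all notes tagged #book rated 4 or higher"}],
    add_generation_prompt=True,
    return_tensors="pt",
)
output_ids = model.generate(input_ids, max_new_tokens=256)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))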