# agent-os-training-guide/scripts/train_1.5b_cloud.py
"""AutoTrain cloud script - trains Qwen 7B LoRA and pushes to HuggingFace Hub."""
import os
import torch
from datasets import load_dataset, concatenate_datasets
from transformers import AutoTokenizer, AutoModelForCausalLM, TrainingArguments, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from trl import SFTTrainer
from huggingface_hub import login
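# HF_TOKEN must be provided by the environment (e.g. as an AutoTrain secret);
# it authenticates both the login below and the hub pushes at the end.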
HF_TOKEN = os.environ["HF_TOKEN"]
MODEL_ID = "Qwen/Qwen2.5-7B-Instruct"
REPO_ID = "devsomosahub/agent-os-adapter-7b"
OUTPUT_DIR = "./output"
login(token=HF_TOKEN)
print("Loading dataset from Hub...")
ds = load_dataset("devsomosahub/agent-os-dataset", data_files="train.jsonl", split="train")
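# Each record in train.jsonl is expected to carry "input" and "output" fields,
# which format_example below folds into Qwen's ChatML prompt format.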
def format_example(example):
    text = f"<|im_start|>system\nYou are a command adapter. Output ONLY valid JSON. No explanation.<|im_end|>\n<|im_start|>user\n{example['input']}<|im_end|>\n<|im_start|>assistant\n{example['output']}<|im_end|>"
    return {"text": text}
ds = ds.map(format_example)
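# Duplicate the small dataset 4x; combined with num_train_epochs=7 below,
# this amounts to roughly 28 passes over the unique examples.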
ds = concatenate_datasets([ds, ds, ds, ds])
print(f"Dataset: {len(ds)} examples")
print("Loading model (Q4)...")
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID, trust_remote_code=True)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(MODEL_ID, quantization_config=bnb_config, device_map="auto", trust_remote_code=True)
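# LoRA on all attention and MLP projection layers; r=32 with lora_alpha=64
# keeps the common alpha = 2*r scaling.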
lora_config = LoraConfig(
    r=32, lora_alpha=64,
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    lora_dropout=0.05, bias="none", task_type="CAUSAL_LM",
)
model = prepare_model_for_kbit_training(model)  # standard QLoRA prep: enables input grads, upcasts norms for stable training
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
print("Starting training...")
training_args = TrainingArguments(
    output_dir=OUTPUT_DIR,
    num_train_epochs=7,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=2,
    learning_rate=2e-4,
    fp16=True,
    logging_steps=10,
    save_strategy="steps",
    save_steps=500,
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    report_to="none",
    push_to_hub=True,
    hub_model_id=REPO_ID,
    hub_token=HF_TOKEN,
)
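# Effective batch size per device: 4 * 2 (gradient accumulation) = 8.
# Note: recent TRL versions accept a plain TrainingArguments here and convert it
# to an SFTConfig internally, picking up the "text" column mapped above by default.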
trainer = SFTTrainer(
    model=model,
    train_dataset=ds,
    args=training_args,
    processing_class=tokenizer,
)
trainer.train()
print("Pushing to Hub...")
trainer.push_to_hub()
model.push_to_hub(REPO_ID, token=HF_TOKEN)
tokenizer.push_to_hub(REPO_ID, token=HF_TOKEN)
print(f"DONE! Model at https://huggingface.co/{REPO_ID}")