#!/usr/bin/env python3
"""
Final training script for Elizabeth - proper tokenization and formatting
"""
import os

# Point the HF cache at a local path; set this before importing transformers
# so the override takes effect.
os.environ['HF_HOME'] = '/home/x/.cache/huggingface'
import torch
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
TrainingArguments,
Trainer,
DataCollatorForLanguageModeling
)
from datasets import Dataset
import json
# Configuration
MODEL_NAME = "Qwen/Qwen3-8B"
TRAIN_DATA_PATH = "/home/x/adaptai/aiml/e-train-1/elizabeth_tooluse_minipack_v1.jsonl"
OUTPUT_DIR = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-final"
# YaRN Configuration
YARN_CONFIG = {
"rope_scaling": {
"type": "yarn",
"factor": 8.0,
"original_max_position_embeddings": 16384,
"extrapolation_factor": 1.0,
"attn_factor": 1.0,
"beta_fast": 32.0,
"beta_slow": 1.0
}
}
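
# With YaRN, the usable context is roughly
# factor * original_max_position_embeddings = 8.0 * 16384 = 131072 tokens.
# These values are this script's choice; Qwen's published YaRN recipe may
# use different numbers, so treat them as tunable assumptions.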
class FinalTrainer:
def __init__(self):
self.model = None
self.tokenizer = None
def setup_model(self):
"""Load model with YaRN configuration"""
print("πŸš€ Loading Qwen3-8B with YaRN configuration...")
# Load tokenizer
self.tokenizer = AutoTokenizer.from_pretrained(
MODEL_NAME,
trust_remote_code=True
)
        if self.tokenizer.pad_token is None:
            # Fall back to EOS for padding if the tokenizer defines no pad token.
            self.tokenizer.pad_token = self.tokenizer.eos_token
        # Load model with YaRN rope scaling; the rope_scaling kwarg is
        # forwarded to the model config by from_pretrained, and
        # device_map="auto" lets accelerate place layers across devices.
self.model = AutoModelForCausalLM.from_pretrained(
MODEL_NAME,
torch_dtype=torch.bfloat16,
device_map="auto",
trust_remote_code=True,
**YARN_CONFIG
)
print(f"βœ… Model loaded with YaRN configuration")
print(f"πŸ“ Context length: {self.model.config.max_position_embeddings}")
def tokenize_function(self, examples):
"""Tokenize the text examples"""
        # Render each conversation in Qwen's ChatML format (a manual
        # approximation of tokenizer.apply_chat_template).
texts = []
for messages in examples['messages']:
text = ""
for msg in messages:
if msg['role'] == 'system':
text += f"<|im_start|>system\n{msg['content']}<|im_end|>\n"
elif msg['role'] == 'user':
text += f"<|im_start|>user\n{msg['content']}<|im_end|>\n"
elif msg['role'] == 'assistant':
text += f"<|im_start|>assistant\n{msg['content']}<|im_end|>\n"
                elif msg['role'] == 'tool':
                    # Tool results may be structured, so serialize them to JSON text.
                    text += f"<|im_start|>tool\n{json.dumps(msg['content'])}<|im_end|>\n"
texts.append(text)
        # Tokenize; note max_length=4096 truncates well below the YaRN-extended
        # window, so very long conversations are clipped during training.
tokenized = self.tokenizer(
texts,
truncation=True,
padding=False,
max_length=4096,
return_tensors=None
)
        # No explicit labels are added here: DataCollatorForLanguageModeling
        # with mlm=False rebuilds causal-LM labels from input_ids at collation
        # time (and ragged label lists would not pad cleanly at batch sizes > 1).
        return tokenized
def load_dataset(self):
"""Load and tokenize dataset"""
print("πŸ“Š Loading and tokenizing data...")
        # Load raw JSONL data (one conversation per line)
        data = []
        with open(TRAIN_DATA_PATH, 'r', encoding='utf-8') as f:
            for line in f:
                if line.strip():
                    data.append(json.loads(line))
# Create dataset
dataset = Dataset.from_list(data)
        # Tokenize in small batches; remove_columns drops the raw 'messages'
        # field so only model inputs remain.
tokenized_dataset = dataset.map(
self.tokenize_function,
batched=True,
batch_size=10,
remove_columns=dataset.column_names
)
print(f"βœ… Tokenized {len(tokenized_dataset)} examples")
return tokenized_dataset
def train(self):
"""Start training"""
self.setup_model()
dataset = self.load_dataset()
# Training arguments
training_args = TrainingArguments(
output_dir=OUTPUT_DIR,
num_train_epochs=1,
per_device_train_batch_size=1,
gradient_accumulation_steps=16,
learning_rate=2e-5,
warmup_ratio=0.03,
lr_scheduler_type="cosine",
logging_steps=10,
save_steps=100,
bf16=True,
gradient_checkpointing=True,
remove_unused_columns=False,
report_to=[],
)
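        # Effective batch size = per_device_train_batch_size (1) x
        # gradient_accumulation_steps (16) = 16 sequences per optimizer step.
        # Gradient checkpointing trades extra compute for activation memory;
        # transformers disables the KV cache while it is active.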
        # Data collator: mlm=False selects causal-LM collation, which pads each
        # batch and builds labels from input_ids with pad positions set to -100.
data_collator = DataCollatorForLanguageModeling(
tokenizer=self.tokenizer,
mlm=False,
)
# Trainer
trainer = Trainer(
model=self.model,
args=training_args,
train_dataset=dataset,
data_collator=data_collator,
)
print("🎯 Starting training...")
# Start training
trainer.train()
        # Save the final weights and the tokenizer (the Trainer only saves the
        # tokenizer automatically when it is passed to the Trainer itself).
        trainer.save_model()
        self.tokenizer.save_pretrained(OUTPUT_DIR)
        print("✅ Training completed!")
        print(f"💾 Model saved to: {OUTPUT_DIR}")
if __name__ == "__main__":
trainer = FinalTrainer()
trainer.train()
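
# Quick inference smoke test after training (a sketch, assuming the OUTPUT_DIR
# above is intact; the prompt is purely illustrative):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained(OUTPUT_DIR)
#   model = AutoModelForCausalLM.from_pretrained(
#       OUTPUT_DIR, torch_dtype=torch.bfloat16, device_map="auto")
#   messages = [{"role": "user", "content": "Hello, Elizabeth."}]
#   prompt = tok.apply_chat_template(messages, tokenize=False,
#                                    add_generation_prompt=True)
#   inputs = tok(prompt, return_tensors="pt").to(model.device)
#   out = model.generate(**inputs, max_new_tokens=64)
#   print(tok.decode(out[0], skip_special_tokens=True))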