#!/usr/bin/env python3
"""
Working training script for Elizabeth with clean data
"""
import os

# HF_HOME must be set before transformers/datasets are imported, otherwise the
# cache location is already fixed by the time those libraries load.
os.environ['HF_HOME'] = '/home/x/.cache/huggingface'

import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
)
from datasets import Dataset
import json
# Configuration
MODEL_NAME = "Qwen/Qwen3-8B"
TRAIN_DATA_PATH = "/home/x/adaptai/aiml/e-train-1/clean_training_data.jsonl"
OUTPUT_DIR = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-working"
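
# Expected JSONL schema, one object per line (inferred from load_dataset
# below; the hardcoded paths above are machine-specific):
#   {"messages": [{"role": "user", "content": "..."},
#                 {"role": "assistant", "content": "..."}]}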


class WorkingTrainer:
    def __init__(self):
        self.model = None
        self.tokenizer = None

    def setup_model(self):
        """Load the tokenizer and model"""
        print("πŸš€ Loading Qwen3-8B...")

        # Load tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
        )
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        # Load model
        self.model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            trust_remote_code=True,
        )
        print("βœ… Model loaded")

    def load_dataset(self):
        """Load the clean dataset"""
        print("πŸ“Š Loading clean data...")

        # Load data, flattening each conversation into one text block
        texts = []
        with open(TRAIN_DATA_PATH, 'r') as f:
            for line in f:
                if not line.strip():
                    continue  # skip blank lines (e.g. a trailing newline)
                data = json.loads(line)
                messages = data.get('messages', [])
                # Convert to "role: content" text format
                text = ""
                for msg in messages:
                    text += f"{msg['role']}: {msg['content']}\n"
                texts.append(text)
        print(f"βœ… Loaded {len(texts)} examples")

        # Create dataset
        dataset = Dataset.from_dict({"text": texts})
        return dataset
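
    # Note: load_dataset serializes turns as plain "role: content" lines, not
    # Qwen3's chat template. A sketch of the template-based alternative, using
    # the standard transformers API, would be:
    #   text = self.tokenizer.apply_chat_template(messages, tokenize=False)
    # It is left as a comment here so the script's behavior stays unchanged.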

    def tokenize_function(self, examples):
        """Tokenize text"""
        return self.tokenizer(
            examples["text"],
            truncation=True,
            padding=False,
            max_length=2048,
        )
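
    # Note: padding=False in tokenize_function is deliberate; the
    # DataCollatorForLanguageModeling below pads each batch dynamically to its
    # longest sequence at collation time.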

    def train(self):
        """Start training"""
        self.setup_model()
        dataset = self.load_dataset()

        # Tokenize (batched) and drop the raw "text" column
        tokenized_dataset = dataset.map(
            self.tokenize_function,
            batched=True,
            remove_columns=dataset.column_names,
        )

        # Training arguments
        training_args = TrainingArguments(
            output_dir=OUTPUT_DIR,
            num_train_epochs=3,
            per_device_train_batch_size=1,
            gradient_accumulation_steps=8,
            learning_rate=5e-5,
            warmup_ratio=0.1,
            logging_steps=5,
            save_steps=50,
            bf16=True,
            remove_unused_columns=False,
            report_to=[],
        )

        # Data collator
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=self.tokenizer,
            mlm=False,
        )
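        # With mlm=False the collator does causal-LM batching: labels are a
        # copy of input_ids, with padding positions set to -100 so they are
        # ignored by the loss.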

        # Trainer
        trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=tokenized_dataset,
            data_collator=data_collator,
        )
print("🎯 Starting training...")
print("⏰ This will take approximately 1-2 hours...")
# Start training
trainer.train()
# Save final model
trainer.save_model()
print(f"βœ… Training completed!")
print(f"πŸ’Ύ Model saved to: {OUTPUT_DIR}")


if __name__ == "__main__":
    trainer = WorkingTrainer()
    trainer.train()
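
# Usage sketch (the filename shown is hypothetical):
#   python train_working.py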