# Source: ADAPT-Chase repository upload (commit fbf3c28, verified; added via upload-large-folder tool)
#!/usr/bin/env python3
"""
Simple training script for Elizabeth - focuses on core identity without complex formatting
"""
import os
# Must be set before transformers/datasets are imported, since they read
# HF_HOME at import time to locate the model/dataset cache.
os.environ['HF_HOME'] = '/home/x/.cache/huggingface'
import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling
)
from datasets import Dataset
import json
# Configuration
# Base model to fine-tune and absolute paths for the training data / output.
MODEL_NAME = "Qwen/Qwen3-8B"
TRAIN_DATA_PATH = "/home/x/adaptai/aiml/e-train-1/elizabeth_tooluse_minipack_v1.jsonl"
OUTPUT_DIR = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-simple"
# YaRN Configuration
# Passed as **kwargs to from_pretrained() so the model config gets
# rope_scaling; factor 8.0 over 16384 base positions extends the usable
# context window (NOTE(review): 16384 as original_max_position_embeddings
# is presumably intentional for this checkpoint — confirm against the
# model's config.json).
YARN_CONFIG = {
    "rope_scaling": {
        "type": "yarn",
        "factor": 8.0,
        "original_max_position_embeddings": 16384,
        "extrapolation_factor": 1.0,
        "attn_factor": 1.0,
        "beta_fast": 32.0,
        "beta_slow": 1.0
    }
}
class SimpleTrainer:
    """Minimal fine-tuning pipeline for Qwen3-8B with YaRN rope scaling.

    Loads the model/tokenizer, filters the JSONL training pack down to
    plain-text conversations (no tool calls), tokenizes them, and runs a
    single-epoch causal-LM training pass with HF Trainer.
    """

    def __init__(self):
        # Populated by setup_model(); None until then.
        self.model = None
        self.tokenizer = None

    def setup_model(self):
        """Load tokenizer and model with the YaRN rope-scaling override."""
        print("🚀 Loading Qwen3-8B with YaRN configuration...")
        # Load tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True
        )
        # Some checkpoints ship without a pad token; reuse EOS so the
        # collator can pad batches.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
        # Load model with YaRN (YARN_CONFIG expands to rope_scaling=...).
        self.model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            trust_remote_code=True,
            **YARN_CONFIG
        )
        print(f"✅ Model loaded with YaRN configuration")
        print(f"📏 Context length: {self.model.config.max_position_embeddings}")

    def load_simple_dataset(self):
        """Return a Dataset with a "text" column of flattened conversations.

        Only examples without tool calls are kept; each message list is
        flattened into "role: content" lines.
        """
        print("📊 Loading simple training data...")
        texts = []
        # Load only examples with simple text (no tool calls)
        with open(TRAIN_DATA_PATH, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if not line:
                    # Tolerate blank lines (e.g. trailing newline) instead
                    # of crashing json.loads.
                    continue
                data = json.loads(line)
                # Skip complex tool call examples for now
                has_tool_call = any('tool_call' in str(msg) for msg in data.get('messages', []))
                if not has_tool_call:
                    # Convert to simple text
                    conversation = ""
                    for msg in data.get('messages', []):
                        if 'content' in msg:
                            conversation += f"{msg['role']}: {msg['content']}\n"
                    texts.append(conversation)
        print(f"✅ Loaded {len(texts)} simple examples")
        # Create dataset
        dataset = Dataset.from_dict({"text": texts})
        return dataset

    def train(self):
        """Run the end-to-end training pass and save the final model."""
        self.setup_model()
        dataset = self.load_simple_dataset()
        # BUGFIX: the dataset only had a raw "text" string column, but
        # DataCollatorForLanguageModeling needs tokenized features
        # (input_ids); with the Trainer's default remove_unused_columns=True
        # the text column is dropped and training fails. Tokenize first and
        # drop the text column explicitly.
        tokenized = dataset.map(
            lambda batch: self.tokenizer(
                batch["text"], truncation=True, max_length=4096
            ),
            batched=True,
            remove_columns=["text"],
        )
        # Simple training arguments: effective batch size 16 via gradient
        # accumulation, bf16 + gradient checkpointing to fit an 8B model.
        training_args = TrainingArguments(
            output_dir=OUTPUT_DIR,
            num_train_epochs=1,
            per_device_train_batch_size=1,
            gradient_accumulation_steps=16,
            learning_rate=2e-5,
            warmup_ratio=0.03,
            lr_scheduler_type="cosine",
            logging_steps=10,
            save_steps=100,
            bf16=True,
            gradient_checkpointing=True,
            report_to=[],  # disable wandb/tensorboard reporting
        )
        # Causal-LM collator (mlm=False => labels are shifted input_ids).
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=self.tokenizer,
            mlm=False,
        )
        trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=tokenized,
            data_collator=data_collator,
            tokenizer=self.tokenizer,
        )
        print("🎯 Starting simple training...")
        # Start training
        trainer.train()
        # Save final model
        trainer.save_model()
        print(f"✅ Simple training completed!")
if __name__ == "__main__":
    # Script entry point: build the trainer and run the full pipeline.
    SimpleTrainer().train()