"""Working training script for Elizabeth with clean data."""

import os

# Point the Hugging Face cache at a local directory before any model download.
os.environ['HF_HOME'] = '/home/x/.cache/huggingface'

import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
)
from datasets import Dataset
import json

MODEL_NAME = "Qwen/Qwen3-8B"
TRAIN_DATA_PATH = "/home/x/adaptai/aiml/e-train-1/clean_training_data.jsonl"
OUTPUT_DIR = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-working"


class WorkingTrainer:
    def __init__(self):
        self.model = None
        self.tokenizer = None

    def setup_model(self):
        """Load the tokenizer and model."""
        print("Loading Qwen3-8B...")

        self.tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
        )

        # Qwen tokenizers may ship without a pad token; reuse EOS so the
        # data collator can pad batches.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        self.model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            torch_dtype=torch.bfloat16,
            device_map="auto",  # let accelerate place layers on available devices
            trust_remote_code=True,
        )

        print("Model loaded")
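
    # Sizing note: 8B parameters in bf16 are roughly 16 GB of weights alone;
    # device_map="auto" lets accelerate spread or offload layers when a single
    # GPU cannot hold the model.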

    def load_dataset(self):
        """Load the clean JSONL dataset and flatten each conversation to text."""
        print("Loading clean data...")

        texts = []
        with open(TRAIN_DATA_PATH, 'r') as f:
            for line in f:
                data = json.loads(line)
                messages = data.get('messages', [])

                # Flatten each conversation into plain "role: content" lines.
                text = ""
                for msg in messages:
                    text += f"{msg['role']}: {msg['content']}\n"
                texts.append(text)

        print(f"Loaded {len(texts)} examples")

        dataset = Dataset.from_dict({"text": texts})
        return dataset
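
    # A possible refinement (sketch only, not wired in): Qwen chat models ship
    # a chat template, and rendering conversations with it instead of the plain
    # "role: content" flattening above would match the formatting the model saw
    # during its own instruction tuning:
    #
    #   text = self.tokenizer.apply_chat_template(
    #       messages,
    #       tokenize=False,
    #       add_generation_prompt=False,
    #   )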

    def tokenize_function(self, examples):
        """Tokenize a batch of texts."""
        return self.tokenizer(
            examples["text"],
            truncation=True,
            padding=False,
            max_length=2048,
        )
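
    # truncation=True caps every example at max_length (2048 tokens), so longer
    # conversations lose their tails; padding is deferred to the data collator,
    # which pads each batch to its longest sequence.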

    def train(self):
        """Run the full training pipeline."""
        self.setup_model()
        dataset = self.load_dataset()

        tokenized_dataset = dataset.map(
            self.tokenize_function,
            batched=True,
            remove_columns=dataset.column_names,  # drop the raw text column
        )

        training_args = TrainingArguments(
            output_dir=OUTPUT_DIR,
            num_train_epochs=3,
            per_device_train_batch_size=1,
            gradient_accumulation_steps=8,
            learning_rate=5e-5,
            warmup_ratio=0.1,
            logging_steps=5,
            save_steps=50,
            bf16=True,
            remove_unused_columns=False,
            report_to=[],
        )
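
        # With per_device_train_batch_size=1 and gradient_accumulation_steps=8
        # above, the effective batch size is 8 sequences per optimizer step.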

        data_collator = DataCollatorForLanguageModeling(
            tokenizer=self.tokenizer,
            mlm=False,
        )
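
        # mlm=False selects causal-LM batching: the collator pads the batch and
        # copies input_ids to labels, masking padded positions with -100 so the
        # loss ignores them.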

        trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=tokenized_dataset,
            data_collator=data_collator,
        )

        print("Starting training...")
        print("This will take approximately 1-2 hours...")

        trainer.train()

        trainer.save_model()
        # Also save the tokenizer so OUTPUT_DIR can be loaded standalone.
        self.tokenizer.save_pretrained(OUTPUT_DIR)

        print("Training completed!")
        print(f"Model saved to: {OUTPUT_DIR}")


if __name__ == "__main__":
    trainer = WorkingTrainer()
    trainer.train()
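
# Minimal post-training smoke test (sketch only, not executed by this script;
# assumes the saved checkpoint fits on the local device):
#
#   from transformers import pipeline
#   pipe = pipeline(
#       "text-generation",
#       model=OUTPUT_DIR,
#       torch_dtype=torch.bfloat16,
#       device_map="auto",
#   )
#   out = pipe("user: Hello!\nassistant:", max_new_tokens=64)
#   print(out[0]["generated_text"])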