|
|
|
|
|
""" |
|
|
Simple training script for Elizabeth - focuses on core identity without complex formatting |
|
|
""" |
|
|
|
|
|
import os |
|
|
os.environ['HF_HOME'] = '/home/x/.cache/huggingface' |
|
|
|
|
|
import torch |
|
|
from transformers import ( |
|
|
AutoModelForCausalLM, |
|
|
AutoTokenizer, |
|
|
TrainingArguments, |
|
|
Trainer, |
|
|
DataCollatorForLanguageModeling |
|
|
) |
|
|
from datasets import Dataset |
|
|
import json |
|
|
|
|
|
|
|
|
# Hugging Face model id of the base model to fine-tune.
MODEL_NAME = "Qwen/Qwen3-8B"
# JSONL file of chat examples; each line is a JSON object with a "messages" list.
TRAIN_DATA_PATH = "/home/x/adaptai/aiml/e-train-1/elizabeth_tooluse_minipack_v1.jsonl"
# Directory where checkpoints and the final model are written.
OUTPUT_DIR = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-simple"

# YaRN RoPE-scaling configuration forwarded to `from_pretrained`.
# With factor 8.0 over a 16384-token base, the effective context window
# becomes 8 * 16384 = 131072 tokens.
YARN_CONFIG = {
    "rope_scaling": {
        "type": "yarn",
        "factor": 8.0,  # context-length multiplier
        "original_max_position_embeddings": 16384,  # pre-scaling context size
        "extrapolation_factor": 1.0,
        "attn_factor": 1.0,
        # beta_fast / beta_slow tune YaRN's per-frequency interpolation ramp;
        # these are the commonly used defaults — confirm against the Qwen3 docs.
        "beta_fast": 32.0,
        "beta_slow": 1.0
    }
}
|
|
|
|
|
class SimpleTrainer:
    """Minimal fine-tuning pipeline for the "simple" (non-tool-use) subset
    of the Elizabeth training data.

    Workflow: ``setup_model()`` loads tokenizer + model with the YaRN
    rope-scaling config, ``load_simple_dataset()`` builds a tokenized
    ``datasets.Dataset``, and ``train()`` runs one epoch and saves the result.
    """

    def __init__(self):
        # Populated by setup_model(); kept as attributes so dataset
        # tokenization and the Trainer can reuse them.
        self.model = None
        self.tokenizer = None

    def setup_model(self):
        """Load the tokenizer and model with the YaRN configuration."""
        # NOTE(review): original emoji in these messages arrived mojibake;
        # replaced with plain ASCII text.
        print("Loading Qwen3-8B with YaRN configuration...")

        self.tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True,
        )
        # Some causal-LM tokenizers ship without a pad token; reuse EOS so
        # the collator can pad batches.
        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        self.model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            trust_remote_code=True,
            **YARN_CONFIG,  # expands to rope_scaling={...}
        )

        print("Model loaded with YaRN configuration")
        print(f"Context length: {self.model.config.max_position_embeddings}")

    @staticmethod
    def _extract_texts(lines):
        """Convert an iterable of JSONL lines into flat ``role: content``
        transcripts, skipping any example that mentions a tool call.

        Args:
            lines: iterable of JSON strings, each with a "messages" list.

        Returns:
            list[str]: one newline-joined transcript per kept example.
        """
        texts = []
        for line in lines:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines in the JSONL file
            data = json.loads(line)
            messages = data.get('messages', [])
            # This script trains core identity only — drop tool-use examples.
            if any('tool_call' in str(msg) for msg in messages):
                continue
            conversation = ""
            for msg in messages:
                if 'content' in msg:
                    conversation += f"{msg['role']}: {msg['content']}\n"
            texts.append(conversation)
        return texts

    def load_simple_dataset(self):
        """Read the JSONL file and return a tokenized ``Dataset``.

        Must be called after ``setup_model()`` (needs ``self.tokenizer``).
        """
        print("Loading simple training data...")

        with open(TRAIN_DATA_PATH, 'r', encoding='utf-8') as f:
            texts = self._extract_texts(f)

        print(f"Loaded {len(texts)} simple examples")

        dataset = Dataset.from_dict({"text": texts})

        # BUGFIX: DataCollatorForLanguageModeling needs token ids, not raw
        # strings — the original returned an untokenized dataset, which
        # crashes inside Trainer. Tokenize up front and drop the text column.
        def _tokenize(batch):
            # max_length=4096 keeps memory bounded; raise if the YaRN
            # long-context window is actually needed for training examples.
            return self.tokenizer(batch["text"], truncation=True, max_length=4096)

        return dataset.map(_tokenize, batched=True, remove_columns=["text"])

    def train(self):
        """End-to-end run: load model, build dataset, train one epoch, save."""
        self.setup_model()
        dataset = self.load_simple_dataset()

        training_args = TrainingArguments(
            output_dir=OUTPUT_DIR,
            num_train_epochs=1,
            per_device_train_batch_size=1,
            gradient_accumulation_steps=16,  # effective batch size of 16
            learning_rate=2e-5,
            warmup_ratio=0.03,
            lr_scheduler_type="cosine",
            logging_steps=10,
            save_steps=100,
            bf16=True,
            gradient_checkpointing=True,  # trade compute for memory on 8B model
            report_to=[],  # disable external experiment trackers
        )

        data_collator = DataCollatorForLanguageModeling(
            tokenizer=self.tokenizer,
            mlm=False,  # causal LM objective, not masked LM
        )

        trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=dataset,
            data_collator=data_collator,
            tokenizer=self.tokenizer,
        )

        print("Starting simple training...")
        trainer.train()

        trainer.save_model()
        # Save the tokenizer too so OUTPUT_DIR is a self-contained checkpoint.
        self.tokenizer.save_pretrained(OUTPUT_DIR)
        print("Simple training completed!")
|
|
|
|
|
if __name__ == "__main__":
    # Script entry point: run the full load-train-save pipeline.
    SimpleTrainer().train()