"""
Final training script for Elizabeth - handles tool call format properly.
"""

import os

# HF_HOME must be set before any Hugging Face library is imported
os.environ['HF_HOME'] = '/home/x/.cache/huggingface'

import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    TrainingArguments,
    Trainer,
    DataCollatorForLanguageModeling,
)
from datasets import Dataset
import json

# Model, data, and output locations
MODEL_NAME = "Qwen/Qwen3-8B"
TRAIN_DATA_PATH = "/home/x/adaptai/aiml/e-train-1/elizabeth_tooluse_minipack_v1.jsonl"
OUTPUT_DIR = "/home/x/adaptai/experiments/qwen3-8b-elizabeth-final"

# YaRN rope-scaling settings, forwarded to the model config at load time
YARN_CONFIG = {
    "rope_scaling": {
        "type": "yarn",
        "factor": 8.0,
        "original_max_position_embeddings": 16384,
        "extrapolation_factor": 1.0,
        "attn_factor": 1.0,
        "beta_fast": 32.0,
        "beta_slow": 1.0
    }
}
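
# With these settings the effective context window is
# original_max_position_embeddings * factor = 16384 * 8 = 131072 tokens.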


class ElizabethTrainer:
    def __init__(self):
        self.model = None
        self.tokenizer = None

    def setup_model(self):
        """Load the tokenizer and model with the YaRN configuration."""
        print("🚀 Loading Qwen3-8B with YaRN configuration...")

        # Load tokenizer; fall back to EOS as the padding token if none is set
        self.tokenizer = AutoTokenizer.from_pretrained(
            MODEL_NAME,
            trust_remote_code=True
        )

        if self.tokenizer.pad_token is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

        # Load in bfloat16; **YARN_CONFIG passes rope_scaling through to the config
        self.model = AutoModelForCausalLM.from_pretrained(
            MODEL_NAME,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            trust_remote_code=True,
            **YARN_CONFIG
        )

        # The KV cache is incompatible with gradient checkpointing (enabled below)
        self.model.config.use_cache = False

        print("✅ Model loaded with YaRN configuration")
        print(f"📏 Context length: {self.model.config.max_position_embeddings}")

    def convert_messages_to_text(self, messages):
        """Convert messages to ChatML-style text, handling tool calls properly."""
        text = ""
        for msg in messages:
            if msg['role'] == 'system':
                text += f"<|im_start|>system\n{msg['content']}<|im_end|>\n"
            elif msg['role'] == 'user':
                text += f"<|im_start|>user\n{msg['content']}<|im_end|>\n"
            elif msg['role'] == 'assistant':
                # Assistant turns carry either a tool call or plain content
                if 'tool_call' in msg:
                    tool_call = msg['tool_call']
                    text += f"<|im_start|>assistant\nCALL {tool_call['name']} {json.dumps(tool_call['arguments'])}<|im_end|>\n"
                else:
                    text += f"<|im_start|>assistant\n{msg['content']}<|im_end|>\n"
            elif msg['role'] == 'tool':
                text += f"<|im_start|>tool\n{json.dumps(msg['content'])}<|im_end|>\n"
        return text

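    # Example serialization of an assistant tool-call turn (values illustrative,
    # not drawn from the training data):
    #   <|im_start|>assistant
    #   CALL get_weather {"city": "Paris"}<|im_end|>
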
    def tokenize_function(self, examples):
        """Tokenize batches of message lists into model inputs."""
        # Flatten each conversation into a single training string
        texts = []
        for messages in examples['messages']:
            text = self.convert_messages_to_text(messages)
            texts.append(text)

        tokenized = self.tokenizer(
            texts,
            truncation=True,
            padding=False,
            max_length=4096,
            return_tensors=None
        )

        # Causal LM: labels mirror input_ids; the model shifts them internally
        tokenized["labels"] = tokenized["input_ids"].copy()
        return tokenized

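    # Note: max_length=4096 caps training sequences well below the YaRN-extended
    # window, so the rope scaling above mainly benefits long-context inference.
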
    def load_dataset(self):
        """Load the JSONL training data and tokenize it."""
        print("📊 Loading and tokenizing data...")

        data = []
        with open(TRAIN_DATA_PATH, 'r') as f:
            for line in f:
                entry = json.loads(line)
                data.append({"messages": entry["messages"]})

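        # Each parsed entry is expected to look like this (field values are
        # illustrative, inferred from convert_messages_to_text):
        # {"messages": [{"role": "user", "content": "..."},
        #               {"role": "assistant", "tool_call": {"name": "...", "arguments": {...}}}]}
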
        dataset = Dataset.from_list(data)

        tokenized_dataset = dataset.map(
            self.tokenize_function,
            batched=True,
            batch_size=10,
            remove_columns=dataset.column_names
        )

        print(f"✅ Tokenized {len(tokenized_dataset)} examples")
        return tokenized_dataset

    def train(self):
        """Run the full fine-tuning pipeline."""
        self.setup_model()
        dataset = self.load_dataset()

        training_args = TrainingArguments(
            output_dir=OUTPUT_DIR,
            num_train_epochs=1,
            per_device_train_batch_size=1,
            gradient_accumulation_steps=16,
            learning_rate=2e-5,
            warmup_ratio=0.03,
            lr_scheduler_type="cosine",
            logging_steps=10,
            save_steps=100,
            bf16=True,
            gradient_checkpointing=True,
            remove_unused_columns=False,
            report_to=[],
        )
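
        # Effective batch size: 1 sequence/device * 16 accumulation steps = 16
        # sequences per optimizer step.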

        # Standard causal-LM collator; mlm=False selects next-token prediction
        data_collator = DataCollatorForLanguageModeling(
            tokenizer=self.tokenizer,
            mlm=False,
        )

        trainer = Trainer(
            model=self.model,
            args=training_args,
            train_dataset=dataset,
            data_collator=data_collator,
        )

        print("🎯 Starting training...")
        print("⏰ This will take approximately 8 hours...")

        trainer.train()

        # Save the final weights and the tokenizer alongside them
        trainer.save_model()
        self.tokenizer.save_pretrained(OUTPUT_DIR)

        print("✅ Training completed!")
        print(f"💾 Model saved to: {OUTPUT_DIR}")


if __name__ == "__main__":
    trainer = ElizabethTrainer()
    trainer.train()