import torch
from torch.utils.data import Dataset, DataLoader
from transformers import AutoTokenizer, get_linear_schedule_with_warmup
from components.trm_core import TinyRecursiveModel
from tqdm import tqdm
import json
import os
from typing import List, Dict


class ConversationDataset(Dataset):
    def __init__(self, data_path: str, tokenizer, max_length: int = 512):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.examples = self._load_data(data_path)

    def _load_data(self, data_path: str) -> List[Dict]:
        """Load and preprocess conversation data."""
        with open(data_path, 'r') as f:
            data = json.load(f)

        examples = []
        for conv in data:
            # Flatten each conversation into one string of "role: content" turns.
            text = " ".join(f"{msg['role']}: {msg['content']}" for msg in conv)
            examples.append({"text": text})
        return examples

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        example = self.examples[idx]
        encoding = self.tokenizer(
            example["text"],
            max_length=self.max_length,
            padding="max_length",
            truncation=True,
            return_tensors="pt"
        )
        # squeeze(0) drops the batch dimension added by return_tensors="pt";
        # the DataLoader re-batches the samples itself.
        return {
            "input_ids": encoding["input_ids"].squeeze(0),
            "attention_mask": encoding["attention_mask"].squeeze(0)
        }

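
# Illustrative usage (file paths assume the placeholder data written by
# main() below; the shape follows from max_length=512):
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   tokenizer.pad_token = tokenizer.eos_token
#   dataset = ConversationDataset("data/train_conversations.json", tokenizer)
#   dataset[0]["input_ids"].shape  # torch.Size([512])
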
def train_model(
    model: torch.nn.Module,
    train_loader: DataLoader,
    val_loader: DataLoader,
    num_epochs: int = 10,
    learning_rate: float = 5e-5,
    warmup_steps: int = 1000,
    output_dir: str = "checkpoints"
) -> None:
    """Train the TRM model."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    # Create the checkpoint directory before the first save.
    os.makedirs(output_dir, exist_ok=True)

    # AdamW with linear warmup then linear decay. Note: with a very small
    # dataset, total_steps can fall below warmup_steps, in which case the
    # learning rate is still warming up when training ends.
    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
    total_steps = len(train_loader) * num_epochs
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=warmup_steps,
        num_training_steps=total_steps
    )

    best_val_loss = float('inf')
    for epoch in range(num_epochs):
        model.train()
        train_loss = 0.0
        progress_bar = tqdm(train_loader, desc=f"Epoch {epoch + 1}/{num_epochs}")

        for batch in progress_bar:
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)

            # Mask padding positions out of the loss; otherwise the model is
            # also trained to predict pad tokens (EOS here). This assumes the
            # model's loss ignores label -100, the CrossEntropyLoss default.
            labels = input_ids.masked_fill(attention_mask == 0, -100)

            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs.loss

            # Backprop with gradient clipping, then advance the LR schedule.
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            optimizer.step()
            scheduler.step()
            optimizer.zero_grad()

            batch_loss = loss.item()
            train_loss += batch_loss
            progress_bar.set_postfix({"loss": batch_loss})

        avg_train_loss = train_loss / len(train_loader)

        # Validate once per epoch and report both losses.
        val_loss = evaluate(model, val_loader, device)
        print(f"Epoch {epoch + 1} - Train Loss: {avg_train_loss:.4f}, Val Loss: {val_loss:.4f}")

        # Keep a copy of the weights with the best validation loss.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), os.path.join(output_dir, "best_model.pt"))

        # Full checkpoint, including optimizer and scheduler state, so
        # training can be resumed from any epoch.
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'scheduler_state_dict': scheduler.state_dict(),
            'loss': avg_train_loss,
        }, os.path.join(output_dir, f"checkpoint_epoch_{epoch}.pt"))

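
# Resuming from a full checkpoint (sketch; the keys match the dict saved in
# train_model above):
#   checkpoint = torch.load("checkpoints/checkpoint_epoch_0.pt", map_location="cpu")
#   model.load_state_dict(checkpoint['model_state_dict'])
#   optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
#   scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
#   start_epoch = checkpoint['epoch'] + 1
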
def evaluate(model: torch.nn.Module, data_loader: DataLoader, device: torch.device) -> float:
    """Evaluate the model on the validation set."""
    model.eval()
    total_loss = 0.0

    with torch.no_grad():
        for batch in data_loader:
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)

            # Apply the same padding mask as in training so that train and
            # validation losses are comparable.
            labels = input_ids.masked_fill(attention_mask == 0, -100)
            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
            total_loss += outputs.loss.item()

    return total_loss / len(data_loader)

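
# evaluate() returns the mean cross-entropy over batches; if a single
# perplexity figure is wanted, math.exp(val_loss) is a rough estimate.
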
def main():
    # GPT-2's tokenizer ships without a pad token, so reuse EOS for padding.
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    tokenizer.pad_token = tokenizer.eos_token

    model = TinyRecursiveModel(
        d_model=512,
        nhead=8,
        num_layers=6,
        vocab_size=len(tokenizer),
        max_seq_length=1024
    )

    os.makedirs("data", exist_ok=True)

    # Write a tiny placeholder dataset if no training data is present yet.
    if not os.path.exists("data/train_conversations.json"):
        example_data = [
            [
                {"role": "user", "content": "Hello, how are you?"},
                {"role": "assistant", "content": "I'm doing well, thank you for asking! How can I assist you today?"}
            ],
            [
                {"role": "user", "content": "What's the weather like?"},
                {"role": "assistant", "content": "I don't have access to real-time weather data, but you can check a weather service for the latest updates."}
            ]
        ]
        with open("data/train_conversations.json", 'w') as f:
            json.dump(example_data, f)
        with open("data/val_conversations.json", 'w') as f:
            json.dump(example_data, f)

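    # To train on real data, replace the files above using the same schema: a
    # JSON list of conversations, each a list of {"role", "content"} messages.
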

    train_dataset = ConversationDataset("data/train_conversations.json", tokenizer)
    val_dataset = ConversationDataset("data/val_conversations.json", tokenizer)

    train_loader = DataLoader(train_dataset, batch_size=4, shuffle=True)
    val_loader = DataLoader(val_dataset, batch_size=4)

    train_model(
        model=model,
        train_loader=train_loader,
        val_loader=val_loader,
        num_epochs=10,
        learning_rate=5e-5,
        output_dir="checkpoints"
    )


if __name__ == "__main__":
    main()