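"""Train a small MLP classifier on precomputed embeddings.

Loads embedding and label arrays from .npy files, makes a stratified
train/validation split, trains a one-hidden-layer MLP with AdamW and
cross-entropy loss, and saves the checkpoint with the lowest validation
loss to <output_dir>/best_model.pt.

Example invocation (paths are the argparse defaults; adjust to your data):
    python train_mlp.py --embeddings_path artifacts/embeddings.npy \
        --labels_path artifacts/labels.npy --output_dir artifacts
"""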
import argparse
import os
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from tqdm import tqdm
@dataclass
class TrainConfig:
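    """Paths and hyperparameters for a single training run."""
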
embeddings_path: str
labels_path: str
output_dir: str
val_size: float
seed: int
epochs: int
batch_size: int
lr: float
hidden_size: int
dropout: float
device: str
class MLPClassifier(nn.Module):
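    """One-hidden-layer MLP: Linear -> ReLU -> Dropout -> Linear logits head."""
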
def __init__(self, input_dim: int, hidden_size: int, dropout: float, num_classes: int = 6) -> None:
super().__init__()
self.net = nn.Sequential(
nn.Linear(input_dim, hidden_size),
nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(hidden_size, num_classes),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.net(x)
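
# Illustrative shape check (the 768-dim input here is an assumption for the
# example; the real input_dim is taken from embeddings.shape[1] in train()):
#   >>> model = MLPClassifier(input_dim=768, hidden_size=512, dropout=0.2)
#   >>> model(torch.randn(8, 768)).shape
#   torch.Size([8, 6])
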
def load_data(embeddings_path: str, labels_path: str) -> Tuple[np.ndarray, np.ndarray]:
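    """Load embedding/label arrays from .npy files and cast to float32/int64."""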
embeddings = np.load(embeddings_path)
labels = np.load(labels_path)
    if embeddings.shape[0] != labels.shape[0]:
        raise ValueError(
            f"Embeddings and labels have different lengths: "
            f"{embeddings.shape[0]} vs {labels.shape[0]}"
        )
return embeddings.astype(np.float32), labels.astype(np.int64)
def make_loaders(
embeddings: np.ndarray,
labels: np.ndarray,
val_size: float,
seed: int,
batch_size: int,
) -> Tuple[DataLoader, DataLoader]:
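    """Split the data (stratified by label) and wrap each split in a DataLoader."""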
x_train, x_val, y_train, y_val = train_test_split(
embeddings,
labels,
test_size=val_size,
random_state=seed,
stratify=labels,
)
train_ds = TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
val_ds = TensorDataset(torch.from_numpy(x_val), torch.from_numpy(y_val))
train_loader = DataLoader(train_ds, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_ds, batch_size=batch_size, shuffle=False)
return train_loader, val_loader
def evaluate(
model: nn.Module,
loader: DataLoader,
device: str,
criterion: nn.Module,
) -> Tuple[float, float, float]:
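    """Return (average loss, accuracy, macro-F1) for the model over a loader."""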
model.eval()
preds = []
targets = []
losses = []
with torch.no_grad():
for batch in loader:
features, labels = (item.to(device) for item in batch)
logits = model(features)
loss = criterion(logits, labels)
pred = torch.argmax(logits, dim=1)
losses.append(loss.item())
preds.append(pred.cpu().numpy())
targets.append(labels.cpu().numpy())
y_pred = np.concatenate(preds)
y_true = np.concatenate(targets)
acc = float(accuracy_score(y_true, y_pred))
f1 = float(f1_score(y_true, y_pred, average="macro"))
avg_loss = float(np.mean(losses)) if losses else 0.0
return avg_loss, acc, f1
def train(config: TrainConfig) -> None:
    """Train the MLP, checkpointing the weights with the lowest validation loss."""
    # Seed torch in addition to the sklearn split so weight initialization
    # and dropout are reproducible across runs.
    torch.manual_seed(config.seed)
    os.makedirs(config.output_dir, exist_ok=True)
embeddings, labels = load_data(config.embeddings_path, config.labels_path)
train_loader, val_loader = make_loaders(
embeddings=embeddings,
labels=labels,
val_size=config.val_size,
seed=config.seed,
batch_size=config.batch_size,
)
model = MLPClassifier(
input_dim=embeddings.shape[1],
hidden_size=config.hidden_size,
dropout=config.dropout,
num_classes=6,
).to(config.device)
optimizer = torch.optim.AdamW(model.parameters(), lr=config.lr)
criterion = nn.CrossEntropyLoss()
best_val_loss = float("inf")
for epoch in range(1, config.epochs + 1):
model.train()
epoch_losses = []
for batch in tqdm(train_loader, desc=f"Epoch {epoch}/{config.epochs}"):
features, batch_labels = (item.to(config.device) for item in batch)
logits = model(features)
loss = criterion(logits, batch_labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
epoch_losses.append(loss.item())
val_loss, val_acc, val_f1 = evaluate(model, val_loader, config.device, criterion)
avg_loss = float(np.mean(epoch_losses)) if epoch_losses else 0.0
print(
f"Epoch {epoch}: loss={avg_loss:.4f} val_loss={val_loss:.4f} "
f"val_acc={val_acc:.4f} val_f1={val_f1:.4f}"
)
if val_loss < best_val_loss:
best_val_loss = val_loss
            torch.save(
                {
                    "model_state_dict": model.state_dict(),
                    "input_dim": embeddings.shape[1],
                    "hidden_size": config.hidden_size,
                    "dropout": config.dropout,
                    # Stored so the checkpoint fully describes the architecture.
                    "num_classes": 6,
                },
                os.path.join(config.output_dir, "best_model.pt"),
            )
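
# Minimal inference sketch (not part of the training CLI): rebuilds the model
# from the checkpoint layout written by train() above. `checkpoint_path` and
# the embeddings array passed to predict() are caller-supplied assumptions.
def load_best_model(checkpoint_path: str, device: str = "cpu") -> MLPClassifier:
    """Reconstruct the MLP from a best_model.pt checkpoint."""
    ckpt = torch.load(checkpoint_path, map_location=device)
    model = MLPClassifier(
        input_dim=ckpt["input_dim"],
        hidden_size=ckpt["hidden_size"],
        dropout=ckpt["dropout"],
        num_classes=ckpt.get("num_classes", 6),
    )
    model.load_state_dict(ckpt["model_state_dict"])
    model.to(device)
    model.eval()
    return model


def predict(model: MLPClassifier, embeddings: np.ndarray, device: str = "cpu") -> np.ndarray:
    """Predict class ids for an (n_samples, input_dim) embeddings array."""
    with torch.no_grad():
        logits = model(torch.from_numpy(embeddings.astype(np.float32)).to(device))
    return torch.argmax(logits, dim=1).cpu().numpy()
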
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument("--embeddings_path", default="artifacts/embeddings.npy")
parser.add_argument("--labels_path", default="artifacts/labels.npy")
parser.add_argument("--output_dir", default="artifacts")
parser.add_argument("--val_size", type=float, default=0.1)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--lr", type=float, default=4e-2)
parser.add_argument("--hidden_size", type=int, default=512)
parser.add_argument("--dropout", type=float, default=0.2)
parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu")
args = parser.parse_args()
config = TrainConfig(
embeddings_path=args.embeddings_path,
labels_path=args.labels_path,
output_dir=args.output_dir,
val_size=args.val_size,
seed=args.seed,
epochs=args.epochs,
batch_size=args.batch_size,
lr=args.lr,
hidden_size=args.hidden_size,
dropout=args.dropout,
device=args.device,
)
train(config)
if __name__ == "__main__":
main()