# arxiv_embeddings / translate.py
# This is the script I used to train the tiny model.
import json

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset, random_split
if torch.cuda.is_available():
    device = torch.device('cuda')
    print("Using GPU:", torch.cuda.get_device_name(0))
else:
    device = torch.device('cpu')
    print("Using CPU")
class VectorDataset(Dataset):
    """Pairs of (GTE embedding, ada embedding) loaded from a JSON file."""

    def __init__(self, json_file):
        with open(json_file, 'r') as f:
            self.data = json.load(f)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        # Each record stores the source and target embeddings as plain lists;
        # torch.tensor converts them to float32 tensors.
        input_vector = torch.tensor(self.data[idx]['gte-embedding'])
        output_vector = torch.tensor(self.data[idx]['ada-embedding'])
        return input_vector, output_vector
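# A minimal sketch of the record layout 'bighybrid.json' is assumed to follow,
# based on the keys read above; the 1024/1536 lengths are inferred from the
# first and last layer sizes of the model defined below:
#
# [
#   {"gte-embedding": [0.01, -0.02, ...],   # 1024 floats
#    "ada-embedding": [0.03, 0.04, ...]},   # 1536 floats
#   ...
# ]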
# Load the full dataset, then hold out 20% of it for validation.
full_dataset = VectorDataset('bighybrid.json')
train_size = int(0.8 * len(full_dataset))
val_size = len(full_dataset) - train_size
train_dataset, val_dataset = random_split(full_dataset, [train_size, val_size])

train_loader = DataLoader(train_dataset, batch_size=2, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=2, shuffle=False)
class LinearModel(nn.Module):
    """MLP that maps a 1024-dim GTE embedding to a 1536-dim ada-style embedding."""

    def __init__(self):
        super().__init__()
        self.linear_1 = nn.Linear(1024, 1024)
        self.linear_2 = nn.Linear(1024, 1024)
        self.dropout_1 = nn.Dropout()
        self.linear_3 = nn.Linear(1024, 2048)
        self.dropout_2 = nn.Dropout()
        self.linear_4 = nn.Linear(2048, 1024)
        self.dropout_3 = nn.Dropout()
        self.linear_5 = nn.Linear(1024, 1536)

    def forward(self, x):
        x = F.relu(self.linear_1(x))
        x = F.relu(self.linear_2(x))
        x = self.dropout_1(x)
        x = F.relu(self.linear_3(x))
        x = self.dropout_2(x)
        x = F.relu(self.linear_4(x))
        x = self.dropout_3(x)
        # No activation on the final layer: the output is a raw embedding.
        x = self.linear_5(x)
        return x
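# Shape sanity check (illustrative only): a fresh, untrained instance should map
# a batch of 1024-dim vectors to 1536-dim vectors.
with torch.no_grad():
    _probe = LinearModel().eval()
    assert _probe(torch.randn(4, 1024)).shape == (4, 1536)
del _probe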
def cosine_similarity_loss(outputs, targets):
    # 0 when outputs and targets point in the same direction, 1 when opposite.
    return (1 - F.cosine_similarity(outputs, targets)).mean() / 2
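# Quick numeric check (illustrative only): identical vectors give ~0 loss,
# opposite vectors give the maximum loss of 1.
_v = torch.randn(2, 1536)
assert cosine_similarity_loss(_v, _v).item() < 1e-5
assert abs(cosine_similarity_loss(_v, -_v).item() - 1.0) < 1e-5
del _v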
model = LinearModel().to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
num_epochs = 200
for epoch in range(num_epochs):
    # Training
    model.train()
    train_loss = 0.0
    for inputs, targets in train_loader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = cosine_similarity_loss(outputs, targets)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    train_loss /= len(train_loader)

    # Validation
    model.eval()
    val_loss = 0.0
    with torch.no_grad():
        for inputs, targets in val_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            loss = cosine_similarity_loss(outputs, targets)
            val_loss += loss.item()
    val_loss /= len(val_loader)

    print(f"Epoch {epoch+1}/{num_epochs}, Training Loss: {train_loss:.4f}, Validation Loss: {val_loss:.4f}")
torch.save(model.state_dict(), 'model_weights.pth')
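# Usage sketch (illustrative only): reload the trained weights and translate a
# single GTE embedding into an ada-style embedding. The random vector below is
# a stand-in for a real 1024-dim GTE embedding.
translator = LinearModel().to(device)
translator.load_state_dict(torch.load('model_weights.pth', map_location=device))
translator.eval()
with torch.no_grad():
    gte_vec = torch.randn(1, 1024, device=device)  # placeholder input
    ada_like = translator(gte_vec)
print("Translated embedding shape:", tuple(ada_like.shape))  # expected: (1, 1536)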