# MNIST MLP training script.
# Provenance: FRGCF / test / MNIST / mnist_mlp.py
# (uploaded to the Hugging Face Hub via huggingface_hub by kouke123, commit 8304f29).
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
# Run on GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Convert PIL images to float tensors scaled to [0, 1].
transform = transforms.ToTensor()

# Download (if not cached under ./data) and wrap both MNIST splits.
train_set = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_set = datasets.MNIST(root='./data', train=False, download=True, transform=transform)

# Mini-batch iterators; only the training data is shuffled each epoch.
train_loader = DataLoader(train_set, batch_size=64, shuffle=True)
test_loader = DataLoader(test_set, batch_size=64, shuffle=False)
class MNISTNet(nn.Module):
    """Small MLP classifier for MNIST: 784 -> 128 (ReLU) -> 10 logits."""

    def __init__(self):
        super().__init__()
        self.flatten = nn.Flatten()
        self.fc1 = nn.Linear(28 * 28, 128)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Return raw class logits of shape (batch, 10).

        x: image batch of shape (batch, 1, 28, 28); nn.Flatten collapses
        everything after the batch dimension to 784 features.
        """
        # NOTE: the original had per-batch debug prints of tensor shapes here;
        # they ran on every forward pass and flooded stdout, so they were removed.
        x = self.flatten(x)
        x = self.fc1(x)
        x = self.relu(x)
        return self.fc2(x)
# Instantiate the network and move its parameters to the selected device.
model = MNISTNet().to(device)
# Cross-entropy expects raw logits and integer class labels.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
def train():
    """Run one training epoch over train_loader.

    Returns the mean per-batch loss for the epoch (0.0 if the loader is
    empty), so callers can monitor training progress; the original returned
    nothing, so returning a value is backward-compatible.
    """
    model.train()  # enable training-mode behavior (dropout/batchnorm, if any)
    running_loss = 0.0
    num_batches = 0
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()  # clear gradients accumulated by the last step
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        num_batches += 1
    return running_loss / num_batches if num_batches else 0.0
def test():
    """Evaluate the model on test_loader.

    Prints and returns top-1 accuracy as a float in [0, 1]. Returns 0.0 for
    an empty loader (the original raised ZeroDivisionError in that case).
    Returning the value is backward-compatible: the original returned None
    and its caller ignores the result.
    """
    model.eval()  # disable training-mode behavior for evaluation
    correct = 0
    total = 0
    with torch.no_grad():  # no gradients needed; saves memory and time
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            pred = torch.argmax(outputs, dim=1)  # predicted class per sample
            correct += (pred == labels).sum().item()
            total += labels.size(0)
    acc = correct / total if total else 0.0
    print(f"Accuracy: {acc:.4f}")
    return acc
# Train for five epochs, reporting test-set accuracy after each one.
NUM_EPOCHS = 5
for epoch in range(NUM_EPOCHS):
    train()
    print(f"Epoch {epoch+1} finished")
    test()