Commit f4b1740 (verified) by xcx0902
Parent(s): 3ca5136

Upload folder using huggingface_hub

Files changed (4):
  1. config.json +49 -0
  2. model.safetensors +3 -0
  3. train.py +110 -0
  4. vit_mnist.pth +3 -0
config.json ADDED
@@ -0,0 +1,49 @@
+ {
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 128,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2",
+     "3": "LABEL_3",
+     "4": "LABEL_4",
+     "5": "LABEL_5",
+     "6": "LABEL_6",
+     "7": "LABEL_7",
+     "8": "LABEL_8",
+     "9": "LABEL_9"
+   },
+   "image_size": 28,
+   "initializer_range": 0.02,
+   "intermediate_size": 256,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2,
+     "LABEL_3": 3,
+     "LABEL_4": 4,
+     "LABEL_5": 5,
+     "LABEL_6": 6,
+     "LABEL_7": 7,
+     "LABEL_8": 8,
+     "LABEL_9": 9
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 4,
+   "num_channels": 3,
+   "num_hidden_layers": 4,
+   "patch_size": 7,
+   "pooler_act": "tanh",
+   "pooler_output_size": 128,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.51.3"
+ }
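
With patch_size 7 on a 28x28 input, the encoder sees a 4x4 grid of 16 patches plus the CLS token. This committed config.json can be loaded back through transformers to rebuild the same architecture; a minimal sketch, assuming the file sits in the current working directory (which is where train.py's save_pretrained(".") writes it):

    from transformers import ViTConfig, ViTForImageClassification

    # Read the committed config.json and instantiate a fresh (untrained) model
    # with the identical architecture: 4 layers, 4 heads, hidden_size 128.
    config = ViTConfig.from_pretrained(".")
    model = ViTForImageClassification(config)
    print(config.image_size // config.patch_size)  # 4 -> a 4x4 grid of patches
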
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce9f6e60324eef57eb3014c9c3e5fbcc77f948c9a4a614f21bb56f10ad7a2ce2
+ size 2218808
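
This is a Git LFS pointer file: the actual weights (about 2.2 MB) live in LFS storage, identified by the sha256 oid above. huggingface_hub resolves the pointer transparently when downloading; a sketch using a hypothetical repo id, since the page does not show one:

    from huggingface_hub import hf_hub_download

    # "xcx0902/vit-mnist" is a placeholder -- substitute the real repo id.
    path = hf_hub_download(repo_id="xcx0902/vit-mnist", filename="model.safetensors")
    print(path)  # local cache path to the downloaded 2,218,808-byte file
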
train.py ADDED
@@ -0,0 +1,110 @@
+ import torch
+ from torch.utils.data import DataLoader
+ from torchvision import transforms, datasets
+ from transformers import ViTModel, ViTConfig, ViTForImageClassification
+ import torch.nn as nn
+ import torch.optim as optim
+ from tqdm import tqdm
+
+ # Set device
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Hyperparameters
+ IMAGE_SIZE = 28  # MNIST image size
+ PATCH_SIZE = 7   # Patch size to divide 28x28 image
+ NUM_CLASSES = 10
+ BATCH_SIZE = 128
+ EPOCHS = 5
+ LR = 2e-4
+
+ # Resize and normalize
+ transform = transforms.Compose([
+     transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),
+     transforms.ToTensor(),
+     transforms.Normalize((0.5,), (0.5,))
+ ])
+
+ # Load MNIST dataset
+ train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
+ test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
+
+ train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
+ test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE)
+
+ # Use a pre-configured ViT for image classification
+ configuration = ViTConfig(
+     image_size=IMAGE_SIZE,
+     patch_size=PATCH_SIZE,
+     num_labels=NUM_CLASSES,
+     hidden_size=128,
+     num_hidden_layers=4,
+     num_attention_heads=4,
+     intermediate_size=256,
+     hidden_act="gelu",
+     hidden_dropout_prob=0.1,
+     attention_probs_dropout_prob=0.1,
+     initializer_range=0.02
+ )
+
+ model = ViTForImageClassification(configuration).to(device)
+
+ # Alternatively, you can also load a pretrained ViT and fine-tune it:
+ # model = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224-in21k', num_labels=10)
+
+ # Optimizer
+ optimizer = optim.AdamW(model.parameters(), lr=LR)
+ criterion = nn.CrossEntropyLoss()
+
+ # Training loop
+ def train():
+     model.train()
+     for epoch in range(EPOCHS):
+         total_loss = 0
+         correct = 0
+         total = 0
+         for images, labels in tqdm(train_loader, desc=f"Epoch {epoch+1}/{EPOCHS}"):
+             images, labels = images.to(device), labels.to(device)
+
+             # Repeat grayscale channel to match expected input shape (ViT expects 3 channels)
+             images = images.repeat(1, 3, 1, 1)
+
+             outputs = model(images, labels=labels)
+             loss = outputs.loss
+             logits = outputs.logits
+
+             optimizer.zero_grad()
+             loss.backward()
+             optimizer.step()
+
+             total_loss += loss.item()
+             preds = torch.argmax(logits, dim=-1)
+             correct += (preds == labels).sum().item()
+             total += labels.size(0)
+
+         print(f"Epoch {epoch+1}, Loss: {total_loss/len(train_loader):.4f}, Accuracy: {correct/total:.4f}")
+
+ # Evaluation loop
+ def evaluate():
+     model.eval()
+     correct = 0
+     total = 0
+     with torch.no_grad():
+         for images, labels in test_loader:
+             images, labels = images.to(device), labels.to(device)
+             images = images.repeat(1, 3, 1, 1)
+
+             outputs = model(images)
+             logits = outputs.logits
+
+             preds = torch.argmax(logits, dim=-1)
+             correct += (preds == labels).sum().item()
+             total += labels.size(0)
+
+     print(f"Test Accuracy: {correct / total:.4f}")
+
+ # Run training and evaluation
+ if __name__ == "__main__":
+     train()
+     evaluate()
+     model.save_pretrained(".")
+     torch.save(model, "vit_mnist.pth")
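
Because the script ends with save_pretrained("."), the trained checkpoint (the config.json and model.safetensors in this commit) can be reloaded like any transformers model. A minimal inference sketch under that assumption, reusing the script's own preprocessing (resize, normalize, repeat to three channels):

    import torch
    from torchvision import datasets, transforms
    from transformers import ViTForImageClassification

    # Reload from the directory written by save_pretrained(".")
    model = ViTForImageClassification.from_pretrained(".").eval()

    transform = transforms.Compose([
        transforms.Resize((28, 28)),
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,))
    ])
    image, label = datasets.MNIST(root='./data', train=False, download=True, transform=transform)[0]

    with torch.no_grad():
        # (1, 28, 28) -> (1, 3, 28, 28): repeat the channel, add a batch dim
        logits = model(image.repeat(3, 1, 1).unsqueeze(0)).logits
    print(f"predicted: {logits.argmax(-1).item()}, actual: {label}")
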
vit_mnist.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5eb7d550d17e6e7f76658bd8e70a65b3e9e451f5bef9deb4ada7cb5be5c7350
+ size 2254631
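
Unlike model.safetensors, this file comes from torch.save(model, ...), i.e. the whole ViTForImageClassification module pickled rather than just a state dict. Loading it therefore requires transformers to be importable and, on recent PyTorch (2.6+, where weights_only defaults to True), an explicit opt-out; a sketch, safe only for files you trust:

    import torch

    # Unpickles the full module; the transformers class definition must be
    # importable. weights_only=False is required on PyTorch >= 2.6 and should
    # only be used for trusted files, since unpickling can execute code.
    model = torch.load("vit_mnist.pth", map_location="cpu", weights_only=False)
    model.eval()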