Abdullah-Nazhat committed
Commit f465594 · verified · 1 Parent(s): 0a36980

Upload 2 files

Files changed (2):
  1. litetensormapper.py +134 -0
  2. train.py +186 -0
litetensormapper.py ADDED
@@ -0,0 +1,134 @@
import torch
from torch import nn, Tensor


class VecDyT(nn.Module):
    """Dynamic tanh normalization: x -> tanh(alpha * x) with a learned per-feature scale."""

    def __init__(self, input_shape):
        super().__init__()
        self.alpha = nn.Parameter(torch.randn(input_shape))

    def forward(self, x):
        x = torch.tanh(self.alpha * x)
        return x


class VecDyGeluSine(nn.Module):
    """Learned blend of a GELU branch and a sine branch, with per-feature input
    scales (alpha, beta) and scalar branch weights (gamma, etta)."""

    def __init__(self, input_shape):
        super().__init__()
        self.alpha = nn.Parameter(torch.randn(input_shape))
        self.beta = nn.Parameter(torch.randn(input_shape))
        self.gamma = nn.Parameter(torch.randn(1))
        self.etta = nn.Parameter(torch.randn(1))
        self.gelu = nn.GELU()

    def forward(self, x):
        x = self.gamma * self.gelu(self.alpha * x) + self.etta * torch.sin(self.beta * x)
        return x


class TTT(nn.Module):
    """Test-time-training memory: at every sequence position the linear mapping
    takes one SGD step on a self-supervised denoising loss, then the (detached)
    mapping of the current token is emitted."""

    def __init__(self, dim: int):
        super().__init__()
        self.mapping = nn.Linear(dim, dim, bias=False)

    def forward(self, in_seq: Tensor) -> Tensor:
        outs = []
        for seq in range(in_seq.size(1)):
            state = in_seq[:, seq, :]
            train_view = state + torch.randn_like(state)  # noisy view as input
            label_view = state                            # clean token as target
            loss = nn.functional.mse_loss(self.mapping(train_view), label_view)
            grads = torch.autograd.grad(
                loss, self.mapping.parameters(), create_graph=True)
            with torch.no_grad():
                # Inner-loop SGD step on the mapping weights.
                for param, grad in zip(self.mapping.parameters(), grads):
                    param -= 0.01 * grad
            readout = self.mapping(in_seq[:, seq, :]).detach()
            outs.append(readout)
        out = torch.stack(outs, dim=1)
        return out


class FFUnit(nn.Module):
    """Gated feedforward unit: a modulated branch multiplied elementwise by a
    linear projection of the same input."""

    def __init__(self, dim):
        super().__init__()
        self.proj = nn.Linear(dim, dim, bias=False)
        self.modulate = VecDyGeluSine(dim)

    def forward(self, x):
        u, v = x, x
        u = self.modulate(u)
        v = self.proj(v)
        g = u * v
        return g


class LiteTensorMapperBlock(nn.Module):
    """Two parallel paths over the input: a TTT memory path and a gated feedforward
    path, each preceded by its own VecDyT normalization; their outputs are summed."""

    def __init__(self, dim, num_patch):
        # num_patch is accepted for interface compatibility but not used by the block.
        super().__init__()
        self.norm_1 = VecDyT(dim)
        self.norm_2 = VecDyT(dim)
        self.memory = TTT(dim)
        self.feedforward = FFUnit(dim)

    def forward(self, x):
        memorypath, FFpath = x, x
        memorypath = self.norm_1(memorypath)
        memorypath = self.memory(memorypath)
        FFpath = self.norm_2(FFpath)
        FFpath = self.feedforward(FFpath)
        x = memorypath + FFpath
        return x


class LiteTensorMapper(nn.Module):
    """Stack of LiteTensorMapperBlocks applied to a (batch, seq, dim) tensor."""

    def __init__(self, d_model, num_patch, num_layers):
        super().__init__()
        self.model = nn.Sequential(
            *[LiteTensorMapperBlock(d_model, num_patch) for _ in range(num_layers)]
        )

    def forward(self, x):
        return self.model(x)
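
A minimal smoke-test sketch for the module above (hypothetical usage, not from the commit; it assumes the file is importable as litetensormapper):

    import torch
    from litetensormapper import LiteTensorMapper

    # Stack of 2 blocks over a (batch=4, seq=16, dim=32) tensor;
    # num_patch is accepted for interface compatibility but unused by the blocks.
    model = LiteTensorMapper(d_model=32, num_patch=16, num_layers=2)
    x = torch.randn(4, 16, 32)
    y = model(x)
    print(y.shape)  # torch.Size([4, 16, 32])

Note that the forward pass must run with gradient tracking enabled, since TTT calls torch.autograd.grad internally.
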
train.py ADDED
@@ -0,0 +1,186 @@
import os
import csv
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Normalize, RandomCrop, RandomHorizontalFlip, Compose

from litetensormapper import LiteTensorMapper


# Augmentation for training; evaluation uses a deterministic transform.
train_transform = Compose([
    RandomCrop(32, padding=4),
    RandomHorizontalFlip(),
    ToTensor(),
    Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

test_transform = Compose([
    ToTensor(),
    Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

training_data = datasets.CIFAR10(
    root='data',
    train=True,
    download=True,
    transform=train_transform
)

test_data = datasets.CIFAR10(
    root='data',
    train=False,
    download=True,
    transform=test_transform
)


batch_size = 128

train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=batch_size)


for X, y in test_dataloader:
    print(f"Shape of X [N, C, H, W]: {X.shape}")
    print(f"Shape of y: {y.shape} {y.dtype}")
    break


def check_sizes(image_size, patch_size):
    sqrt_num_patches, remainder = divmod(image_size, patch_size)
    assert remainder == 0, "`image_size` must be divisible by `patch_size`"
    num_patches = sqrt_num_patches ** 2
    return num_patches


device = "cuda" if torch.cuda.is_available() else "cpu"

print(f"Using {device} device")


class TensorMapperImageClassification(LiteTensorMapper):
    def __init__(
        self,
        image_size=32,
        patch_size=4,
        in_channels=3,
        num_classes=10,
        d_model=256,
        num_layers=4,
    ):
        num_patches = check_sizes(image_size, patch_size)
        super().__init__(d_model, num_patches, num_layers)
        # Non-overlapping patch embedding via a strided convolution.
        self.patcher = nn.Conv2d(
            in_channels, d_model, kernel_size=patch_size, stride=patch_size
        )
        self.classifier = nn.Linear(d_model, num_classes)

    def forward(self, x):
        patches = self.patcher(x)
        batch_size, num_channels, _, _ = patches.shape
        patches = patches.permute(0, 2, 3, 1)
        patches = patches.view(batch_size, -1, num_channels)  # (N, num_patches, d_model)
        embedding = self.model(patches)
        embedding = embedding.mean(dim=1)  # global average pooling over patches
        out = self.classifier(embedding)
        return out


model = TensorMapperImageClassification().to(device)
print(model)


loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)


def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.train()
    train_loss = 0
    correct = 0
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        pred = model(X)
        loss = loss_fn(pred, y)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, labels = torch.max(pred.data, 1)
        correct += labels.eq(y.data).type(torch.float).sum().item()

        if batch % 100 == 0:
            current = batch * len(X)
            print(f"loss: {loss.item():>7f} [{current:>5d}/{size:>5d}]")

    train_loss /= num_batches
    train_accuracy = 100. * correct / size
    print(f"Train accuracy: {train_accuracy:>0.1f}%")
    return train_loss, train_accuracy


def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss = 0
    correct = 0

    # No torch.no_grad() here: the TTT layers call torch.autograd.grad inside the
    # forward pass, which requires gradient tracking even at evaluation time.
    for X, y in dataloader:
        X, y = X.to(device), y.to(device)
        pred = model(X)
        test_loss += loss_fn(pred, y).item()
        correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
    test_accuracy = 100 * correct
    return test_loss, test_accuracy


logname = "/content/sample_data/logs_cifar10.csv"
if not os.path.exists(logname):
    with open(logname, 'w') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow(['epoch', 'train loss', 'train acc',
                            'test loss', 'test acc'])


epochs = 100
for epoch in range(epochs):
    print(f"Epoch {epoch+1}\n-----------------------------------")
    train_loss, train_acc = train(train_dataloader, model, loss_fn, optimizer)
    test_loss, test_acc = test(test_dataloader, model, loss_fn)
    with open(logname, 'a') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow([epoch+1, train_loss, train_acc,
                            test_loss, test_acc])
print("Done!")


path = "/content/sample_data"
model_name = "LiteTensorMapperImageClassification_cifar10"
torch.save(model.state_dict(), f"{path}/{model_name}.pth")
print(f"Saved model state to {path}/{model_name}.pth")
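
To reload the checkpoint for later evaluation, a minimal sketch (assuming the TensorMapperImageClassification definition above is in scope):

    # Rebuild the architecture, then restore the trained weights.
    model = TensorMapperImageClassification().to(device)
    model.load_state_dict(torch.load(f"{path}/{model_name}.pth", map_location=device))
    model.eval()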