dadadar committed on
Commit
ccdb773
·
1 Parent(s): 62a5913

Upload 12 files

Browse files
model_epoch10.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aff523c26c52c65a5ede38fabf20e8b01b4ce7ff737680d744cea2d77aac5f75
3
+ size 44797301
model_epoch10_lr0.0001_best_epoch.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:171bbd7df4b8427e670e9cb17d5980961f68a5b7dfa8a44537c7230f00107f6b
3
+ size 44790117
model_epoch10_lr0.001.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc4e6813cb78cb3a1c433468fba83b438a3fdae01939310f71429316f2e4dc5a
3
+ size 44798293
model_epoch20_lr0.0001_best_epoch2.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6edaec6d92d744347df888720407e0147e6c5f3c34c19be800d58607f9a67c19
3
+ size 44799969
model_epoch20_lr0.001.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37fe8b37d2bd220158de3e5e68829477afcb5757968336cde7fc316638b40294
3
+ size 44798293
model_epoch20_lr0.01_best_epoch.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c69be32be19548182b7c0324203e8cf6934a1b0b768c2aff55fa070e42fa01c
3
+ size 44789869
model_epoch5.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8aacf1b591781e2f2b43c2ef73ea73f682e0a181d01df903b469438537e51465
3
+ size 44797177
model_resnet101_epoch5_lr0.0001_best_epoch.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:332d3de6c316fdaee58005ced4bfc51dd9521f59f6523865753e22ecce8acc85
3
+ size 170693625
model_resnet50_epoch5_lr0.0001_best_epoch.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bcb9fd3f2ad246784fb094c37fc078db08e0aae7719d7c780fc5fc37d53aa45f
3
+ size 94392261
plot.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import matplotlib.pyplot as plt


# Accuracy values recorded per training epoch (percentages).
epochs = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
train_accuracy = [99, 99, 98, 98, 98, 99, 98, 99, 98, 99]
val_accuracy = [99, 99, 98, 98, 98, 99, 98, 99, 98, 99]


plt.figure(figsize=(10, 6))
plt.plot(epochs, train_accuracy, label='Train Accuracy', marker='o')
plt.plot(epochs, val_accuracy, label='Validation Accuracy', marker='o')
plt.xlabel('Epochs')
plt.ylabel('Accuracy (%)')
plt.title('Training and Validation Accuracy')
plt.legend()
plt.grid(True)

# BUG FIX: save the figure BEFORE show() — show() may clear/close the
# current figure when its window is dismissed, which previously made
# plot.png come out blank.
plt.savefig('plot.png')
plt.show()
resnet_test.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.autograd import Variable
import pandas as pd
from PIL import Image

# Class index -> label mapping from training (ImageFolder alphabetical order).
classes = ('cat', 'dog')

# Must match the preprocessing used at training time:
# resize to the ResNet input size, then scale pixels to [-1, 1].
transform_test = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# NOTE(review): torch.load unpickles a full model object — only load
# checkpoints from a trusted source.
model = torch.load("model_epoch20_lr0.0001_best_epoch2.pth")
model.eval()
model.to(DEVICE)

predictions = []

# BUG FIX: run inference under no_grad() (no autograd bookkeeping is needed
# at test time) and force RGB — grayscale/RGBA images would otherwise crash
# the 3-channel Normalize / conv stem. Image.open is also used as a context
# manager so file handles are released.
with torch.no_grad():
    for index in range(1, 501):
        img_path = f'datasets/datatest/test/{index}.jpg'  # Assuming file names are 1.jpg, 2.jpg, 3.jpg, etc.

        # Load the image, preprocess it, and make a prediction.
        with Image.open(img_path) as img:
            tensor = transform_test(img.convert('RGB'))
        tensor = tensor.unsqueeze(0).to(DEVICE)
        output = model(tensor)
        _, pred = torch.max(output.data, 1)
        predictions.append(pred.item())

# Create a DataFrame from the predictions (class indices: 0=cat, 1=dog).
df = pd.DataFrame(predictions, columns=['prediction'])

# Save the DataFrame to an Excel file.
df.to_excel('predictions.xlsx', index=False)
41
+
42
+
43
+
resnet_train.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch.optim as optim
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models
#from effnetv2 import effnetv2_s
from torch.autograd import Variable

# Training hyper-parameters.
modellr = 1e-4          # initial learning rate for Adam
BATCH_SIZE = 64
EPOCHS = 20
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Best-validation-accuracy bookkeeping, updated by val() each epoch.
best_accuracy = 0
best_epoch = 0

# Data preprocessing: resize to the ResNet input size, then scale pixels
# to [-1, 1]. The train and val pipelines were two byte-identical copies;
# they are de-duplicated here (Compose is stateless, so sharing is safe).
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
])
transform_test = transform

dataset_train = datasets.ImageFolder('datasets/datasets/train', transform)
print(dataset_train.imgs)

print(dataset_train.class_to_idx)
dataset_test = datasets.ImageFolder('datasets/datasets/val', transform_test)

train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=False)

# Model & training settings: ImageNet-pretrained ResNet-18 with the
# classifier head replaced for 2 classes (cat/dog).
criterion = nn.CrossEntropyLoss()
model = torchvision.models.resnet18(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, 2)
model.to(DEVICE)

optimizer = optim.Adam(model.parameters(), lr=modellr)
62
+
63
#Learning rate adjust (no need)
def adjust_learning_rate(optimizer, epoch, base_lr=None):
    """Decay the learning rate by a factor of 10 every 50 epochs.

    BUG FIX (doc): the old docstring claimed decay every 30 epochs, but the
    code uses ``epoch // 50``; the docstring now matches the implementation.

    Args:
        optimizer: optimizer whose param-group learning rates are rewritten.
        epoch: 1-based epoch index.
        base_lr: starting learning rate; when None (the default, preserving
            the original call signature) the module-level ``modellr`` is used.
    """
    if base_lr is None:
        base_lr = modellr
    modellrnew = base_lr * (0.1 ** (epoch // 50))
    print("lr:", modellrnew)
    for param_group in optimizer.param_groups:
        param_group['lr'] = modellrnew
70
+
71
+
72
#Training method
def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch over ``train_loader``.

    Prints progress every 50 batches and the epoch-average loss at the end.
    Uses the module-level ``criterion`` (CrossEntropyLoss).

    Args:
        model: the network being trained (already on ``device``).
        device: torch.device to move each batch to.
        train_loader: DataLoader over the training set.
        optimizer: optimizer stepped once per batch.
        epoch: 1-based epoch index, used only for logging.
    """
    model.train()
    sum_loss = 0
    total_num = len(train_loader.dataset)
    print(total_num, len(train_loader))
    for batch_idx, (data, target) in enumerate(train_loader):
        # Modernized: Variable is a deprecated no-op wrapper since PyTorch
        # 0.4; .to(device) alone is the supported form.
        data, target = data.to(device), target.to(device)
        output = model(data)
        loss = criterion(output, target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # .item() is the supported scalar accessor (.data bypasses autograd
        # version checks).
        print_loss = loss.item()
        sum_loss += print_loss
        if (batch_idx + 1) % 50 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
                100. * (batch_idx + 1) / len(train_loader), loss.item()))
    ave_loss = sum_loss / len(train_loader)
    print('epoch:{},loss:{}'.format(epoch, ave_loss))
93
+
94
+
95
# Validation pass that also tracks and checkpoints the best model so far.
def val(model, device, test_loader, epoch):
    """Evaluate ``model`` on the validation set and checkpoint on improvement.

    Updates the module-level ``best_accuracy`` / ``best_epoch`` and pickles
    the full model to disk whenever this epoch beats the previous best.

    Args:
        model: the network to evaluate (already on ``device``).
        device: torch.device to move each batch to.
        test_loader: DataLoader over the validation set.
        epoch: 1-based epoch index recorded when a new best is found.
    """
    global best_accuracy, best_epoch
    model.eval()
    test_loss = 0
    correct = 0
    total_num = len(test_loader.dataset)
    print(total_num, len(test_loader))
    with torch.no_grad():
        for data, target in test_loader:
            # Variable is a deprecated no-op wrapper; .to(device) suffices.
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = criterion(output, target)
            _, pred = torch.max(output.data, 1)
            correct += torch.sum(pred == target)
            test_loss += loss.item()
        correct = correct.item()
        acc = correct / total_num
        avgloss = test_loss / len(test_loader)
        print('\nVal set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            avgloss, correct, len(test_loader.dataset), 100 * acc))
        # Check if this epoch's accuracy is better than the best so far.
        if acc > best_accuracy:
            # BUG FIX: removed the dead statement "best_accuracy, best_epoch"
            # (a no-op expression left over from editing).
            best_accuracy = acc
            best_epoch = epoch
            # NOTE(review): saves the entire model object via pickle, which
            # the companion resnet_test.py expects; state_dict would be more
            # robust but would break its torch.load usage.
            torch.save(model, 'model_resnet18_epoch20_lr0.0001_best_epoch.pth')
127
+
128
+
129
+
130
# Main loop: one learning-rate adjustment, one training pass and one
# validation pass per epoch; val() keeps the best checkpoint up to date.
for current_epoch in range(1, EPOCHS + 1):
    adjust_learning_rate(optimizer, current_epoch)
    train(model, DEVICE, train_loader, optimizer, current_epoch)
    val(model, DEVICE, test_loader, current_epoch)

# Report which epoch produced the checkpoint that was saved to disk.
print(f"Best model achieved at epoch {best_epoch} with accuracy: {best_accuracy * 100:.2f}%")
138
+
139
+
140
+