Upload dis-small-and-small-train.ipynb
Browse files
dis-small-and-small-train.ipynb
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
# --- Teacher setup: fetch pretrained ResNet-50 weights and prepare CIFAR-10 ---
# Teacher checkpoint: https://huggingface.co/Hifo/KDExperiment/resolve/main/big

import urllib.request

# Download the teacher ("big") checkpoint into the working directory.
urllib.request.urlretrieve("https://huggingface.co/Hifo/KDExperiment/resolve/main/big", "big")

import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torchvision.models import resnet50, ResNet50_Weights
import torchvision.datasets as datasets

# Run on the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Teacher network: a ResNet-50 whose classifier head is swapped for the
# 10 CIFAR classes, then restored from the downloaded checkpoint.
big = resnet50()
big.fc = nn.Linear(2048, 10)
big.load_state_dict(torch.load("big", weights_only = True, map_location=device))
big.to(device)

# CIFAR-10 pipeline: tensors rescaled to roughly [-1, 1]; the single-value
# mean/std broadcasts over all three channels.
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=64, shuffle=False)
./data/cifar-10-python.tar.gz\n","output_type":"stream"},{"name":"stderr","text":"100%|██████████| 170498071/170498071 [00:15<00:00, 11154494.28it/s]\n","output_type":"stream"},{"name":"stdout","text":"Extracting ./data/cifar-10-python.tar.gz to ./data\nFiles already downloaded and verified\n","output_type":"stream"}]},{"cell_type":"code","source":"# Using pytorch train/test function\ndef train(model, train_loader, epochs, learning_rate, device):\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n\n model.train()\n\n for epoch in range(epochs):\n running_loss = 0.0\n for inputs, labels in train_loader:\n # inputs: A collection of batch_size images\n # labels: A vector of dimensionality batch_size with integers denoting class of each image\n inputs, labels = inputs.to(device), labels.to(device)\n\n optimizer.zero_grad()\n outputs = model(inputs)\n\n # outputs: Output of the network for the collection of images. A tensor of dimensionality batch_size x num_classes\n # labels: The actual labels of the images. 
Vector of dimensionality batch_size\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n running_loss += loss.item()\n\n print(f\"Epoch {epoch+1}/{epochs}, Loss: {running_loss / len(train_loader)}\")\n\ndef test(model, test_loader, device):\n model.to(device)\n model.eval()\n\n correct = 0\n total = 0\n\n with torch.no_grad():\n for inputs, labels in test_loader:\n inputs, labels = inputs.to(device), labels.to(device)\n\n outputs = model(inputs)\n _, predicted = torch.max(outputs.data, 1)\n\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n\n accuracy = 100 * correct / total\n print(f\"Test Accuracy: {accuracy:.2f}%\")\n return accuracy","metadata":{"execution":{"iopub.status.busy":"2024-10-03T18:33:46.533587Z","iopub.execute_input":"2024-10-03T18:33:46.533971Z","iopub.status.idle":"2024-10-03T18:33:46.544247Z","shell.execute_reply.started":"2024-10-03T18:33:46.533927Z","shell.execute_reply":"2024-10-03T18:33:46.543318Z"},"trusted":true},"execution_count":6,"outputs":[]},{"cell_type":"code","source":"test_accuracy_deep = test(big, test_loader, device)","metadata":{"execution":{"iopub.status.busy":"2024-10-03T18:33:46.545458Z","iopub.execute_input":"2024-10-03T18:33:46.545823Z","iopub.status.idle":"2024-10-03T18:33:51.286987Z","shell.execute_reply.started":"2024-10-03T18:33:46.545782Z","shell.execute_reply":"2024-10-03T18:33:51.286074Z"},"trusted":true},"execution_count":7,"outputs":[{"name":"stdout","text":"Test Accuracy: 85.11%\n","output_type":"stream"}]},{"cell_type":"code","source":"big_total_params = sum(p.numel() for p in 
# Teacher accuracy on the CIFAR-10 test set (sanity check before distilling).
test_accuracy_deep = test(big, test_loader, device)

# Teacher parameter count (~23.5M for a ResNet-50 with a 10-way head).
big_total_params = sum(p.numel() for p in big.parameters())
f'{big_total_params:,}'

# ---- Knowledge distillation: ResNet-18 student taught by the ResNet-50 teacher ----
# Student weights are freshly initialized; for copying some teacher layers see
# https://discuss.pytorch.org/t/copy-weights-of-some-layers/170016

torch.manual_seed(1337)
from torchvision.models import resnet18
import torch.nn.functional as F

transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=64, shuffle=False)

# FIX: `pretrained=False` has been deprecated since torchvision 0.13 (the
# original run's stderr shows the UserWarning); `weights=None` is the
# equivalent, warning-free spelling.
dis_small = resnet18(weights=None)
dis_small.fc = nn.Linear(512, 10)
dis_small = dis_small.to(device)

def distillation_loss(y_student, y_true, y_teacher, temperature=3, alpha=0.5):
    """Hinton-style knowledge-distillation loss.

    Combines cross-entropy against the ground-truth labels ("hard" loss) with
    KL divergence between temperature-softened student and teacher
    distributions ("soft" loss).

    Args:
        y_student: student logits, shape (batch, num_classes).
        y_true: integer class labels, shape (batch,).
        y_teacher: teacher logits, shape (batch, num_classes).
        temperature: softening temperature T; T > 1 spreads probability mass.
        alpha: weight of the hard loss; (1 - alpha) weights the soft loss.

    Returns:
        Scalar loss tensor.
    """
    hard_loss = nn.CrossEntropyLoss()(y_student, y_true)
    # FIX 1: reduction='batchmean' matches the mathematical definition of KL
    # divergence; the default 'mean' also divides by num_classes and emitted a
    # UserWarning in the original run.
    # FIX 2: scale the soft loss by T^2 (Hinton et al., 2015) so its gradient
    # magnitude is independent of the chosen temperature.
    soft_loss = nn.KLDivLoss(reduction='batchmean')(
        F.log_softmax(y_student / temperature, dim=1),
        F.softmax(y_teacher / temperature, dim=1),
    ) * (temperature ** 2)
    return alpha * hard_loss + (1 - alpha) * soft_loss

def dis_train(big, small, train_loader, epochs, learning_rate, device):
    """Train `small` (student) to mimic `big` (teacher) via distillation.

    Only the student's parameters are optimized; the teacher is used purely
    to produce soft targets.
    """
    optimizer = optim.Adam(small.parameters(), lr=learning_rate)

    # FIX: force the teacher into inference mode so its BatchNorm layers use
    # running statistics (not per-batch ones) while producing targets.
    big.eval()
    small.train()

    # Train the student model with distillation.
    for epoch in range(epochs):
        running_loss = 0.0
        for images, labels in train_loader:
            images, labels = images.to(device), labels.to(device)

            # Teacher targets — no gradients flow through the teacher.
            with torch.no_grad():
                teacher_outputs = big(images)

            optimizer.zero_grad()
            student_outputs = small(images)
            loss = distillation_loss(student_outputs, labels, teacher_outputs)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

        print(f"Epoch {epoch+1}/{epochs}, Loss: {running_loss / len(train_loader)}")
# ---- Distill the student, then train an identical student from scratch ----

# Distill for 25 epochs — the same budget the hard-label baseline gets below,
# so the two accuracies are directly comparable.
dis_train(big, dis_small, train_loader, epochs=25, learning_rate=0.001, device=device)

# Distilled-student accuracy on the held-out test set.
test_accuracy_deep = test(dis_small, test_loader, device)

# Student parameter count (~11.2M — roughly half the teacher's 23.5M).
dis_small_total_params = sum(p.numel() for p in dis_small.parameters())
f'{dis_small_total_params:,}'

torch.save(dis_small.state_dict(), "./dis_small")

# ---- Small_raw: the same architecture trained on hard labels only ----
# Re-seed and rebuild the loaders so this baseline starts from the same
# initialization and data order as the distilled student did.
torch.manual_seed(1337)
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
train_dataset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
test_dataset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=64, shuffle=False)

# FIX: `pretrained=False` is deprecated since torchvision 0.13; `weights=None`
# is the equivalent, warning-free spelling.
small = resnet18(weights=None)
small.fc = nn.Linear(512, 10)
small = small.to(device)

# Baseline training with plain cross-entropy (no teacher).
train(small, train_loader, epochs=25, learning_rate=0.001, device=device)

test_accuracy_deep = test(small, test_loader, device)

torch.save(small.state_dict(), "./small")
|