# Author: Chanakya Hosamani
# Commit: Update HF Space with best checkpoint (a89cc74)
import gradio as gr
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
from PIL import Image
import numpy as np
# CIFAR-100 class names
CIFAR100_CLASSES = [
'apple', 'aquarium_fish', 'baby', 'bear', 'beaver', 'bed', 'bee', 'beetle',
'bicycle', 'bottle', 'bowl', 'boy', 'bridge', 'bus', 'butterfly', 'camel',
'can', 'castle', 'caterpillar', 'cattle', 'chair', 'chimpanzee', 'clock',
'cloud', 'cockroach', 'couch', 'crab', 'crocodile', 'cup', 'dinosaur',
'dolphin', 'elephant', 'flatfish', 'forest', 'fox', 'girl', 'hamster',
'house', 'kangaroo', 'keyboard', 'lamp', 'lawn_mower', 'leopard', 'lion',
'lizard', 'lobster', 'man', 'maple_tree', 'motorcycle', 'mountain', 'mouse',
'mushroom', 'oak_tree', 'orange', 'orchid', 'otter', 'palm_tree', 'pear',
'pickup_truck', 'pine_tree', 'plain', 'plate', 'poppy', 'porcupine',
'possum', 'rabbit', 'raccoon', 'ray', 'road', 'rocket', 'rose',
'sea', 'seal', 'shark', 'shrew', 'skunk', 'skyscraper', 'snail', 'snake',
'spider', 'squirrel', 'streetcar', 'sunflower', 'sweet_pepper', 'table',
'tank', 'telephone', 'television', 'tiger', 'tractor', 'train', 'trout',
'tulip', 'turtle', 'wardrobe', 'whale', 'willow_tree', 'wolf', 'woman', 'worm'
]
# ResNet-18 Architecture
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion * planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=100):
super(ResNet, self).__init__()
self.in_planes = 64 # Changed from 32 to 64
# For CIFAR-100, use kernel_size=3 and stride=1 (not 7 and 2 like ImageNet)
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False) # Changed from 32 to 64
self.bn1 = nn.BatchNorm2d(64) # Changed from 32 to 64
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1) # Changed from 32 to 64
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) # Changed from 64 to 128
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) # Changed from 128 to 256
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) # Changed from 256 to 512
self.linear = nn.Linear(512 * block.expansion, num_classes) # Changed from 256 to 512
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
# Load model
def load_model():
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=100)
# Load checkpoint
try:
checkpoint = torch.load('checkpoints/resnet18_best.pth', map_location=device)
model.load_state_dict(checkpoint['model_state_dict'])
print(f"Model loaded successfully! Best accuracy: {checkpoint.get('best_acc', 'N/A')}%")
except Exception as e:
print(f"Error loading model: {e}")
print("Using randomly initialized model (for demo purposes)")
model = model.to(device)
model.eval()
return model, device
# Image preprocessing
def preprocess_image(image):
transform = transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))
])
if image.mode != 'RGB':
image = image.convert('RGB')
img_tensor = transform(image).unsqueeze(0)
return img_tensor
# Prediction function
def predict(image):
if image is None:
return None
# Preprocess
img_tensor = preprocess_image(image)
img_tensor = img_tensor.to(device)
# Predict
with torch.no_grad():
outputs = model(img_tensor)
probabilities = F.softmax(outputs, dim=1)[0]
# Get top 5 predictions
top5_prob, top5_idx = torch.topk(probabilities, 5)
# Format results
results = {}
for i in range(5):
class_name = CIFAR100_CLASSES[top5_idx[i]]
confidence = top5_prob[i].item()
results[class_name] = float(confidence)
return results
# Initialize model
print("Loading model...")
model, device = load_model()
print("Model loaded!")
# Create Gradio interface
title = "ResNet-18 CIFAR-100 Image Classifier"
description = """
## 🎯 ResNet-18 trained on CIFAR-100 Dataset
This model achieves **77.18% test accuracy** on CIFAR-100!
**How to use:**
1. Upload an image or use one of the examples
2. The model will classify it into one of 100 categories
3. See the top 5 predictions with confidence scores
**Note:** This model was trained on 32×32 images from CIFAR-100, so it works best with:
- Small objects
- Centered subjects
- Simple backgrounds
- Animals, vehicles, household items, plants, etc.
**Training Details:**
- Architecture: ResNet-18 (11M parameters)
- Dataset: CIFAR-100 (100 classes)
- Techniques: OneCycleLR, Cutout, Label Smoothing
- Training Time: ~70 minutes on RTX 4070
"""
article = """
### Model Performance
- **Test Accuracy:** 77.18%
- **Train Accuracy:** 98.25%
- **Total Epochs:** 100
- **Training Time:** ~70 minutes
### Classes
The model can recognize 100 different classes including:
- **Animals (42 classes):** bear, beaver, bee, beetle, butterfly, camel, cattle, chimpanzee, caterpillar, crab, crocodile, dinosaur, dolphin, elephant, flatfish, fox, hamster, kangaroo, leopard, lion, lizard, lobster, mouse, otter, porcupine, possum, rabbit, raccoon, ray, seal, shark, shrew, skunk, snail, snake, spider, squirrel, tiger, trout, turtle, whale, wolf, worm
- **Vehicles (10 classes):** bicycle, bus, motorcycle, pickup_truck, lawn_mower, rocket, streetcar, tank, tractor, train
- **Household Items (15 classes):** bed, bottle, bowl, can, chair, clock, couch, cup, keyboard, lamp, plate, table, telephone, television, wardrobe
- **People (5 classes):** baby, boy, girl, man, woman
- **Plants:**
- **Flowers (5 classes):** orchid, poppy, rose, sunflower, tulip
- **Trees (5 classes):** maple_tree, oak_tree, palm_tree, pine_tree, willow_tree
- **Food (5 classes):** apple, mushroom, orange, pear, sweet_pepper
- **Nature & Structures (13 classes):** aquarium_fish, bridge, castle, cloud, forest, house, mountain, plain, road, sea, skyscraper
---
**Repository:** [GitHub](https://github.com/godsofheaven/Resnet-Model-Implementation-for-CIFAR-100-Dataset)
"""
# Create interface
demo = gr.Interface(
fn=predict,
inputs=gr.Image(type="pil", label="Upload Image"),
outputs=gr.Label(num_top_classes=5, label="Predictions"),
title=title,
description=description,
article=article,
examples=[
# Users can add their own example images
],
theme=gr.themes.Soft(),
analytics_enabled=False,
)
# Launch
if __name__ == "__main__":
demo.launch()