Update app.py

app.py (CHANGED)
@@ -10,36 +10,36 @@ from torchvision import transforms, models
 from torch import nn, optim
 from torch.utils.data import Dataset, DataLoader
 import time
-import …
+import random
 
-# Set device
-device = torch.device('…
+# Set device - use CPU for Hugging Face Spaces
+device = torch.device('cpu')
 print(f"Using device: {device}")
 
-# Step 1: Import the dataset
-print("Loading …
-
-
-# Take a sample to explore (first 1000 images for demonstration)
-print("Taking sample of 1000 images for exploration...")
-sample_data = list(dataset.take(1000))
+# Step 1: Import the CORRECT dataset with actual images
+print("Loading Food-101 dataset...")
+# Load a smaller sample for Hugging Face Spaces constraints
+dataset = load_dataset("ethz/food101", split="train[:5000]")  # Take first 5000 images for demo
 
 # Step 2: Check the rows and structure
 print("\nDataset structure sample:")
-print(f"Total samples in …
-print(f"Sample size we're using: {len(sample_data)}")
+print(f"Total samples in our sample: {len(dataset)}")
 print("\nFirst row example:")
-print(…
+print(dataset[0])
 
-# Extract images and labels
-images = [item['image'] for item in …
-labels = [item['label'] for item in …
+# Extract images and labels
+images = [item['image'] for item in dataset]
+labels = [item['label'] for item in dataset]
 
 print(f"\nUnique classes in sample: {len(set(labels))}")
 print(f"Class distribution:")
 class_counts = pd.Series(labels).value_counts()
 print(class_counts.head(10))
 
+# Get class names mapping
+id2label = dataset.features['label'].names
+num_classes = len(id2label)
+
 # Step 3: Visualize charts with matplotlib
 def create_visualizations():
     """Create exploratory visualizations"""
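For reference, here is a minimal sketch (not part of the commit) of what `load_dataset("ethz/food101", split="train[:5000]")` returns; it assumes the `datasets` package is installed and pulls only the same 5,000-image slice the app uses:

from datasets import load_dataset

# Same 5,000-image slice the app loads; downloads the data on first run
ds = load_dataset("ethz/food101", split="train[:5000]")

row = ds[0]
print(type(row["image"]))            # PIL image, as expected by the transforms later in the file
print(row["label"])                  # integer class id

# The ClassLabel feature provides the id -> name mapping used for predictions
id2label = ds.features["label"].names
print(len(id2label), id2label[row["label"]])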
@@ -52,20 +52,9 @@ def create_visualizations():
     ax1.set_ylabel('Count')
     ax1.tick_params(axis='x', rotation=45)
 
-    # Image size distribution (if available)
-    img_sizes = [(img.size[0], img.size[1]) for img in images[:100]]
-    widths = [size[0] for size in img_sizes]
-    heights = [size[1] for size in img_sizes]
-
-    ax2.scatter(widths, heights, alpha=0.6, color='green')
-    ax2.set_title('Image Dimensions Distribution')
-    ax2.set_xlabel('Width (pixels)')
-    ax2.set_ylabel('Height (pixels)')
-    ax2.grid(True, alpha=0.3)
-
     # Sample images grid
-
-
+    ax2.set_title('Sample Food Images')
+    ax2.axis('off')
     # Create a grid of sample images
     grid_img = Image.new('RGB', (400, 400), (255, 255, 255))
     for i in range(min(4, len(images))):
@@ -73,11 +62,22 @@ def create_visualizations():
         x = (i % 2) * 200
         y = (i // 2) * 200
         grid_img.paste(img, (x, y))
-
+    ax2.imshow(grid_img)
 
     # Class frequency pie chart
-
-
+    ax3.pie(class_counts.head(5), labels=class_counts.head(5).index, autopct='%1.1f%%', startangle=90)
+    ax3.set_title('Top 5 Classes Percentage')
+
+    # Image size distribution
+    img_sizes = [(img.size[0], img.size[1]) for img in images[:100]]
+    widths = [size[0] for size in img_sizes]
+    heights = [size[1] for size in img_sizes]
+
+    ax4.scatter(widths, heights, alpha=0.6, color='green')
+    ax4.set_title('Image Dimensions Distribution')
+    ax4.set_xlabel('Width (pixels)')
+    ax4.set_ylabel('Height (pixels)')
+    ax4.grid(True, alpha=0.3)
 
     plt.tight_layout()
     return fig
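The figure and the axes used above (`ax1` through `ax4`) are created earlier in `create_visualizations()`, outside the displayed hunks. A plausible setup, shown only for orientation and not taken from the commit, would be:

import matplotlib.pyplot as plt

# Hypothetical 2x2 grid matching the four panels referenced in the hunks above
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12, 10))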
@@ -89,21 +89,29 @@ print("Visualizations created successfully!")
 
 # Step 4: Train/Test Split
 print("\nCreating train/test split...")
-
-
-
+train_size = min(4000, len(images) - 1000)  # Keep it small for Spaces
+test_size = min(1000, len(images) - train_size)
+
+train_images = images[:train_size]
+train_labels = labels[:train_size]
+test_images = images[train_size:train_size+test_size]
+test_labels = labels[train_size:train_size+test_size]
 
 print(f"Training samples: {len(train_images)}")
 print(f"Testing samples: {len(test_images)}")
-print(f"Number of classes: {…
+print(f"Number of classes: {num_classes}")
 
 # Step 5: Set up EfficientNet-B0 model
 class FoodClassifier(nn.Module):
-    def __init__(self, num_classes=…
+    def __init__(self, num_classes=101):
         super(FoodClassifier, self).__init__()
         # Load pre-trained EfficientNet-B0
         self.effnet = models.efficientnet_b0(pretrained=True)
 
+        # Freeze most layers for demo training
+        for param in self.effnet.parameters():
+            param.requires_grad = False
+
         # Replace the classifier head
         num_features = self.effnet.classifier[1].in_features
         self.effnet.classifier = nn.Sequential(
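The freeze-then-replace pattern introduced above is standard transfer learning: only the new classification head stays trainable. A standalone sketch follows; the replacement head here is illustrative, since the hunk cuts off before showing the actual contents of `nn.Sequential(...)`:

import torch.nn as nn
from torchvision import models

backbone = models.efficientnet_b0(pretrained=True)

# Freeze every pretrained weight so only the new head is fitted
for param in backbone.parameters():
    param.requires_grad = False

# Swap the 1000-class ImageNet head for a 101-class Food-101 head (illustrative layers)
num_features = backbone.classifier[1].in_features  # 1280 for EfficientNet-B0
backbone.classifier = nn.Sequential(
    nn.Dropout(p=0.2),
    nn.Linear(num_features, 101),
)

trainable = sum(p.numel() for p in backbone.parameters() if p.requires_grad)
print(f"Trainable parameters: {trainable}")  # only the new head's weights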
@@ -118,8 +126,8 @@ class FoodClassifier(nn.Module):
 train_transform = transforms.Compose([
     transforms.Resize((224, 224)),
     transforms.RandomHorizontalFlip(),
-    transforms.RandomRotation(…
-    transforms.ColorJitter(brightness=0.…
+    transforms.RandomRotation(5),
+    transforms.ColorJitter(brightness=0.1, contrast=0.1),
     transforms.ToTensor(),
     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
 ])
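For context, the gentler augmentation above still produces the fixed-size, ImageNet-normalized tensors EfficientNet expects. A quick check on a throwaway image (sketch only, not part of the commit):

from PIL import Image
from torchvision import transforms

train_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomHorizontalFlip(),
    transforms.RandomRotation(5),
    transforms.ColorJitter(brightness=0.1, contrast=0.1),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

dummy = Image.new("RGB", (512, 384))   # stand-in for a Food-101 photo
x = train_transform(dummy)
print(x.shape, x.dtype)                # torch.Size([3, 224, 224]) torch.float32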
@@ -136,7 +144,6 @@ class FoodDataset(Dataset):
         self.images = images
         self.labels = labels
         self.transform = transform
-        self.label_to_idx = {label: idx for idx, label in enumerate(set(labels))}
 
     def __len__(self):
         return len(self.images)
@@ -144,41 +151,44 @@ class FoodDataset(Dataset):
     def __getitem__(self, idx):
         img = self.images[idx]
         label = self.labels[idx]
-
+
+        # Convert to RGB if needed
+        if img.mode != 'RGB':
+            img = img.convert('RGB')
 
         if self.transform:
             img = self.transform(img)
 
-        return img, …
+        return img, label
 
 # Create datasets
 train_dataset = FoodDataset(train_images, train_labels, transform=train_transform)
 test_dataset = FoodDataset(test_images, test_labels, transform=test_transform)
 
-# Create data loaders
-train_loader = DataLoader(train_dataset, batch_size=…
-test_loader = DataLoader(test_dataset, batch_size=…
+# Create data loaders with small batch sizes for Spaces
+train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
+test_loader = DataLoader(test_dataset, batch_size=16, shuffle=False)
 
 # Initialize model
-num_classes = len(set(labels))
 model = FoodClassifier(num_classes=num_classes).to(device)
 print(f"Initialized EfficientNet-B0 model with {num_classes} output classes")
 
 # Training function (simplified for demo)
 def train_model():
     """Simple training function for demonstration"""
-    print("\nStarting model training...")
+    print("\nStarting model training (demo mode)...")
     criterion = nn.CrossEntropyLoss()
-    optimizer = optim.Adam(model.parameters(), lr=0.001)
+    optimizer = optim.Adam(model.effnet.classifier.parameters(), lr=0.001)  # Only train classifier
 
     model.train()
     total_loss = 0
     correct = 0
    total = 0
 
-    # Train on a small subset for demo
+    # Train on a very small subset for demo
+    max_batches = 20  # Very limited for Spaces
     for batch_idx, (images, labels) in enumerate(train_loader):
-        if batch_idx >= …
+        if batch_idx >= max_batches:
             break
 
         images, labels = images.to(device), labels.to(device)
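With `batch_size=8` and `max_batches=20`, the demo loop above touches at most 160 of the 4,000 training images per run. A hypothetical smoke test, assuming `train_dataset` from this file is in scope:

from torch.utils.data import DataLoader

# train_dataset as defined in app.py (assumed available here)
loader = DataLoader(train_dataset, batch_size=8, shuffle=True)
images, labels = next(iter(loader))

print(images.shape)   # torch.Size([8, 3, 224, 224]) after the train transforms
print(labels.shape)   # torch.Size([8]) integer class ids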
@@ -194,11 +204,11 @@ def train_model():
         total += labels.size(0)
         correct += (predicted == labels).sum().item()
 
-        if batch_idx % …
-            print(f"Batch {batch_idx}/…
+        if batch_idx % 5 == 0:
+            print(f"Batch {batch_idx}/{max_batches} - Loss: {loss.item():.4f}")
 
-    accuracy = 100 * correct / total
-    avg_loss = total_loss / min(…
+    accuracy = 100 * correct / total if total > 0 else 0
+    avg_loss = total_loss / min(max_batches, len(train_loader))
     print(f"Training completed! Average Loss: {avg_loss:.4f}, Accuracy: {accuracy:.2f}%")
     return avg_loss, accuracy
 
@@ -206,6 +216,10 @@ def train_model():
 def predict_food(image):
     """Predict food class from uploaded image"""
     try:
+        # Convert to RGB if needed
+        if image.mode != 'RGB':
+            image = image.convert('RGB')
+
         # Preprocess image
         img_tensor = test_transform(image).unsqueeze(0).to(device)
 
@@ -221,7 +235,8 @@ def predict_food(image):
         # Create results string
         results = []
         for i in range(top5_prob.size(0)):
-
+            class_idx = top5_catid[i].item()
+            class_name = id2label[class_idx] if class_idx < len(id2label) else f"Class {class_idx}"
             probability = top5_prob[i].item() * 100
             results.append(f"{class_name}: {probability:.2f}%")
 
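`top5_prob` and `top5_catid` come from code just above this hunk that is not shown. The usual pattern, sketched here under that assumption, is a softmax over the logits followed by `torch.topk`:

import torch

# model, img_tensor and id2label as defined in app.py (assumed in scope)
with torch.no_grad():
    logits = model(img_tensor)                # shape [1, num_classes]
    probs = torch.softmax(logits[0], dim=0)   # per-class probabilities
    top5_prob, top5_catid = torch.topk(probs, 5)

for p, idx in zip(top5_prob, top5_catid):
    print(f"{id2label[idx.item()]}: {p.item() * 100:.2f}%")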
@@ -266,10 +281,9 @@ with gr.Blocks(title="Food Classifier") as demo:
     with gr.Tabs():
         with gr.TabItem("Dataset Explorer"):
             gr.Markdown("## Dataset Exploration")
-            gr.Markdown(f"- **Dataset**: …
-            gr.Markdown(f"- **Total Images**: …
-            gr.Markdown(f"- **…
-            gr.Markdown(f"- **Unique Classes**: {len(set(labels))}")
+            gr.Markdown(f"- **Dataset**: Food-101 (sample)")
+            gr.Markdown(f"- **Total Images**: 101,000 (using 5,000 sample)")
+            gr.Markdown(f"- **Unique Classes**: {num_classes}")
             gr.Plot(vis_fig)
 
         with gr.TabItem("Classifier"):
@@ -289,10 +303,13 @@ with gr.Blocks(title="Food Classifier") as demo:
             )
 
             gr.Markdown("### Training Summary")
-            gr.Markdown(f"- **Model**: EfficientNet-B0")
+            gr.Markdown(f"- **Model**: EfficientNet-B0 (transfer learning)")
             gr.Markdown(f"- **Training Accuracy**: {train_acc:.2f}%")
             gr.Markdown(f"- **Training Loss**: {train_loss:.4f}")
             gr.Markdown("- **Note**: This is a demo training on a small subset. For full training, use Google Colab/Kaggle.")
+
+            gr.Markdown("### Dataset Information")
+            gr.Markdown("- This dataset consists of 101 food categories, with 101'000 images. For each class, 250 manually reviewed test images are provided as well as 750 training images.")
 
 # Launch the app
 if __name__ == "__main__":
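The hunks end at the `if __name__ == "__main__":` guard, so the launch call itself is not shown. A typical Gradio entry point (illustrative only, not taken from the commit) would be:

if __name__ == "__main__":
    demo.launch()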