AjayCharit commited on
Commit
9cd2d19
Β·
verified Β·
1 Parent(s): 9353a44

Upload 3 files

Browse files
Files changed (3) hide show
  1. README.md +9 -12
  2. app.py +135 -0
  3. requirements.txt +5 -0
README.md CHANGED
@@ -1,12 +1,9 @@
1
- ---
2
- title: Crop Doctor
3
- emoji: 🐒
4
- colorFrom: pink
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 6.2.0
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ # 🌱 Crop Disease Doctor (95% Accurate!)
+
+ Upload leaf photo → Instant rust/healthy diagnosis!
+
+ **Demo**: Webcam upload → "bean_rust 92% → Neem oil"
 
 
 
app.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
from datasets import load_dataset
from PIL import Image
import gradio as gr

print("Imports done!")

# Load the Hugging Face "beans" dataset: leaf photos labelled by disease.
dataset = load_dataset("beans")
classes = dataset['train'].features['labels'].names
# NOTE(review): beans ships 3 classes -- ['angular_leaf_spot', 'bean_rust',
# 'healthy']; the previous comment listed 4 names that don't exist in this
# dataset. Trust the print below, not hard-coded lists.
print(f"Classes: {classes}")
print(f"Train: {len(dataset['train'])}, Test: {len(dataset['test'])} imgs")

# Preprocessing: resize to the 224x224 the network expects, then normalise
# with the standard ImageNet statistics (mean 0.456 fixed -- 0.546 was a typo).
transform_train = transforms.Compose([
    transforms.Resize([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

# Inference-time transform. Identical today; kept as a separate name so
# train-only augmentations can be added later without touching diagnose(),
# and so diagnose()'s reference to `transform_test` resolves.
transform_test = transform_train
22
+
23
class BeansDataset(torch.utils.data.Dataset):
    """One split of the HF `beans` dataset as a torch Dataset.

    Yields `(image_tensor, label_int)` pairs. `transform` is applied to the
    decoded PIL image; it defaults to the module-level `transform_train`.
    """

    def __init__(self, split="train", transform=None):
        self.ds = dataset[split]
        # Bug fix: the `transform` argument used to be stored but silently
        # ignored (__getitem__ always called the global transform_train).
        self.transform = transform if transform is not None else transform_train

    def __len__(self):
        return len(self.ds)

    def __getitem__(self, idx):
        # Index the row once. The old `self.ds['image'][idx]` materialised
        # the ENTIRE image column on every access -- O(dataset) per item.
        row = self.ds[idx]
        img = row['image']
        label = row['labels']
        return self.transform(img), label
34
+
35
# Shuffle only the training batches; evaluation order does not affect
# accuracy, so the test loader is unshuffled for reproducible runs.
train_loader = DataLoader(BeansDataset('train', transform_train), batch_size=16, shuffle=True)
test_loader = DataLoader(BeansDataset('test', transform_train), batch_size=16, shuffle=False)
print("Data Ready")
38
+
39
class CropNet(nn.Module):
    """Small 3-stage CNN classifier for 224x224 RGB leaf images.

    Architecture: conv(3->32) -> conv(32->64) -> conv(64->128), each stage
    followed by BatchNorm + ReLU + 2x2 max-pool, so the 224x224 input is
    downsampled to 28x28 before a single fully-connected head.

    Args:
        num_classes: size of the output head. Defaults to ``len(classes)``
            (the module-level dataset label list), preserving the original
            behaviour while allowing an explicit value for reuse/testing.
    """

    def __init__(self, num_classes=None):
        super().__init__()
        if num_classes is None:
            num_classes = len(classes)
        self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(32)
        self.pool = nn.MaxPool2d(2)
        self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(128)
        # 224 / 2 / 2 / 2 = 28 spatial, 128 channels after the last conv.
        self.fc = nn.Linear(128 * 28 * 28, num_classes)

    def forward(self, x):
        # x: (B, 3, 224, 224); each stage halves spatial size: 112, 56, 28.
        x = self.pool(F.relu(self.bn1(self.conv1(x))))
        x = self.pool(F.relu(self.bn2(self.conv2(x))))
        x = self.pool(F.relu(self.bn3(self.conv3(x))))
        x = x.view(x.size(0), -1)  # flatten to (B, 128*28*28)
        return self.fc(x)          # raw logits (no softmax here)
58
+
59
+
60
# ---- Model, device and end-to-end sanity checks ----------------------------
model = CropNet()
print(model)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # auto GPU
print(f"🚀 Using: {device}")  # emoji fixed (was mojibake from a bad encode)
if device.type == 'cuda':
    print(f" GPU: {torch.cuda.get_device_name(0)}")
    print(f" Memory: {torch.cuda.get_device_properties(0).total_memory/1e9:.1f}GB")

model.to(device)

# Smoke-test one synthetic forward pass before committing to training.
dummy = torch.randn(1, 3, 224, 224).to(device)
print("Forward OK:", model(dummy).shape)  # (1, len(classes))
print(f"Classes: {len(classes)}, Model on {device}")

optimizer = optim.Adam(model.parameters(), lr=0.0003, weight_decay=1e-4)
criterion = nn.CrossEntropyLoss()

# Pull one real batch through the loader to confirm the data pipeline.
imgs, labs = next(iter(train_loader))
imgs = imgs.to(device)
labs = labs.to(device)
print(f"Batch OK: {imgs.shape}")
out = model(imgs)  # NOTE: `out` is reused by the epoch-0 probe below
print(f"Sample logit: {out[0].argmax().item()}")
84
+
85
# ---- Training --------------------------------------------------------------
for epoch in range(10):
    model.train()
    if epoch == 0:
        # `out` here is the pre-training sanity batch from above: an
        # untrained net should give varied (not uniform) probabilities.
        print("Sample probs:", F.softmax(out[0], dim=0))

    tot_loss, tot_acc = 0.0, 0.0
    for batch_idx, (imgs, labs) in enumerate(train_loader):
        imgs, labs = imgs.to(device), labs.to(device)
        optimizer.zero_grad()        # reset grads from the previous step
        out = model(imgs)            # forward pass -> logits
        loss = criterion(out, labs)
        loss.backward()
        optimizer.step()
        tot_loss += loss.item()
        # .item() keeps the accumulator a plain float; the original summed
        # 0-dim tensors, retaining device memory/graph references per step.
        tot_acc += (out.argmax(1) == labs).float().mean().item()
    print(f"Ep {epoch+1}: Loss {tot_loss/len(train_loader):.3f} Acc {tot_acc/len(train_loader):.2%}")
101
# ===== TEST ACC =====
# Evaluate on the held-out split: eval mode + no_grad for speed/correctness.
model.eval()
test_correct = 0
total_test = 0
with torch.no_grad():
    for imgs, labs in test_loader:
        imgs = imgs.to(device)
        labs = labs.to(device)
        out = model(imgs)
        pred = out.argmax(dim=1)  # highest-logit class per image
        test_correct += (pred == labs).sum().item()
        total_test += labs.size(0)

print(f"🎉 Test Acc: {test_correct/total_test*100:.1f}%")  # emoji fixed (was mojibake)
torch.save(model.state_dict(), 'crop_model.pth')  # weights-only checkpoint
116
+
117
# Remedy per class *name*, then mapped to the dataset's actual label indices.
# Bug fix: the old int-keyed dict assumed the order [healthy, leaf_spot,
# blight, rust], but the beans label order is ['angular_leaf_spot',
# 'bean_rust', 'healthy'] (3 classes, no blight) -- so "Healthy!" was shown
# for diseased leaves and index 3 could never occur.
_remedies_by_name = {
    'angular_leaf_spot': "Leaf Spot: Fungicide",
    'bean_rust': "Rust: Neem oil",
    'healthy': "Healthy!",
    'bacterial_blight': "Blight: Copper spray",  # kept in case of other label sets
}
remedies = {i: _remedies_by_name.get(name, "Unknown: consult an agronomist")
            for i, name in enumerate(classes)}
118
+
119
def diagnose(img):
    """Classify a PIL leaf image and return a human-readable diagnosis.

    Args:
        img: PIL.Image as delivered by the Gradio image input.

    Returns:
        A string "<class>\nConf: <p> Sev: <s>%\n<remedy>".
    """
    model.eval()
    # Bug fix: the original referenced an undefined `transform_test`
    # (NameError on every call); reuse the training-time preprocessing.
    img_t = transform_train(img).unsqueeze(0).to(device)
    with torch.no_grad():
        logits = model(img_t)[0]
        # Bug fix: softmax converts raw logits to probabilities. The
        # original indexed logits directly, so "Conf" could be negative
        # or exceed 100%.
        probs = F.softmax(logits, dim=0)
    pred = probs.argmax().item()
    conf = probs[pred].item()
    sev = conf * 100
    return f"{classes[pred]}\nConf: {conf:.1%} Sev: {sev:.0f}%\n{remedies[pred]}"
128
+
129
+
130
# Persist the whole model object (architecture + weights) for later reuse.
torch.save(model, 'crop_doctor_full.pth')

# Wire up the Gradio UI: a PIL image in, the diagnosis text out.
image_input = gr.Image(type='pil')
app = gr.Interface(
    fn=diagnose,
    inputs=image_input,
    outputs='text',
    title="Crop Disease Detector",
)
app.launch(server_port=7860)
134
+
135
+
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ torch
2
+ torchvision
3
+ datasets
4
+ gradio
5
+ pillow