Commit ·
d541e6d
0
Parent(s):
Revert last upload
Browse files. This view is limited to 50 files because it contains too many changes. See the raw diff.
- .gitattributes +61 -0
- Ground Truth/mustard_train.csv +0 -0
- Ground Truth/okra_train.csv +0 -0
- Ground Truth/radish_train.csv +0 -0
- Ground Truth/wheat_train.csv +0 -0
- README.md +195 -0
- Test_data/For_age_prediction/Ground truth/mustard_test.csv +0 -0
- Test_data/For_age_prediction/Ground truth/okra_test.csv +0 -0
- Test_data/For_age_prediction/Ground truth/radish_test.csv +0 -0
- Test_data/For_age_prediction/Ground truth/wheat_test.csv +0 -0
- Test_data/For_age_prediction/Ground truth/wheat_test1.csv +0 -0
- Test_data/For_age_prediction/acm_test_py.py +336 -0
- Test_data/For_age_prediction/augmented_leaf_data (1).csv +0 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_0.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_105.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_120.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_135.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_15.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_150.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_165.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_180.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_195.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_210.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_225.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_240.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_255.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_270.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_285.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_30.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_300.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_315.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_330.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_345.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_45.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_60.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_75.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_90.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_0.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_105.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_120.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_135.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_15.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_150.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_165.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_180.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_195.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_210.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_225.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_240.png +3 -0
- Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_255.png +3 -0
.gitattributes
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.avro filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.lz4 filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.mds filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 37 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 38 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 39 |
+
# Audio files - uncompressed
|
| 40 |
+
*.pcm filter=lfs diff=lfs merge=lfs -text
|
| 41 |
+
*.sam filter=lfs diff=lfs merge=lfs -text
|
| 42 |
+
*.raw filter=lfs diff=lfs merge=lfs -text
|
| 43 |
+
# Audio files - compressed
|
| 44 |
+
*.aac filter=lfs diff=lfs merge=lfs -text
|
| 45 |
+
*.flac filter=lfs diff=lfs merge=lfs -text
|
| 46 |
+
*.mp3 filter=lfs diff=lfs merge=lfs -text
|
| 47 |
+
*.ogg filter=lfs diff=lfs merge=lfs -text
|
| 48 |
+
*.wav filter=lfs diff=lfs merge=lfs -text
|
| 49 |
+
# Image files - uncompressed
|
| 50 |
+
*.bmp filter=lfs diff=lfs merge=lfs -text
|
| 51 |
+
*.gif filter=lfs diff=lfs merge=lfs -text
|
| 52 |
+
*.png filter=lfs diff=lfs merge=lfs -text
|
| 53 |
+
*.tiff filter=lfs diff=lfs merge=lfs -text
|
| 54 |
+
# Image files - compressed
|
| 55 |
+
*.jpg filter=lfs diff=lfs merge=lfs -text
|
| 56 |
+
*.jpeg filter=lfs diff=lfs merge=lfs -text
|
| 57 |
+
*.webp filter=lfs diff=lfs merge=lfs -text
|
| 58 |
+
# Video files - compressed
|
| 59 |
+
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 61 |
+
Test_data/For_leaf_counting/okra/p1/d22/L2/PXL_20230808_110153626_6.dng filter=lfs diff=lfs merge=lfs -text
|
Ground Truth/mustard_train.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Ground Truth/okra_train.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Ground Truth/radish_train.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Ground Truth/wheat_train.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
README.md
ADDED
|
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
license: cc-by-4.0
|
| 3 |
+
task_categories:
|
| 4 |
+
- image-classification
|
| 5 |
+
- text-to-image
|
| 6 |
+
- image-to-text
|
| 7 |
+
language:
|
| 8 |
+
- en
|
| 9 |
+
tags:
|
| 10 |
+
- plant-growth
|
| 11 |
+
- phenotyping
|
| 12 |
+
- agriculture
|
| 13 |
+
- multiview
|
| 14 |
+
- time-series
|
| 15 |
+
- leaf-counting
|
| 16 |
+
- age-estimation
|
| 17 |
+
- precision-agriculture
|
| 18 |
+
pretty_name: GroMo 25 — Multiview Plant Growth Dataset
|
| 19 |
+
size_categories:
|
| 20 |
+
- 10K<n<100K
|
| 21 |
+
---
|
| 22 |
+
|
| 23 |
+
# GroMo25: Multiview Time-Series Plant Image Dataset for Age Estimation and Leaf Counting
|
| 24 |
+
|
| 25 |
+
## Dataset Summary
|
| 26 |
+
|
| 27 |
+
**GroMo25** is a multiview, time-series plant image dataset designed for plant age estimation (in days) and leaf counting tasks in precision agriculture. It contains high-quality images of four crop species — **Wheat, Okra, Radish, and Mustard** — captured over multiple days under controlled conditions. Each plant is photographed from 24 angles across 5 vertical levels per day, providing rich spatial and temporal information for plant growth modeling.
|
| 28 |
+
|
| 29 |
+
This dataset is intended for researchers and practitioners in **academic research, plant phenotyping, and agriculture**, and serves as the official dataset for the **GroMo Challenge**.
|
| 30 |
+
|
| 31 |
+
> **Paper:** [GroMo Challenge: Multiview time-series plant image dataset for age estimation and leaf counting in precision agriculture](https://dl.acm.org/doi/abs/10.1145/3746027.3762097)
|
| 32 |
+
|
| 33 |
+
---
|
| 34 |
+
|
| 35 |
+
## Dataset Structure
|
| 36 |
+
|
| 37 |
+
### Crops Included
|
| 38 |
+
|
| 39 |
+
| Crop | Description |
|
| 40 |
+
|---------|-----------------------------------------|
|
| 41 |
+
| Wheat | Cereal crop, multiple growth stages |
|
| 42 |
+
| Okra | Vegetable crop, leaf-heavy growth |
|
| 43 |
+
| Radish | Root vegetable, rapid growth cycle |
|
| 44 |
+
| Mustard | Oilseed crop, distinct leaf pattern |
|
| 45 |
+
|
| 46 |
+
### Image Capture Setup
|
| 47 |
+
|
| 48 |
+
All images were captured in a **controlled indoor environment**:
|
| 49 |
+
- Each plant is grown in a **pot** placed on a **rotating disk**
|
| 50 |
+
- The background is covered with a **white cloth** to ensure clean segmentation and consistent background
|
| 51 |
+
- Images are taken at **24 angles** per level (0° to 345°, 15° increment between consecutive angles)
|
| 52 |
+
- Each plant is photographed across **5 vertical levels** (L1 to L5) per day
|
| 53 |
+
|
| 54 |
+
### Images Per Day (Per Plant)
|
| 55 |
+
|
| 56 |
+
```
|
| 57 |
+
5 levels × 24 angles = 120 images per plant per day
|
| 58 |
+
```
|
| 59 |
+
|
| 60 |
+
### Directory Structure
|
| 61 |
+
|
| 62 |
+
```
|
| 63 |
+
dataset/
|
| 64 |
+
├── train/
|
| 65 |
+
│ ├── p1/
|
| 66 |
+
│ │ ├── d1/
|
| 67 |
+
│ │ │ ├── L1/
|
| 68 |
+
│ │ │ │ ├── radish_p1_d1_L1_0.png
|
| 69 |
+
│ │ │ │ ├── radish_p1_d1_L1_15.png
|
| 70 |
+
│ │ │ │ ├── ...
|
| 71 |
+
│ │ │ │ └── radish_p1_d1_L1_345.png
|
| 72 |
+
│ │ │ ├── L2/
|
| 73 |
+
│ │ │ ├── L3/
|
| 74 |
+
│ │ │ ├── L4/
|
| 75 |
+
│ │ │ └── L5/
|
| 76 |
+
│ │ ├── d2/
|
| 77 |
+
│ │ └── ...
|
| 78 |
+
│ ├── p2/
|
| 79 |
+
│ ├── p3/
|
| 80 |
+
│ └── p4/
|
| 81 |
+
└── test/
|
| 82 |
+
└── (same structure as train)
|
| 83 |
+
```
|
| 84 |
+
|
| 85 |
+
### File Naming Convention
|
| 86 |
+
|
| 87 |
+
Each image follows the format:
|
| 88 |
+
|
| 89 |
+
```
|
| 90 |
+
{crop}_p{X}_d{Y}_L{Z}_{A}.png
|
| 91 |
+
```
|
| 92 |
+
|
| 93 |
+
| Field | Description |
|
| 94 |
+
|-------|-------------|
|
| 95 |
+
| `crop` | Crop name (e.g., `radish`, `wheat`) |
|
| 96 |
+
| `X` | Plant ID (e.g., `p1`, `p2`) |
|
| 97 |
+
| `Y` | Day number (e.g., `d1`, `d2`) |
|
| 98 |
+
| `Z` | Level (L1 to L5) |
|
| 99 |
+
| `A` | Angle in degrees (0 to 345, step 15) |
|
| 100 |
+
|
| 101 |
+
**Example:** `radish_p1_d3_L2_90.png` → Radish, Plant 1, Day 3, Level 2, angle 90°
|
| 102 |
+
|
| 103 |
+
---
|
| 104 |
+
|
| 105 |
+
## Ground Truth / Annotations
|
| 106 |
+
|
| 107 |
+
Each crop has a corresponding `{crop}_train.csv` file containing per-image labels.
|
| 108 |
+
|
| 109 |
+
### Format
|
| 110 |
+
|
| 111 |
+
| Column | Description |
|
| 112 |
+
|--------|-------------|
|
| 113 |
+
| `filename` | Relative path to the image |
|
| 114 |
+
| `leaf_count` | Number of leaves on the plant |
|
| 115 |
+
| `Age` | Age of the plant in days |
|
| 116 |
+
|
| 117 |
+
### Example
|
| 118 |
+
```csv
|
| 119 |
+
filename,leaf_count,Age
|
| 120 |
+
mustard/p1/d1/L1/mustard_p1_d1_L1_0.png,2,1
|
| 121 |
+
```
|
| 122 |
+
|
| 123 |
+
A separate CSV is provided for each crop covering all plants, days, levels, and angles.
|
| 124 |
+
|
| 125 |
+
---
|
| 126 |
+
|
| 127 |
+
## Tasks
|
| 128 |
+
|
| 129 |
+
This dataset supports the following tasks:
|
| 130 |
+
|
| 131 |
+
- **Plant Age Estimation** — Predict the number of days since planting (regression)
|
| 132 |
+
- **Leaf Counting** — Predict the number of leaves visible on the plant (regression)
|
| 133 |
+
|
| 134 |
+
### Evaluation Metrics
|
| 135 |
+
|
| 136 |
+
| Metric | Description |
|
| 137 |
+
|--------|-------------|
|
| 138 |
+
| RMSE | Root Mean Squared Error |
|
| 139 |
+
| MAE | Mean Absolute Error |
|
| 140 |
+
|
| 141 |
+
---
|
| 142 |
+
|
| 143 |
+
## Citation
|
| 144 |
+
|
| 145 |
+
If you use this dataset in your research, please cite:
|
| 146 |
+
|
| 147 |
+
```bibtex
|
| 148 |
+
@inproceedings{10.1145/3746027.3762097,
|
| 149 |
+
author = {Bansal, Shreya and Bhatt, Ruchi and Chander, Amanpreet and Kaur, Rupinder and Singh, Malya and Kankanhalli, Mohan and El Saddik, Abdulmotaleb and Saini, Mukesh},
|
| 150 |
+
title = {GroMo25: ACM Multimedia 2025 Grand Challenge for Plant Growth Modeling with Multiview Images},
|
| 151 |
+
year = {2025},
|
| 152 |
+
isbn = {9798400720352},
|
| 153 |
+
publisher = {Association for Computing Machinery},
|
| 154 |
+
address = {New York, NY, USA},
|
| 155 |
+
url = {https://doi.org/10.1145/3746027.3762097},
|
| 156 |
+
doi = {10.1145/3746027.3762097},
|
| 157 |
+
abstract = {Understanding plant growth dynamics is a critical component of modern agricultural research, with applications in yield prediction, phenotyping, and sustainable crop management. Despite recent advances in computer vision and deep learning, progress in plant growth modeling has been constrained by the lack of publicly available, high-resolution, multiview, and temporally rich datasets. To address this gap, we introduce Growth Modelling GroMo25, the first international challenge on plant growth modeling using multiview imagery. In this challenge, we propose a dataset that comprises high-resolution images of four crops: wheat, mustard, radish, and okra, captured at consistent time intervals from multiple camera viewpoints under controlled environmental conditions. The challenge focuses on two key tasks: (1) plant age prediction and (2) leaf count estimation, both requiring models to use spatial and temporal plant features. GroMo25 attracted participation from multiple teams worldwide, encouraging benchmarking and innovation in vision-based plant phenotyping. The GitHub repository is publicly available at https://github.com/mriglab/GroMo-Plant-Growth-Modeling-with-Multiview-Images.},
|
| 158 |
+
booktitle = {Proceedings of the 33rd ACM International Conference on Multimedia},
|
| 159 |
+
pages = {14204–14209},
|
| 160 |
+
numpages = {6},
|
| 161 |
+
keywords = {growth age prediction, leaf count estimation, multiview},
|
| 162 |
+
location = {Dublin, Ireland},
|
| 163 |
+
series = {MM '25}
|
| 164 |
+
}
|
| 165 |
+
```
|
| 166 |
+
|
| 167 |
+
---
|
| 168 |
+
|
| 169 |
+
## Authors & Affiliations
|
| 170 |
+
|
| 171 |
+
| Name | Institution |
|
| 172 |
+
|------|------------|
|
| 173 |
+
| Shreya Bansal | IIT Ropar |
|
| 174 |
+
| Ruchi Bhatt | IIT Ropar |
|
| 175 |
+
| Amanpreet Chander | IIT Ropar |
|
| 176 |
+
| Rupinder Kaur | IIT Ropar |
|
| 177 |
+
| Malya Singh | IIT Ropar |
|
| 178 |
+
| Dr. Mohan Kankanhalli | National University of Singapore |
|
| 179 |
+
| Abdulmotaleb El Saddik | University of Ottawa |
|
| 180 |
+
| Mukesh Kumar Saini | IIT Ropar |
|
| 181 |
+
|
| 182 |
+
For queries, contact: **mrig@iitrpr.ac.in**
|
| 183 |
+
|
| 184 |
+
---
|
| 185 |
+
|
| 186 |
+
## License
|
| 187 |
+
|
| 188 |
+
This dataset is released under the [Creative Commons Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/) license.
|
| 189 |
+
|
| 190 |
+
You are free to use, share, and adapt this dataset for any purpose, including commercial use, as long as appropriate credit is given to the authors.
|
| 191 |
+
|
| 192 |
+
---
|
| 193 |
+
|
| 194 |
+
## Note
|
| 195 |
+
The dataset is part of a challenge organised by [ANNAM.AI](http://data.annam.ai/gromo25/).
|
Test_data/For_age_prediction/Ground truth/mustard_test.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Test_data/For_age_prediction/Ground truth/okra_test.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Test_data/For_age_prediction/Ground truth/radish_test.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Test_data/For_age_prediction/Ground truth/wheat_test.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Test_data/For_age_prediction/Ground truth/wheat_test1.csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Test_data/For_age_prediction/acm_test_py.py
ADDED
|
@@ -0,0 +1,336 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# -*- coding: utf-8 -*-
"""acm_test.py.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1iMB_SLHX4vbCzsA_7Q8MJe-SSKX4Cphr
"""

# Colab-only: mount Google Drive, where the dataset root below lives.
from google.colab import drive
drive.mount('/content/drive')

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, random_split
from torchvision import transforms
import os
from PIL import Image
import random
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import r2_score, mean_absolute_error

# Define the device (GPU or CPU)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)

################################# Input here ###################################
# Experiment configuration — edit these values to evaluate a different crop.
root_path='/content/drive/MyDrive/ACM grand challenge/Test_data/For_age_prediction/'  # dataset root on Drive
csv_file='/content/okra_test.csv'  # ground-truth CSV for this crop
crop='okra'  # crop name; must match the image folder and filename prefix
n_images=4  # views sampled per level (selected from the 15-degree capture grid)
plant_input=2  # number of plant folders to scan (p1..pN)
days_input=86  # number of day folders to scan (d1..dN)
batch_size = 8
seed=42  # NOTE(review): defined but never applied to any RNG in this section — confirm intent
height, width = 224, 224
# Transformations for resizing and converting to tensor
transform = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
##############################################################################
|
| 44 |
+
|
| 45 |
+
class CropDataset(Dataset):
    """Multiview crop-image dataset for age / leaf-count prediction.

    One sample = the views of a single (plant, day, angle-selection, level)
    combination, concatenated along the channel dimension, plus the leaf
    count looked up in the ground-truth CSV and the day number (used as the
    age label).
    """

    def __init__(self, root_dir, csv_file, images_per_level, crop, plants, days,
                 levels=('L1', 'L2', 'L3', 'L4', 'L5'), transform=None):
        """
        Args:
            root_dir (str): Directory with all the images.
            csv_file (str): Path to the CSV file containing ground truth
                (filename, leaf_count, Age).
            images_per_level (int): Number of images to select per level
                (should be a factor of 24, i.e. fit the 15-degree grid).
            crop (str): Crop type (e.g., "radish").
            plants (int): Number of plants (e.g., 4).
            days (int): Number of days (e.g., 59).
            levels (sequence): Level folder names (default L1..L5).
                Tuple default avoids a shared mutable default argument.
            transform (callable, optional): Transform applied to each image.
        """
        self.root_dir = root_dir
        self.csv_file = csv_file
        self.images_per_level = images_per_level
        self.crop = crop
        self.plants_num = plants
        self.max_days = days
        self.levels = list(levels)
        self.transform = transform
        self.image_data = self._load_metadata()
        self.image_paths = self._load_image_paths()

    def _load_metadata(self):
        """Load the ground-truth CSV indexed by filename for O(1) lookup."""
        df = pd.read_csv(self.csv_file)
        df["filename"] = df["filename"].astype(str)  # ensure string keys
        return df.set_index("filename")  # filename -> (leaf_count, Age) row

    def _select_angles(self):
        """Build every rotation-offset selection of capture angles.

        Returns a list of angle lists: the base selection starting at 0°,
        plus one selection per 15° offset that fits strictly between the
        first two base angles.  E.g. images_per_level=4 ->
        [[0, 90, 180, 270], [15, 105, 195, 285], ..., [75, 165, 255, 345]].
        """
        images_needed = self.images_per_level
        step = int(360 / images_needed)
        selected_angles = list(range(0, 360, step))

        # Offsets on the 15-degree grid before the second base angle; this
        # is empty when images_per_level == 24 (step == 15).  Guard against
        # images_per_level == 1, where there is no second base angle
        # (the original code raised IndexError on selected_angles[1]).
        if len(selected_angles) > 1:
            initial_angles = list(range(15, selected_angles[1], 15))
        else:
            initial_angles = []

        multiple_selections = [selected_angles]
        for initial_angle in initial_angles:
            selection = [initial_angle]
            while len(selection) < images_needed:
                next_angle = (selection[-1] + step) % 360
                if next_angle not in selection:
                    selection.append(next_angle)
            multiple_selections.append(selection)
        return multiple_selections

    def _load_image_paths(self):
        """Enumerate (image-path-list, leaf_count, day) samples over all
        plants, days, angle selections, and levels."""
        image_paths = []
        multiple_selections = self._select_angles()

        for plant in range(1, self.plants_num + 1):
            # Fix: use self.crop; the original read the module-level global
            # `crop`, which breaks instances built for a different crop.
            plant_path = os.path.join(self.root_dir, self.crop, f"p{plant}")
            if not os.path.isdir(plant_path):
                print(f"Plant directory not found: {plant_path}")
                continue
            for day in range(1, self.max_days + 1):
                day_path = os.path.join(self.root_dir, self.crop, f"p{plant}", f"d{day}")
                if not os.path.isdir(day_path):
                    continue  # not every plant is photographed every day
                for selected_angles in multiple_selections:
                    for level in self.levels:
                        level_path = os.path.join(self.root_dir, self.crop, f"p{plant}", f"d{day}", level)
                        level_image_paths = [
                            os.path.join(level_path, f"{self.crop}_p{plant}_d{day}_{level}_{angle}.png")
                            for angle in selected_angles
                        ]
                        # The CSV is keyed by the relative path of the first
                        # angle's image of the selection.
                        filename = os.path.join(self.crop, f"p{plant}", f"d{day}", level,
                                                f"{self.crop}_p{plant}_d{day}_{level}_{selected_angles[0]}.png")
                        leaf_count = self.image_data.loc[filename, "leaf_count"]
                        # The day number doubles as the age ground truth.
                        image_paths.append((level_image_paths, leaf_count, day))

        print(f"Total samples loaded: {len(image_paths)}")
        return image_paths

    def __len__(self):
        return len(self.image_paths)

    def __getitem__(self, idx):
        """Return (images, leaf_count, age) for one sample.

        `images` is the channel-wise concatenation of every available view;
        missing files are skipped with a warning (best-effort, as before).
        """
        level_image_paths, leaf_count, age = self.image_paths[idx]
        images = []
        for img_path in level_image_paths:
            if os.path.isfile(img_path):
                # Force 3 channels so torch.cat sees a uniform channel count
                # even for RGBA or grayscale PNGs.
                level_image = Image.open(img_path).convert("RGB")
                if self.transform:
                    level_image = self.transform(level_image)
                images.append(level_image)
            else:
                print(f"Path is not a valid file: {img_path}")

        images = torch.cat(images, dim=0)
        return (images,
                torch.tensor(leaf_count, dtype=torch.float32),
                torch.tensor(age, dtype=torch.float32))
|
| 156 |
+
|
| 157 |
+
# Build the test split: one sample per (plant, day, angle-selection, level).
test_dataset = CropDataset(root_dir=root_path,
                           csv_file=csv_file,
                           images_per_level=n_images,
                           crop=crop,
                           plants=plant_input,
                           days=days_input,
                           transform=transform)

# Vision-Transformer hyperparameters (must match the trained checkpoint).
num_images = n_images  # number of RGB views stacked per sample (n_images, not necessarily 24)
input_channels = num_images*3  # each view contributes 3 RGB channels
patch_size = 16  # Size of each patch
num_patches = (height // patch_size) * (width // patch_size)  # 14 * 14 = 196 patches for 224x224 images
projection_dim = 256  # Embedding dimension for each patch
num_heads = 8  # Number of attention heads
num_layers = 6  # Number of transformer layers
mlp_dim = 512  # Dimension of the MLP head
num_classes = 1  # Number of output classes (for day or leaf count prediction)
dropout_rate = 0.1  # Dropout rate

# DataLoader for the test set.
# NOTE(review): shuffle=True only reorders batches and does not change the
# aggregate metrics, but shuffle=False would make per-batch logs reproducible.
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
|
| 181 |
+
|
| 182 |
+
class VisionTransformer(nn.Module):
    """Multi-view Vision Transformer for scalar regression.

    Each sample packs `num_images` RGB views into the channel dimension
    (input is sliced 3 channels at a time in `forward`). Every view gets
    its own patch-embedding Conv2d; a shared learnable positional encoding
    is added; the views are passed through the attention sub-modules of a
    stack of TransformerEncoderLayers; per-view features are concatenated,
    mean-pooled over patches, and fed to a small MLP that emits one scalar.

    NOTE(review): `forward` calls `layer.self_attn` directly, which skips
    the encoder layer's feed-forward block, layer norms, dropout and
    residual connections — so the "encoder layers" act as raw stacked
    attention. This matches the saved checkpoints, so it must not be
    changed without retraining; flagging it for awareness only.
    """

    def __init__(self, input_channels, patch_size, num_patches, projection_dim, num_heads, num_layers, mlp_dim, num_images, dropout_rate=0.1):
        """
        Args:
            input_channels: total channels across all stacked views
                (assumed num_images * 3 — TODO confirm against the dataset).
            patch_size: side length of each square patch.
            num_patches: patches per view; must match the positional
                encoding and the Conv2d output spatial size.
            projection_dim: embedding dimension per patch.
            num_heads: attention heads per encoder layer.
            num_layers: number of encoder layers.
            mlp_dim: hidden width of the encoder FFN and of the head MLP.
            num_images: number of RGB views stacked in the input.
            dropout_rate: dropout inside each TransformerEncoderLayer.
        """
        super(VisionTransformer, self).__init__()

        self.num_images = num_images  # Total number of stacked views per sample
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.projection_dim = projection_dim
        self.num_heads = num_heads
        self.num_layers = num_layers
        self.mlp_dim = mlp_dim

        # Separate patch-embedding layer for each view (3 RGB channels each).
        # Conv with kernel == stride == patch_size performs the patchify+project step.
        self.patch_embeds = nn.ModuleList([
            nn.Conv2d(input_channels // num_images, projection_dim, kernel_size=patch_size, stride=patch_size)
            for _ in range(num_images)
        ])

        # Learnable positional encoding, shared across all views.
        self.positional_encoding = nn.Parameter(torch.randn(1, num_patches, projection_dim))

        # Transformer encoder layers; only their self_attn is used in forward
        # (see class NOTE above) so that attention weights can be collected.
        self.attention_layers = nn.ModuleList([
            nn.TransformerEncoderLayer(
                d_model=projection_dim,
                nhead=num_heads,
                dim_feedforward=mlp_dim,
                dropout=dropout_rate,
                batch_first=True
            )
            for _ in range(num_layers)
        ])

        # MLP head: concatenated per-view features -> single scalar output.
        self.mlp_head = nn.Sequential(
            nn.Linear(projection_dim * num_images, mlp_dim),
            nn.ReLU(),
            nn.Linear(mlp_dim, 1)
        )

    def forward(self, x):
        """Run the multi-view forward pass.

        Args:
            x: tensor of shape (batch, num_images * 3, H, W) — views stacked
               along the channel axis, 3 channels per view.

        Returns:
            (output, attention_weights):
                output: (batch, 1) regression prediction.
                attention_weights: list (one entry per layer) of lists
                    (one entry per view) of attention-weight tensors as
                    returned by nn.MultiheadAttention.
        """
        batch_size = x.shape[0]

        # Step 1: Patch embedding, computed separately for each view.
        patch_embeddings = []
        for i in range(self.num_images):
            # Slice out view i (3 channels each for RGB).
            img_x = x[:, i*3:(i+1)*3, :, :]  # (batch, 3, H, W)
            patch_embed = self.patch_embeds[i](img_x)  # view-specific embedding
            # (batch, proj, H', W') -> (batch, num_patches, proj)
            patch_embed = patch_embed.flatten(2).transpose(1, 2)
            patch_embeddings.append(patch_embed)

        # Step 2: Add the shared positional encoding to every view.
        patch_embeddings = [pe + self.positional_encoding for pe in patch_embeddings]

        # Step 3: Stacked self-attention, collecting attention weights.
        attention_weights = []  # per-layer list of per-view attention maps
        for layer in self.attention_layers:
            layer_attention_weights = []
            for i in range(self.num_images):
                # Direct self_attn call returns (output, weights); this bypasses
                # the layer's FFN/norm/residual path — see class NOTE.
                attn_output, attn_weights = layer.self_attn(patch_embeddings[i], patch_embeddings[i], patch_embeddings[i])
                patch_embeddings[i] = attn_output
                layer_attention_weights.append(attn_weights)
            attention_weights.append(layer_attention_weights)

        # Step 4: Concatenate per-view features along the embedding axis.
        # -> (batch, num_patches, projection_dim * num_images)
        x = torch.cat(patch_embeddings, dim=-1)

        # Step 5: Mean pooling over patches -> (batch, projection_dim * num_images).
        x = x.mean(dim=1)

        # Step 6: MLP head -> (batch, 1) scalar prediction.
        output = self.mlp_head(x)

        return output, attention_weights
|
| 258 |
+
|
| 259 |
+
# ---- Age prediction: evaluate the trained ViT on the test set ----
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

# Load the trained model weights (change this path to your checkpoint).
model_path = "/content/drive/MyDrive/ACM grand challenge/Crops data/For_age_prediction/results/okra_all files/okra_vit_age_prediction_10.pth"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Rebuild the architecture and restore the checkpoint.
model = VisionTransformer(input_channels, patch_size, num_patches, projection_dim, num_heads, num_layers, mlp_dim, num_images, dropout_rate)
model.load_state_dict(torch.load(model_path, map_location=device))
model.to(device)
model.eval()  # evaluation mode: disables dropout

# Collected ground-truth ages and model predictions.
y_true = []
y_pred = []

# Run inference on the test set; test_loader yields (images, count, age).
with torch.no_grad():
    for images, count, age in test_loader:
        images = images.to(device)
        age = age.to(device)  # `count` is unused for age evaluation

        outputs, attention = model(images)
        # Flatten (batch, 1) outputs to (batch,) so y_true and y_pred end
        # up the same shape for the sklearn metrics below.
        y_true.extend(age.view(-1).cpu().numpy())
        y_pred.extend(outputs.view(-1).cpu().numpy())

# Convert to NumPy arrays for metric computation.
y_true = np.array(y_true)
y_pred = np.array(y_pred)

# Compute regression metrics.
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
mae = mean_absolute_error(y_true, y_pred)
r2 = r2_score(y_true, y_pred)

# Print results.
print(f"RMSE: {rmse:.4f}")
print(f"MAE: {mae:.4f}")
print(f"R² Score: {r2:.4f}")
|
| 298 |
+
|
| 299 |
+
# ---- Leaf count: evaluate the trained ViT on the test set ----
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

# Load the trained model weights (change this path to your checkpoint).
model_path = "/content/drive/MyDrive/ACM grand challenge/Crops data/For_age_prediction/results/okra_all files/okra_vit_leaf_count_8.pth"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Rebuild the architecture and restore the checkpoint.
model = VisionTransformer(input_channels, patch_size, num_patches, projection_dim, num_heads, num_layers, mlp_dim, num_images, dropout_rate)
model.load_state_dict(torch.load(model_path, map_location=device))
model.to(device)
model.eval()  # evaluation mode: disables dropout

# Collected ground-truth leaf counts and model predictions.
y_true = []
y_pred = []

# Run inference on the test set; test_loader yields (images, count, age).
# Fixed: the original moved `count` to the device twice (copy-paste slip);
# `age` is unused for leaf-count evaluation.
with torch.no_grad():
    for images, count, age in test_loader:
        images = images.to(device)
        count = count.to(device)

        outputs, attention = model(images)
        # Flatten (batch, 1) outputs to (batch,) so y_true and y_pred end
        # up the same shape for the sklearn metrics below.
        y_true.extend(count.view(-1).cpu().numpy())
        y_pred.extend(outputs.view(-1).cpu().numpy())

# Convert to NumPy arrays for metric computation.
y_true = np.array(y_true)
y_pred = np.array(y_pred)

# Compute regression metrics.
rmse = np.sqrt(mean_squared_error(y_true, y_pred))
mae = mean_absolute_error(y_true, y_pred)
r2 = r2_score(y_true, y_pred)

# Print results.
print(f"RMSE: {rmse:.4f}")
print(f"MAE: {mae:.4f}")
print(f"R² Score: {r2:.4f}")
|
Test_data/For_age_prediction/augmented_leaf_data (1).csv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_0.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_105.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_120.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_135.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_15.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_150.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_165.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_180.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_195.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_210.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_225.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_240.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_255.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_270.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_285.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_30.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_300.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_315.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_330.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_345.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_45.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_60.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_75.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L1/mustard_p4_d1_L1_90.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_0.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_105.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_120.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_135.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_15.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_150.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_165.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_180.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_195.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_210.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_225.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_240.png
ADDED
|
Git LFS Details
|
Test_data/For_age_prediction/mustard/p4/d1/L2/mustard_p4_d1_L2_255.png
ADDED
|
Git LFS Details
|