Add files using upload-large-folder tool
Browse files
argentina/.DS_Store
CHANGED
|
Binary files a/argentina/.DS_Store and b/argentina/.DS_Store differ
|
|
|
argentina/processed/csvs/soil_cec_weighted_admin2.csv
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|
argentina/processed/csvs/soil_ph_h2o_weighted_admin2.csv
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|
dataset.py
ADDED
|
@@ -0,0 +1,647 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Amazing Crop Yield Dataset (ACYD) - Hugging Face Dataset Implementation
|
| 3 |
+
|
| 4 |
+
A comprehensive multi-country crop yield prediction dataset with weather, land surface,
|
| 5 |
+
and yield data for machine learning applications.
|
| 6 |
+
|
| 7 |
+
Usage:
|
| 8 |
+
load_dataset(
|
| 9 |
+
"notadib/ACYD",
|
| 10 |
+
country="argentina",
|
| 11 |
+
crop_type="corn",
|
| 12 |
+
standardize=True,
|
| 13 |
+
test_year=2020,
|
| 14 |
+
n_train_years=10,
|
| 15 |
+
n_past_years=5,
|
| 16 |
+
trust_remote_code=True
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
Output format:
|
| 20 |
+
- weather: (seq_len, n_weather_vars)
|
| 21 |
+
- land_surface: (seq_len, n_land_surface_vars)
|
| 22 |
+
- land_surface_mask: (seq_len, n_land_surface_vars) [used because NDVI starts in 1982 while the rest of the data starts in 1979]
|
| 23 |
+
- soil: (soil_depths, n_soil_vars)
|
| 24 |
+
- coords: (1,2)
|
| 25 |
+
- years: (n_past_years + 1, 1)
|
| 26 |
+
- y_past: (n_past_years, 1)
|
| 27 |
+
- y: (1,)
|
| 28 |
+
"""
|
| 29 |
+
|
| 30 |
+
import datasets
|
| 31 |
+
import logging
|
| 32 |
+
import pandas as pd
|
| 33 |
+
import numpy as np
|
| 34 |
+
from tqdm import tqdm
|
| 35 |
+
import os
|
| 36 |
+
|
| 37 |
+
# Default values if constants module is not available
|
| 38 |
+
MAX_CONTEXT_LENGTH = 500
|
| 39 |
+
N_WEATHER_VARS = 5 # precipitation, reference_et, snow_lwe, solar_radiation, t2m_max
|
| 40 |
+
N_LAND_SURFACE_VARS = 3 # lai_high, lai_low, ndvi
|
| 41 |
+
N_SOIL_VARS = 8 # Different soil properties
|
| 42 |
+
SOIL_DEPTHS = 6 # Different depth layers
|
| 43 |
+
CROP_YIELD_STATS = {
|
| 44 |
+
"soybean": {"mean": [], "std": []},
|
| 45 |
+
"corn": {"mean": [], "std": []},
|
| 46 |
+
"wheat": {"mean": [], "std": []},
|
| 47 |
+
"sunflower": {"mean": [], "std": []},
|
| 48 |
+
}
|
| 49 |
+
|
| 50 |
+
# Valid parameter ranges (updated for Argentina data)
|
| 51 |
+
_CROP_TYPES = ["soybean", "corn", "wheat", "sunflower"]
|
| 52 |
+
_TEST_YEARS = list(range(1982, 2025)) # 1982-2024 based on available data
|
| 53 |
+
_N_TRAIN_YEARS = list(range(1, 31)) # 1-30 years
|
| 54 |
+
_N_PAST_YEARS = list(range(1, 11)) # 1-10 years
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class CropYieldConfig(datasets.BuilderConfig):
    """Custom configuration for the Crop Yield Dataset.

    The config name encodes every parameter as
    ``{crop_type}-{test_year}-{n_train_years}-{n_past_years}-{std|nostd}``.
    """

    def __init__(
        self,
        crop_type: str = "soybean",
        test_year: int = 2018,
        n_train_years: int = 10,
        n_past_years: int = 5,
        data_dir: str = "./",
        standardize: bool = True,
        **kwargs,
    ):
        """
        Args:
            crop_type: Type of crop (soybean, corn, wheat, sunflower)
            test_year: Year to use for testing (1982-2024)
            n_train_years: Number of years to use for training (1-30)
            n_past_years: Number of past years to include in features (1-10)
            data_dir: Directory containing the data files
            standardize: Whether to standardize the data
            **kwargs: Additional arguments for BuilderConfig

        Raises:
            ValueError: If any parameter falls outside its valid range.
        """
        # Validate with explicit raises rather than `assert`: asserts are
        # stripped under `python -O`, which would silently skip validation.
        if crop_type not in _CROP_TYPES:
            raise ValueError(f"Crop type must be one of {_CROP_TYPES}, got {crop_type!r}")
        if test_year not in _TEST_YEARS:
            raise ValueError(f"Test year must be between 1982 and 2024, got {test_year}")
        if n_train_years not in _N_TRAIN_YEARS:
            raise ValueError(
                f"Training years must be between 1 and 30, got {n_train_years}"
            )
        if n_past_years not in _N_PAST_YEARS:
            raise ValueError(f"Past years must be between 1 and 10, got {n_past_years}")

        # Create a descriptive config name following the GitHub Code style.
        std_str = "std" if standardize else "nostd"
        config_name = (
            f"{crop_type}-{test_year}-{n_train_years}-{n_past_years}-{std_str}"
        )

        super().__init__(
            name=config_name,
            **kwargs,
        )

        self.crop_type = crop_type
        self.test_year = test_year
        self.n_train_years = n_train_years
        self.n_past_years = n_past_years
        self.data_dir = data_dir
        self.standardize = standardize
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class CropYieldDataset(datasets.GeneratorBasedBuilder):
    """Crop yield prediction dataset with weather, land-surface, and soil data.

    Each example packs ``n_past_years + 1`` years of weekly weather and
    land-surface series (52 weeks per year), a static soil profile, the
    location coordinates, the past yields, and the current-year yield target.
    """

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIG_CLASS = CropYieldConfig
    config: CropYieldConfig  # narrow the type of the active config

    BUILDER_CONFIGS = [
        CropYieldConfig(
            crop_type="soybean",
            description="Soybean yield prediction dataset with default parameters",
        ),
        CropYieldConfig(
            crop_type="corn",
            description="Corn yield prediction dataset with default parameters",
        ),
        CropYieldConfig(
            crop_type="wheat",
            description="Wheat yield prediction dataset with default parameters",
        ),
        CropYieldConfig(
            crop_type="sunflower",
            description="Sunflower yield prediction dataset with default parameters",
        ),
    ]

    DEFAULT_CONFIG_NAME = "soybean-2018-10-5-std"

    def _create_builder_config(
        self,
        config_name=None,
        custom_features=None,
        **config_kwargs,
    ):
        """Create a BuilderConfig from ``config_kwargs``.

        Allows passing parameters directly to ``load_dataset()``, e.g.
        ``load_dataset("path", crop_type="corn", test_year=2017, ...)``.
        """
        # Reuse a predefined config when the requested name matches one.
        if config_name:
            for config in self.BUILDER_CONFIGS:
                if config.name == config_name:
                    return config, config_name

        # Otherwise, build a fresh config from the provided parameters.
        if config_kwargs:
            config = CropYieldConfig(**config_kwargs)
            return config, config.name

        # Fall back to the default datasets behavior.
        return super()._create_builder_config(
            config_name=config_name,
            custom_features=custom_features,
            **config_kwargs,
        )

    def _info(self):
        """Declare the feature schema; shapes depend on ``n_past_years``."""
        n_past_years = (
            self.config.n_past_years if self.config.n_past_years is not None else 5
        )

        # 52 weeks per year times (past years + current year).
        seq_len = 52 * (n_past_years + 1)

        features = datasets.Features(
            {
                "weather": datasets.Array2D(
                    shape=(seq_len, N_WEATHER_VARS), dtype="float32"
                ),
                "land_surface": datasets.Array2D(
                    shape=(seq_len, N_LAND_SURFACE_VARS), dtype="float32"
                ),
                # True where land-surface values are unavailable (NDVI only
                # exists from 1982 on).
                "land_surface_mask": datasets.Array2D(
                    shape=(seq_len, N_LAND_SURFACE_VARS), dtype="bool"
                ),
                "soil": datasets.Array2D(
                    shape=(SOIL_DEPTHS, N_SOIL_VARS), dtype="float32"
                ),
                "coords": datasets.Array2D(shape=(1, 2), dtype="float32"),
                "years": datasets.Array2D(shape=(n_past_years + 1, 1), dtype="float32"),
                "y_past": datasets.Sequence(
                    feature=datasets.Value("float32"), length=n_past_years
                ),
                "y": datasets.Sequence(feature=datasets.Value("float32"), length=1),
            }
        )

        return datasets.DatasetInfo(
            description="Crop yield prediction dataset with weather and land surface data",
            features=features,
        )

    def _split_generators(self, dl_manager):
        """Load the merged dataframe and emit train/test split generators."""
        config = self.config
        if not isinstance(config, CropYieldConfig):
            # Fallback to soybean defaults if an unexpected config slips through.
            logging.warning(
                "Config is not CropYieldConfig, using default values for soybean"
            )
            test_year = 2018
            n_train_years = 10
            n_past_years = 5
            data_dir = "./"
            standardize = True
            crop = "soybean"
        else:
            test_year = config.test_year
            n_train_years = config.n_train_years
            n_past_years = config.n_past_years
            data_dir = config.data_dir
            standardize = config.standardize
            crop = config.crop_type  # use crop_type, not the config name

        # Normalize the data directory to a trailing-slash form.
        if data_dir is None:
            data_dir = "./"
        elif not data_dir.endswith("/"):
            data_dir += "/"

        # Read and merge the raw CSVs once; both splits share the dataframe.
        crop_df = self._read_crop_dataset(data_dir, crop)

        shared_kwargs = {
            "data": crop_df,
            "test_year": test_year,
            "n_train_years": n_train_years,
            "n_past_years": n_past_years,
            "crop": crop,
            "standardize": standardize,
        }
        return [
            datasets.SplitGenerator(
                name="train",
                gen_kwargs={**shared_kwargs, "is_test": False},
            ),
            datasets.SplitGenerator(
                name="test",
                gen_kwargs={**shared_kwargs, "is_test": True},
            ),
        ]

    def _generate_examples(
        self, data, test_year, n_train_years, n_past_years, crop, standardize, is_test
    ):
        """Yield ``(index, sample)`` pairs for one split."""
        # Process and (optionally) standardize the merged dataframe.
        processed_data = self._process_data(
            data, test_year, n_train_years, crop, standardize
        )

        # Assemble the per-location, per-year samples.
        dataset_samples = self._create_dataset_samples(
            processed_data, test_year, n_train_years, n_past_years, crop, is_test
        )

        for idx, sample in enumerate(dataset_samples):
            yield idx, sample

    def _read_crop_dataset(self, data_dir: str, crop: str):
        """Load and merge the per-variable Argentina CSVs into one dataframe.

        Weekly weather/land-surface columns are renamed to ``W_{var}_{week}``
        and soil depth columns to ``S_{var}_{depth}`` so downstream code can
        slice them positionally.
        """
        # Directory holding the Argentina processed CSVs.
        csv_dir = os.path.join(data_dir, "data", "argentina", "processed", "csvs")

        # Crop yield file (wheat has one extra harvest year in its filename).
        crop_file = f"crop_{crop}_yield_1970-2024.csv"
        if crop == "wheat":
            crop_file = f"crop_{crop}_yield_1970-2025.csv"

        crop_path = os.path.join(csv_dir, crop_file)
        crop_df = pd.read_csv(crop_path)

        # Weather data files (one CSV per variable).
        weather_files = {
            "precipitation": "weather_1979-2024_precipitation_weekly_weighted_admin2.csv",
            "reference_et": "weather_1979-2024_reference_et_weekly_weighted_admin2.csv",
            "snow_lwe": "weather_1979-2024_snow_lwe_weekly_weighted_admin2.csv",
            "solar_radiation": "weather_1979-2024_solar_radiation_weekly_weighted_admin2.csv",
            "t2m_max": "weather_1979-2024_t2m_max_weekly_weighted_admin2.csv",
        }

        # Land surface data files.
        land_surface_files = {
            "lai_high": "land_surface_1979-2024_lai_high_weekly_weighted_admin2.csv",
            "lai_low": "land_surface_1979-2024_lai_low_weekly_weighted_admin2.csv",
            "ndvi": "land_surface_1982-2024_ndvi_weekly_weighted_admin2.csv",
        }

        # Soil data files (static per location).
        soil_files = {
            "cec": "soil_cec_weighted_admin2.csv",
            "coarse_fragments": "soil_coarse_fragments_weighted_admin2.csv",
            "nitrogen": "soil_nitrogen_weighted_admin2.csv",
            "organic_carbon": "soil_organic_carbon_weighted_admin2.csv",
            "organic_carbon_density": "soil_organic_carbon_density_weighted_admin2.csv",
            "ph_h2o": "soil_ph_h2o_weighted_admin2.csv",
            "sand": "soil_sand_weighted_admin2.csv",
            "silt": "soil_silt_weighted_admin2.csv",
        }

        # Load all weather and land surface data.
        weather_dfs = {
            var_name: pd.read_csv(os.path.join(csv_dir, filename))
            for var_name, filename in weather_files.items()
        }
        land_surface_dfs = {
            var_name: pd.read_csv(os.path.join(csv_dir, filename))
            for var_name, filename in land_surface_files.items()
        }

        soil_dfs = {}
        for var_name, filename in soil_files.items():
            file_path = os.path.join(csv_dir, filename)
            if os.path.exists(file_path):
                soil_dfs[var_name] = pd.read_csv(file_path)
            else:
                # BUGFIX: the previous message never named the missing file.
                logging.warning("Soil file %s not found, skipping", file_path)

        # Location identifier used as the merge key.
        crop_df["loc_ID"] = crop_df["admin_level_1"] + "_" + crop_df["admin_level_2"]

        # Start with crop data as the base frame.
        merged_df = crop_df.copy()

        # Add latitude/longitude from the weather data (consistent across vars).
        if "precipitation" in weather_dfs:
            precip_df = weather_dfs["precipitation"]
            precip_df["loc_ID"] = (
                precip_df["admin_level_1"] + "_" + precip_df["admin_level_2"]
            )

            lat_lng_df = precip_df[["loc_ID", "year", "latitude", "longitude"]].copy()
            merged_df = merged_df.merge(lat_lng_df, on=["loc_ID", "year"], how="left")
            merged_df = merged_df.rename(
                columns={"latitude": "lat", "longitude": "lng"}
            )

        # Merge weekly weather data; columns become W_{var_index}_{week}.
        for var_name, df in weather_dfs.items():
            df["loc_ID"] = df["admin_level_1"] + "_" + df["admin_level_2"]

            week_cols = [
                col for col in df.columns if col.startswith(f"{var_name}_week_")
            ]

            var_index = list(weather_files.keys()).index(var_name) + 1  # 1-indexed
            rename_dict = {
                col: f"W_{var_index}_{i}" for i, col in enumerate(week_cols, 1)
            }

            df_renamed = df[["loc_ID", "year"] + week_cols].rename(columns=rename_dict)
            merged_df = merged_df.merge(df_renamed, on=["loc_ID", "year"], how="left")

        # Merge weekly land surface data, continuing the W_* index sequence.
        for var_name, df in land_surface_dfs.items():
            df["loc_ID"] = df["admin_level_1"] + "_" + df["admin_level_2"]

            week_cols = [
                col for col in df.columns if col.startswith(f"{var_name}_week_")
            ]

            var_index = (
                len(weather_files) + list(land_surface_files.keys()).index(var_name) + 1
            )
            rename_dict = {
                col: f"W_{var_index}_{i}" for i, col in enumerate(week_cols, 1)
            }

            df_renamed = df[["loc_ID", "year"] + week_cols].rename(columns=rename_dict)
            merged_df = merged_df.merge(df_renamed, on=["loc_ID", "year"], how="left")

        # Merge soil data; columns become S_{var_index}_{depth}.
        for var_name, df in soil_dfs.items():
            df["loc_ID"] = df["admin_level_1"] + "_" + df["admin_level_2"]

            # Depth columns, e.g. cec_0_5cm, cec_5_15cm, ...
            depth_cols = [
                col
                for col in df.columns
                if col.startswith(f"{var_name}_") and "cm" in col
            ]

            if depth_cols:
                var_index = (
                    len(weather_files)
                    + len(land_surface_files)
                    + list(soil_files.keys()).index(var_name)
                    + 1
                )
                rename_dict = {
                    col: f"S_{var_index}_{i}" for i, col in enumerate(depth_cols, 1)
                }

                df_renamed = df[["loc_ID"] + depth_cols].rename(columns=rename_dict)
                # Soil properties are static, so merge on loc_ID only.
                merged_df = merged_df.merge(df_renamed, on=["loc_ID"], how="left")

        merged_df = merged_df.sort_values(["loc_ID", "year"])

        logging.info(f"Loaded {len(merged_df)} records for {crop} from Argentina data")
        logging.info(
            f"Data covers years {merged_df['year'].min()}-{merged_df['year'].max()}"
        )
        logging.info(f"Number of unique locations: {merged_df['loc_ID'].nunique()}")

        return merged_df

    def _process_data(self, data, test_year, n_train_years, crop, standardize):
        """Filter to post-1981 rows, drop missing yields, optionally standardize.

        Yield statistics are computed on the training window only (to avoid
        test-set leakage) and recorded in CROP_YIELD_STATS so predictions can
        later be de-standardized.
        """
        start_year = test_year - n_train_years

        # NDVI only exists from 1982 on, so earlier years are dropped.
        data = data[data["year"] > 1981.0].copy()

        # Drop rows with missing yield values for the given crop.
        yield_col = f"{crop}_yield"
        rows_before = len(data)
        data = data.dropna(subset=[yield_col])
        rows_after = len(data)
        rows_dropped = rows_before - rows_after

        if rows_dropped > 0:
            # Use logging (not print) for consistency with the rest of the class.
            logging.info(
                f"Dropped {rows_dropped} rows with missing {yield_col} values "
                f"({rows_before} -> {rows_after} rows)"
            )

        if standardize:
            # Z-score every feature column; identifiers, coordinates, and the
            # yield target are excluded and handled separately.
            cols_to_standardize = [
                col
                for col in data.columns
                if col
                not in [
                    "loc_ID",
                    "year",
                    "country",
                    "admin_level_1",
                    "admin_level_2",
                    "lat",
                    "lng",
                    yield_col,
                ]
            ]

            data[cols_to_standardize] = (
                data[cols_to_standardize] - data[cols_to_standardize].mean()
            ) / data[cols_to_standardize].std()
            data[cols_to_standardize] = data[cols_to_standardize].fillna(0)

            # Standardize the yield using training-window statistics only.
            train_data = data[(data["year"] >= start_year) & (data["year"] < test_year)]
            yield_mean = train_data[yield_col].mean()
            yield_std = train_data[yield_col].std()
            data[yield_col] = (data[yield_col] - yield_mean) / yield_std

            logging.info(f"{crop} yield mean = {yield_mean:.3f} and std = {yield_std:.3f}")
            CROP_YIELD_STATS[crop]["mean"].append(yield_mean)
            CROP_YIELD_STATS[crop]["std"].append(yield_std)

        # Any remaining NaNs (e.g. when not standardizing) become zeros.
        data = data.fillna(0)

        return data

    def _create_dataset_samples(
        self, data, test_year, n_train_years, n_past_years, crop, is_test
    ):
        """Build the list of samples for one split.

        A candidate (location, year) becomes a sample only when the location
        has at least ``n_past_years + 1`` records up to and including that year.
        """
        start_year = test_year - n_train_years
        yield_col = f"{crop}_yield"

        # Column groups for the Argentina data: 5 weather variables and
        # 3 land surface variables, 52 weeks each; 8 soil variables with
        # 6 depth layers each.
        weather_cols = [
            f"W_{i}_{j}" for i in range(1, N_WEATHER_VARS + 1) for j in range(1, 53)
        ]
        land_surface_cols = [
            f"W_{i}_{j}"
            for i in range(N_WEATHER_VARS + 1, N_WEATHER_VARS + N_LAND_SURFACE_VARS + 1)
            for j in range(1, 53)
        ]
        soil_cols = [
            f"S_{i}_{j}"
            for i in range(1, N_SOIL_VARS + 1)
            for j in range(1, SOIL_DEPTHS + 1)
        ]

        # Candidate rows: the test year, or the training window.
        if is_test:
            candidate_data = data[data["year"] == test_year]
        else:
            candidate_data = data[
                (data["year"] >= start_year) & (data["year"] < test_year)
            ]

        # PERF: precompute sorted year arrays per location once, instead of
        # re-filtering the whole dataframe for every candidate row (O(n^2)).
        data_sorted = data.sort_values(["loc_ID", "year"])
        years_by_loc = {
            loc_id: grp["year"].to_numpy()
            for loc_id, grp in data_sorted.groupby("loc_ID")
        }

        def has_sufficient_history(row):
            # Count records at this location up to and including this year.
            loc_years = years_by_loc[row["loc_ID"]]
            n_up_to_year = int(np.searchsorted(loc_years, row["year"], side="right"))
            return n_up_to_year >= n_past_years + 1

        mask = candidate_data.apply(has_sufficient_history, axis=1)
        valid_candidates = candidate_data[mask]
        index = valid_candidates[["year", "loc_ID"]].reset_index(drop=True)

        dataset_name = "train" if not is_test else "test"
        logging.info(
            f"Creating {dataset_name} dataset with {len(index)} samples for {'test year ' + str(test_year) if is_test else 'training years ' + str(start_year) + '-' + str(test_year-1)} using {crop} yield."
        )

        samples = []
        total_samples = len(index)

        if total_samples == 0:
            logging.warning(f"No samples found for {dataset_name} dataset!")
            return samples

        for idx in tqdm(range(total_samples)):
            year = int(index.iloc[idx]["year"])
            loc_ID = index.iloc[idx]["loc_ID"]
            # The most recent n_past_years + 1 records for this location.
            query_data = data[(data["year"] <= year) & (data["loc_ID"] == loc_ID)].tail(
                n_past_years + 1
            )

            # Weather: (n_years, n_vars, 52 weeks).
            weather_data = (
                query_data[weather_cols]
                .values.astype("float32")
                .reshape((-1, N_WEATHER_VARS, 52))
            )
            n_years, n_weather_features, seq_len = weather_data.shape
            total_seq_len = n_years * seq_len

            if total_seq_len > MAX_CONTEXT_LENGTH:
                raise ValueError(
                    f"total_seq_len = {total_seq_len} is greater than MAX_CONTEXT_LENGTH = {MAX_CONTEXT_LENGTH}"
                )

            # (n_years, n_features, 52) -> (total_seq_len, n_features).
            weather = weather_data.transpose(0, 2, 1).reshape(
                total_seq_len, n_weather_features
            )

            # Land surface: same layout as weather.
            land_surface_data = (
                query_data[land_surface_cols]
                .values.astype("float32")
                .reshape((-1, N_LAND_SURFACE_VARS, 52))
            )
            land_surface = land_surface_data.transpose(0, 2, 1).reshape(
                total_seq_len, N_LAND_SURFACE_VARS
            )

            # Mask is True where NDVI (feature index 2) predates 1982.
            land_surface_mask = np.zeros(
                (total_seq_len, N_LAND_SURFACE_VARS), dtype=bool
            )
            year_data = query_data["year"].values
            for i, year_val in enumerate(year_data):
                if year_val < 1982:  # NDVI starts from 1982
                    start_idx = i * seq_len
                    end_idx = (i + 1) * seq_len
                    land_surface_mask[start_idx:end_idx, 2] = True

            # Soil is static per location; take the first row.
            soil_data = (
                query_data[soil_cols]
                .iloc[0]
                .values.astype("float32")
                .reshape((N_SOIL_VARS, SOIL_DEPTHS))
            )
            soil = soil_data.T  # -> (soil_depths, n_soil_vars)

            # Coordinates of the (single) location.
            coords = (
                query_data[["lat", "lng"]]
                .iloc[0]
                .values.astype("float32")
                .reshape((1, 2))
            )

            # Years covered by this sample: (n_past_years + 1, 1).
            years = query_data["year"].values.astype("float32").reshape((-1, 1))

            # Target is the last (current) year; history excludes it.
            y = query_data.iloc[-1:][yield_col].values.astype("float32")
            y_past = query_data[yield_col].values.astype("float32")[:-1]

            if len(y_past) < n_past_years:
                raise ValueError(
                    f"Insufficient yield history for location {loc_ID} in year {year}. Need {n_past_years} past years but have {len(y_past)}."
                )

            samples.append(
                {
                    "weather": weather,
                    "land_surface": land_surface,
                    "land_surface_mask": land_surface_mask,
                    "soil": soil,
                    "coords": coords,
                    "years": years,
                    "y_past": y_past,
                    "y": y,
                }
            )

        return samples
|