""" Amazing Crop Yield Dataset (ACYD) - Hugging Face Dataset Implementation A comprehensive multi-country crop yield prediction dataset with weather, land surface, and yield data for machine learning applications. Usage: load_dataset( "notadib/ACYD", country="argentina", crop_type="corn", standardize=True, test_year=2020, n_train_years=10, n_past_years=5, trust_remote_code=True ) Output format: - weather: (seq_len, n_weather_vars) - land_surface: (seq_len, n_land_surface_vars) - land_surface_mask: (seq_len, n_land_surface_vars) [used cause ndvi starts from 1982 but rest from 1979] - soil: (soil_depths, n_soil_vars) - coords: (1,2) - years: (n_past_years + 1, 1) - y_past: (n_past_years, 1) - y: (1,) """ import datasets import logging import pandas as pd import numpy as np from tqdm import tqdm import os # Default values if constants module is not available MAX_CONTEXT_LENGTH = 500 N_WEATHER_VARS = 5 # precipitation, reference_et, snow_lwe, solar_radiation, t2m_max N_LAND_SURFACE_VARS = 3 # lai_high, lai_low, ndvi N_SOIL_VARS = 8 # Different soil properties SOIL_DEPTHS = 6 # Different depth layers CROP_YIELD_STATS = { "soybean": {"mean": [], "std": []}, "corn": {"mean": [], "std": []}, "wheat": {"mean": [], "std": []}, "sunflower": {"mean": [], "std": []}, } # Valid parameter ranges (updated for Argentina data) _CROP_TYPES = ["soybean", "corn", "wheat", "sunflower"] _TEST_YEARS = list(range(1982, 2025)) # 1982-2024 based on available data _N_TRAIN_YEARS = list(range(1, 31)) # 1-30 years _N_PAST_YEARS = list(range(1, 11)) # 1-10 years class CropYieldConfig(datasets.BuilderConfig): """Custom configuration for Crop Yield Dataset.""" def __init__( self, crop_type: str = "soybean", test_year: int = 2018, n_train_years: int = 10, n_past_years: int = 5, data_dir: str = "./", standardize: bool = True, **kwargs, ): """ Args: crop_type: Type of crop (soybean, corn, wheat, sunflower) test_year: Year to use for testing (1982-2024) n_train_years: Number of years to use for training (1-30) n_past_years: Number of past years to include in features (1-10) data_dir: Directory containing the data files standardize: Whether to standardize the data **kwargs: Additional arguments for BuilderConfig """ # Validate parameters assert crop_type in _CROP_TYPES, f"Crop type must be one of {_CROP_TYPES}" assert test_year in _TEST_YEARS, f"Test year must be between 1982 and 2024" assert ( n_train_years in _N_TRAIN_YEARS ), f"Training years must be between 1 and 30" assert n_past_years in _N_PAST_YEARS, f"Past years must be between 1 and 10" # Create descriptive config name following GitHub Code style std_str = "std" if standardize else "nostd" config_name = ( f"{crop_type}-{test_year}-{n_train_years}-{n_past_years}-{std_str}" ) super().__init__( name=config_name, **kwargs, ) self.crop_type = crop_type self.test_year = test_year self.n_train_years = n_train_years self.n_past_years = n_past_years self.data_dir = data_dir self.standardize = standardize class CropYieldDataset(datasets.GeneratorBasedBuilder): """Crop Yield Dataset with weather and land surface data.""" VERSION = datasets.Version("1.0.0") BUILDER_CONFIG_CLASS = CropYieldConfig config: CropYieldConfig # Type annotation for config BUILDER_CONFIGS = [ CropYieldConfig( crop_type="soybean", description="Soybean yield prediction dataset with default parameters", ), CropYieldConfig( crop_type="corn", description="Corn yield prediction dataset with default parameters", ), CropYieldConfig( crop_type="wheat", description="Wheat yield prediction dataset with default 
parameters", ), CropYieldConfig( crop_type="sunflower", description="Sunflower yield prediction dataset with default parameters", ), ] DEFAULT_CONFIG_NAME = "soybean-2018-10-5-std" def _create_builder_config( self, config_name=None, custom_features=None, **config_kwargs, ): """Create a BuilderConfig from config_kwargs. This method allows passing parameters directly to load_dataset() like: load_dataset("path", crop_type="corn", test_year=2017, ...) """ # If config_name is provided and matches existing config, use it if config_name and config_name in [ config.name for config in self.BUILDER_CONFIGS ]: for config in self.BUILDER_CONFIGS: if config.name == config_name: return config, config_name # Otherwise, create a new config from the provided parameters if config_kwargs: # Create new config with provided parameters config = CropYieldConfig(**config_kwargs) return config, config.name # Fall back to default behavior return super()._create_builder_config( config_name=config_name, custom_features=custom_features, **config_kwargs, ) def _info(self): # Get n_past_years from config, with fallback to default n_past_years = ( self.config.n_past_years if self.config.n_past_years is not None else 5 ) # Calculate concrete shapes based on n_past_years seq_len = 52 * (n_past_years + 1) # 52 weeks per year * number of years n_weather_vars = ( 5 # precipitation, reference_et, snow_lwe, solar_radiation, t2m_max ) n_land_surface_vars = 3 # lai_high, lai_low, ndvi n_soil_vars = 8 # Different soil properties soil_depths = 6 # Different depth layers features = datasets.Features( { "weather": datasets.Array2D( shape=(seq_len, n_weather_vars), dtype="float32" ), "land_surface": datasets.Array2D( shape=(seq_len, n_land_surface_vars), dtype="float32" ), "land_surface_mask": datasets.Array2D( shape=(seq_len, n_land_surface_vars), dtype="bool" ), "soil": datasets.Array2D( shape=(soil_depths, n_soil_vars), dtype="float32" ), "coords": datasets.Array2D(shape=(1, 2), dtype="float32"), "years": datasets.Array2D(shape=(n_past_years + 1, 1), dtype="float32"), "y_past": datasets.Sequence( feature=datasets.Value("float32"), length=n_past_years ), "y": datasets.Sequence(feature=datasets.Value("float32"), length=1), } ) return datasets.DatasetInfo( description="Crop yield prediction dataset with weather and land surface data", features=features, ) def _split_generators(self, dl_manager): # Get configuration parameters config = self.config if not isinstance(config, CropYieldConfig): # Fallback to default values if config is not CropYieldConfig logging.warning( "Config is not CropYieldConfig, using default values for soybean" ) test_year = 2018 n_train_years = 10 n_past_years = 5 data_dir = "./" standardize = True crop = "soybean" else: test_year = config.test_year n_train_years = config.n_train_years n_past_years = config.n_past_years data_dir = config.data_dir standardize = config.standardize crop = config.crop_type # Use crop_type instead of name # Ensure data_dir is not None and has proper format if data_dir is None: data_dir = "./" elif not data_dir.endswith("/"): data_dir += "/" # Read the dataset crop_df = self._read_crop_dataset(data_dir, crop) return [ datasets.SplitGenerator( name="train", gen_kwargs={ "data": crop_df, "test_year": test_year, "n_train_years": n_train_years, "n_past_years": n_past_years, "crop": crop, "standardize": standardize, "is_test": False, }, ), datasets.SplitGenerator( name="test", gen_kwargs={ "data": crop_df, "test_year": test_year, "n_train_years": n_train_years, "n_past_years": n_past_years, 
"crop": crop, "standardize": standardize, "is_test": True, }, ), ] def _generate_examples( self, data, test_year, n_train_years, n_past_years, crop, standardize, is_test ): # Process and standardize data processed_data = self._process_data( data, test_year, n_train_years, crop, standardize ) # Create dataset samples dataset_samples = self._create_dataset_samples( processed_data, test_year, n_train_years, n_past_years, crop, is_test ) for idx, sample in enumerate(dataset_samples): yield idx, sample def _read_crop_dataset(self, data_dir: str, crop: str): """Load and merge separate CSV files for Argentina data""" # Define file paths for Argentina processed CSVs csv_dir = os.path.join(data_dir, "data", "argentina", "processed", "csvs") # Load crop yield data crop_file = f"crop_{crop}_yield_1970-2024.csv" if crop == "wheat": crop_file = f"crop_{crop}_yield_1970-2025.csv" crop_path = os.path.join(csv_dir, crop_file) crop_df = pd.read_csv(crop_path) # Load weather data files weather_files = { "precipitation": "weather_1979-2024_precipitation_weekly_weighted_admin2.csv", "reference_et": "weather_1979-2024_reference_et_weekly_weighted_admin2.csv", "snow_lwe": "weather_1979-2024_snow_lwe_weekly_weighted_admin2.csv", "solar_radiation": "weather_1979-2024_solar_radiation_weekly_weighted_admin2.csv", "t2m_max": "weather_1979-2024_t2m_max_weekly_weighted_admin2.csv", } # Load land surface data files land_surface_files = { "lai_high": "land_surface_1979-2024_lai_high_weekly_weighted_admin2.csv", "lai_low": "land_surface_1979-2024_lai_low_weekly_weighted_admin2.csv", "ndvi": "land_surface_1982-2024_ndvi_weekly_weighted_admin2.csv", } # Load soil data files soil_files = { "cec": "soil_cec_weighted_admin2.csv", "coarse_fragments": "soil_coarse_fragments_weighted_admin2.csv", "nitrogen": "soil_nitrogen_weighted_admin2.csv", "organic_carbon": "soil_organic_carbon_weighted_admin2.csv", "organic_carbon_density": "soil_organic_carbon_density_weighted_admin2.csv", "ph_h2o": "soil_ph_h2o_weighted_admin2.csv", "sand": "soil_sand_weighted_admin2.csv", "silt": "soil_silt_weighted_admin2.csv", } # Load all weather and land surface data weather_dfs = {} for var_name, filename in weather_files.items(): file_path = os.path.join(csv_dir, filename) df = pd.read_csv(file_path) weather_dfs[var_name] = df land_surface_dfs = {} for var_name, filename in land_surface_files.items(): file_path = os.path.join(csv_dir, filename) df = pd.read_csv(file_path) land_surface_dfs[var_name] = df soil_dfs = {} for var_name, filename in soil_files.items(): file_path = os.path.join(csv_dir, filename) if os.path.exists(file_path): df = pd.read_csv(file_path) soil_dfs[var_name] = df else: logging.warning(f"Soil file {filename} not found, skipping") # Create location identifier for merging crop_df["loc_ID"] = crop_df["admin_level_1"] + "_" + crop_df["admin_level_2"] # Start with crop data as base merged_df = crop_df.copy() # Add latitude and longitude from weather data (they should be consistent) if "precipitation" in weather_dfs: precip_df = weather_dfs["precipitation"] precip_df["loc_ID"] = ( precip_df["admin_level_1"] + "_" + precip_df["admin_level_2"] ) # Add lat/lng to merged_df lat_lng_df = precip_df[["loc_ID", "year", "latitude", "longitude"]].copy() merged_df = merged_df.merge(lat_lng_df, on=["loc_ID", "year"], how="left") merged_df = merged_df.rename( columns={"latitude": "lat", "longitude": "lng"} ) # Merge weather data for var_name, df in weather_dfs.items(): df["loc_ID"] = df["admin_level_1"] + "_" + df["admin_level_2"] # Get weekly 
    def _read_crop_dataset(self, data_dir: str, crop: str):
        """Load and merge the separate CSV files for the Argentina data."""
        # Directory containing the processed Argentina CSVs
        csv_dir = os.path.join(data_dir, "data", "argentina", "processed", "csvs")

        # Load crop yield data
        crop_file = f"crop_{crop}_yield_1970-2024.csv"
        if crop == "wheat":
            crop_file = f"crop_{crop}_yield_1970-2025.csv"
        crop_path = os.path.join(csv_dir, crop_file)
        crop_df = pd.read_csv(crop_path)

        # Weather data files
        weather_files = {
            "precipitation": "weather_1979-2024_precipitation_weekly_weighted_admin2.csv",
            "reference_et": "weather_1979-2024_reference_et_weekly_weighted_admin2.csv",
            "snow_lwe": "weather_1979-2024_snow_lwe_weekly_weighted_admin2.csv",
            "solar_radiation": "weather_1979-2024_solar_radiation_weekly_weighted_admin2.csv",
            "t2m_max": "weather_1979-2024_t2m_max_weekly_weighted_admin2.csv",
        }

        # Land surface data files
        land_surface_files = {
            "lai_high": "land_surface_1979-2024_lai_high_weekly_weighted_admin2.csv",
            "lai_low": "land_surface_1979-2024_lai_low_weekly_weighted_admin2.csv",
            "ndvi": "land_surface_1982-2024_ndvi_weekly_weighted_admin2.csv",
        }

        # Soil data files
        soil_files = {
            "cec": "soil_cec_weighted_admin2.csv",
            "coarse_fragments": "soil_coarse_fragments_weighted_admin2.csv",
            "nitrogen": "soil_nitrogen_weighted_admin2.csv",
            "organic_carbon": "soil_organic_carbon_weighted_admin2.csv",
            "organic_carbon_density": "soil_organic_carbon_density_weighted_admin2.csv",
            "ph_h2o": "soil_ph_h2o_weighted_admin2.csv",
            "sand": "soil_sand_weighted_admin2.csv",
            "silt": "soil_silt_weighted_admin2.csv",
        }

        # Load all weather and land surface data
        weather_dfs = {}
        for var_name, filename in weather_files.items():
            file_path = os.path.join(csv_dir, filename)
            weather_dfs[var_name] = pd.read_csv(file_path)

        land_surface_dfs = {}
        for var_name, filename in land_surface_files.items():
            file_path = os.path.join(csv_dir, filename)
            land_surface_dfs[var_name] = pd.read_csv(file_path)

        soil_dfs = {}
        for var_name, filename in soil_files.items():
            file_path = os.path.join(csv_dir, filename)
            if os.path.exists(file_path):
                soil_dfs[var_name] = pd.read_csv(file_path)
            else:
                logging.warning(f"Soil file {filename} not found, skipping")

        # Create a location identifier for merging
        crop_df["loc_ID"] = crop_df["admin_level_1"] + "_" + crop_df["admin_level_2"]

        # Start with the crop data as the base
        merged_df = crop_df.copy()

        # Add latitude and longitude from the weather data (consistent across files)
        if "precipitation" in weather_dfs:
            precip_df = weather_dfs["precipitation"]
            precip_df["loc_ID"] = (
                precip_df["admin_level_1"] + "_" + precip_df["admin_level_2"]
            )
            lat_lng_df = precip_df[["loc_ID", "year", "latitude", "longitude"]].copy()
            merged_df = merged_df.merge(lat_lng_df, on=["loc_ID", "year"], how="left")
            merged_df = merged_df.rename(
                columns={"latitude": "lat", "longitude": "lng"}
            )

        # Merge weather data
        for var_name, df in weather_dfs.items():
            df["loc_ID"] = df["admin_level_1"] + "_" + df["admin_level_2"]

            # Get the weekly columns for this variable; sort numerically so
            # week_10 follows week_9 regardless of CSV column order
            week_cols = [
                col for col in df.columns if col.startswith(f"{var_name}_week_")
            ]
            week_cols.sort(key=lambda c: int(c.rsplit("_", 1)[1]))

            # Rename columns to the expected wide format W_<var_index>_<week>
            var_index = list(weather_files.keys()).index(var_name) + 1  # 1-indexed
            rename_dict = {
                col: f"W_{var_index}_{i}" for i, col in enumerate(week_cols, 1)
            }

            df_renamed = df[["loc_ID", "year"] + week_cols].rename(columns=rename_dict)
            merged_df = merged_df.merge(df_renamed, on=["loc_ID", "year"], how="left")

        # Merge land surface data
        for var_name, df in land_surface_dfs.items():
            df["loc_ID"] = df["admin_level_1"] + "_" + df["admin_level_2"]

            # Get the weekly columns for this variable, in week order
            week_cols = [
                col for col in df.columns if col.startswith(f"{var_name}_week_")
            ]
            week_cols.sort(key=lambda c: int(c.rsplit("_", 1)[1]))

            # Continue the W_ indexing from where the weather variables left off
            var_index = (
                len(weather_files) + list(land_surface_files.keys()).index(var_name) + 1
            )
            rename_dict = {
                col: f"W_{var_index}_{i}" for i, col in enumerate(week_cols, 1)
            }

            df_renamed = df[["loc_ID", "year"] + week_cols].rename(columns=rename_dict)
            merged_df = merged_df.merge(df_renamed, on=["loc_ID", "year"], how="left")

        # Merge soil data (soil files carry per-depth columns)
        for var_name, df in soil_dfs.items():
            df["loc_ID"] = df["admin_level_1"] + "_" + df["admin_level_2"]

            # Get the depth columns for this variable (format like cec_0_5cm, cec_5_15cm, ...)
            depth_cols = [
                col
                for col in df.columns
                if col.startswith(f"{var_name}_") and "cm" in col
            ]

            if depth_cols:
                # Soil columns use their own S_ prefix, so index them from 1:
                # _create_dataset_samples expects S_1_* through S_8_*. (Continuing
                # the weather/land-surface index here would produce S_9..S_16 and
                # break the lookup downstream.)
                var_index = list(soil_files.keys()).index(var_name) + 1
                rename_dict = {
                    col: f"S_{var_index}_{i}" for i, col in enumerate(depth_cols, 1)
                }

                df_renamed = df[["loc_ID"] + depth_cols].rename(columns=rename_dict)
                # Soil properties are static, so merge on loc_ID only
                merged_df = merged_df.merge(df_renamed, on=["loc_ID"], how="left")

        # Sort by location and year
        merged_df = merged_df.sort_values(["loc_ID", "year"])

        logging.info(f"Loaded {len(merged_df)} records for {crop} from Argentina data")
        logging.info(
            f"Data covers years {merged_df['year'].min()}-{merged_df['year'].max()}"
        )
        logging.info(f"Number of unique locations: {merged_df['loc_ID'].nunique()}")

        return merged_df
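
    # Wide-format naming produced by _read_crop_dataset above, illustrated for a
    # single merged row: weather fills W_1_1 ... W_5_52 (5 variables x 52 weeks),
    # land surface continues as W_6_1 ... W_8_52, and soil fills S_1_1 ... S_8_6
    # (8 properties x 6 depth layers). For example, W_1_10 is precipitation in
    # week 10 and S_6_1 is ph_h2o at the shallowest depth layer.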
    def _process_data(self, data, test_year, n_train_years, crop, standardize):
        start_year = test_year - n_train_years
        data = data[data["year"] > 1981.0].copy()

        # Drop rows with missing yield values for the given crop
        yield_col = f"{crop}_yield"
        rows_before = len(data)
        data = data.dropna(subset=[yield_col])
        rows_dropped = rows_before - len(data)
        if rows_dropped > 0:
            logging.info(
                f"Dropped {rows_dropped} rows with missing {yield_col} values "
                f"({rows_before} -> {len(data)} rows)"
            )

        if standardize:
            # Standardize the feature columns (z-score computed over all rows)
            cols_to_standardize = [
                col
                for col in data.columns
                if col
                not in [
                    "loc_ID",
                    "year",
                    "country",
                    "admin_level_1",
                    "admin_level_2",
                    "lat",
                    "lng",
                    yield_col,
                ]
            ]
            data[cols_to_standardize] = (
                data[cols_to_standardize] - data[cols_to_standardize].mean()
            ) / data[cols_to_standardize].std()
            data[cols_to_standardize] = data[cols_to_standardize].fillna(0)

            # Standardize the yield using training-year statistics only
            train_data = data[(data["year"] >= start_year) & (data["year"] < test_year)]
            yield_mean, yield_std = (
                train_data[yield_col].mean(),
                train_data[yield_col].std(),
            )
            data[yield_col] = (data[yield_col] - yield_mean) / yield_std
            logging.info(
                f"{crop} yield mean = {yield_mean:.3f} and std = {yield_std:.3f}"
            )
            # Record the statistics so predictions can be de-standardized later
            CROP_YIELD_STATS[crop]["mean"].append(yield_mean)
            CROP_YIELD_STATS[crop]["std"].append(yield_std)

        # Whether standardizing or not, zero-fill any remaining NaN values
        data = data.fillna(0)
        return data
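
    # De-standardization sketch (illustrative; uses the statistics recorded in
    # CROP_YIELD_STATS by _process_data above). A model prediction y_hat made on
    # the standardized scale maps back to original units by inverting the z-score:
    #
    #     stats = CROP_YIELD_STATS["corn"]
    #     y_original = y_hat * stats["std"][-1] + stats["mean"][-1]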
    def _create_dataset_samples(
        self, data, test_year, n_train_years, n_past_years, crop, is_test
    ):
        start_year = test_year - n_train_years
        yield_col = f"{crop}_yield"

        # Column groups for the Argentina data
        weather_cols = [
            f"W_{i}_{j}" for i in range(1, 6) for j in range(1, 53)
        ]  # 5 weather variables, 52 weeks
        land_surface_cols = [
            f"W_{i}_{j}" for i in range(6, 9) for j in range(1, 53)
        ]  # 3 land surface variables, 52 weeks
        soil_cols = [
            f"S_{i}_{j}" for i in range(1, 9) for j in range(1, 7)
        ]  # 8 soil variables, 6 depth layers

        # Filter candidate rows for the requested split
        if is_test:
            candidate_data = data[data["year"] == test_year]
        else:
            candidate_data = data[
                (data["year"] >= start_year) & (data["year"] < test_year)
            ]

        # Keep only rows with a complete history of n_past_years + 1 years
        data_sorted = data.sort_values(["loc_ID", "year"])

        def has_sufficient_history(row):
            year, loc_ID = row["year"], row["loc_ID"]
            loc_data = data_sorted[data_sorted["loc_ID"] == loc_ID]
            loc_data_up_to_year = loc_data[loc_data["year"] <= year]
            return len(loc_data_up_to_year.tail(n_past_years + 1)) == n_past_years + 1

        mask = candidate_data.apply(has_sufficient_history, axis=1)
        valid_candidates = candidate_data[mask]
        index = valid_candidates[["year", "loc_ID"]].reset_index(drop=True)

        dataset_name = "train" if not is_test else "test"
        year_range = (
            f"test year {test_year}"
            if is_test
            else f"training years {start_year}-{test_year - 1}"
        )
        logging.info(
            f"Creating {dataset_name} dataset with {len(index)} samples for "
            f"{year_range} using {crop} yield."
        )

        samples = []
        total_samples = len(index)
        if total_samples == 0:
            logging.warning(f"No samples found for {dataset_name} dataset!")
            return samples

        for idx in tqdm(range(total_samples)):
            year = int(index.iloc[idx]["year"])
            loc_ID = index.iloc[idx]["loc_ID"]

            query_data = data[(data["year"] <= year) & (data["loc_ID"] == loc_ID)].tail(
                n_past_years + 1
            )

            # Extract weather data: (n_years, n_weather_vars, 52)
            weather_data = (
                query_data[weather_cols]
                .values.astype("float32")
                .reshape((-1, N_WEATHER_VARS, 52))
            )
            n_years, n_weather_features, weeks_per_year = weather_data.shape
            total_seq_len = n_years * weeks_per_year

            if total_seq_len > MAX_CONTEXT_LENGTH:
                raise ValueError(
                    f"total_seq_len = {total_seq_len} is greater than "
                    f"MAX_CONTEXT_LENGTH = {MAX_CONTEXT_LENGTH}"
                )

            # Reshape weather: (n_years, n_features, 52) -> (total_seq_len, n_features)
            weather = weather_data.transpose(0, 2, 1).reshape(
                total_seq_len, n_weather_features
            )

            # Extract land surface data and reshape the same way
            land_surface_data = (
                query_data[land_surface_cols]
                .values.astype("float32")
                .reshape((-1, N_LAND_SURFACE_VARS, 52))
            )
            land_surface = land_surface_data.transpose(0, 2, 1).reshape(
                total_seq_len, N_LAND_SURFACE_VARS
            )

            # Create the land surface mask (True where NDVI is missing before 1982)
            land_surface_mask = np.zeros(
                (total_seq_len, N_LAND_SURFACE_VARS), dtype=bool
            )
            year_data = query_data["year"].values
            for i, year_val in enumerate(year_data):
                start_idx = i * weeks_per_year
                end_idx = (i + 1) * weeks_per_year
                if year_val < 1982:  # NDVI starts in 1982
                    land_surface_mask[start_idx:end_idx, 2] = True  # mask NDVI (index 2)

            # Extract soil data (static per location), transposed to
            # (soil_depths, n_soil_vars)
            soil_data = (
                query_data[soil_cols]
                .iloc[0]
                .values.astype("float32")
                .reshape((N_SOIL_VARS, SOIL_DEPTHS))
            )
            soil = soil_data.T

            # Extract coordinates (single location)
            coords = (
                query_data[["lat", "lng"]]
                .iloc[0]
                .values.astype("float32")
                .reshape((1, 2))
            )

            # Extract years: (n_past_years + 1, 1)
            years = query_data["year"].values.astype("float32").reshape((-1, 1))

            # Extract yield data: y is the current year, y_past excludes it
            y = query_data.iloc[-1:][yield_col].values.astype("float32")
            y_past = query_data[yield_col].values.astype("float32")[:-1]

            if len(y_past) < n_past_years:
                raise ValueError(
                    f"Insufficient yield history for location {loc_ID} in year {year}. "
                    f"Need {n_past_years} past years but have {len(y_past)}."
                )

            samples.append(
                {
                    "weather": weather,
                    "land_surface": land_surface,
                    "land_surface_mask": land_surface_mask,
                    "soil": soil,
                    "coords": coords,
                    "years": years,
                    "y_past": y_past,
                    "y": y,
                }
            )

        return samples
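

# A minimal local smoke test, assuming this script is saved next to the
# data/argentina/processed/csvs directory described above. The script path
# "./acyd.py" is a placeholder for wherever this file lives; adjust it (and
# data_dir) for your checkout. This sketch is not part of the dataset script.
if __name__ == "__main__":
    ds = datasets.load_dataset(
        "./acyd.py",  # placeholder path to this file
        crop_type="corn",
        test_year=2020,
        n_train_years=10,
        n_past_years=5,
        standardize=True,
        trust_remote_code=True,
    )
    sample = ds["train"][0]
    # Print the shape of every field in the first training sample
    print({key: np.asarray(value).shape for key, value in sample.items()})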