"""Download the ClimaBench CDP Cities topic-classification splits, map the
string topic labels to integer indices, drop metadata columns, and save the
cleaned splits as local CSVs under ``cdp_topic/cities``."""

import os

import pandas as pd

# Topic labels; a row's "Label" value is encoded as its index in this list.
labels = ["Adaptation", "Buildings", "Climate Hazards", "Emissions", "Energy", "Food", "Governance and Data Management", "Opportunities", "Strategy", "Transport", "Waste", "Water"]

# Precomputed label -> index lookup: O(1) per row instead of list.index's O(n).
label_to_index = {label: i for i, label in enumerate(labels)}

# Metadata columns not needed for topic classification.
columns_to_drop = ["id", "Year Reported to CDP", "Organization", "Parent Section", "Section", "Question Name", "Row Name", "Comments", "Response Answer"]

# Remote CSV for each dataset split.
split_paths = {
    "train": "https://huggingface.co/datasets/iceberg-nlp/climabench/resolve/main/all_data/CDP/Cities/Cities%20Responses/train.csv",
    "val": "https://huggingface.co/datasets/iceberg-nlp/climabench/resolve/main/all_data/CDP/Cities/Cities%20Responses/val.csv",
    "test": "https://huggingface.co/datasets/iceberg-nlp/climabench/resolve/main/all_data/CDP/Cities/Cities%20Responses/test.csv"
}

# Local directory for the processed CSVs.
output_dir = "cdp_topic/cities"


def label_to_id(example):
    """Map one example's "Label" to its index in ``labels``.

    Stores the index under "label_index" and returns the (mutated) example.
    Raises ValueError if the label is not in ``labels``.
    """
    example["label_index"] = labels.index(example["Label"])
    return example


def prepare_topic_dataset(df: pd.DataFrame) -> pd.DataFrame:
    """Drop metadata columns and add an integer "label_index" column.

    Returns a new DataFrame; raises ValueError if any "Label" value is not
    in ``labels`` (matching the original fail-fast behavior).
    """
    df = df.drop(columns=columns_to_drop, errors="ignore")  # tolerate already-absent columns
    indices = df["Label"].map(label_to_index)
    if indices.isna().any():
        unknown = sorted(set(df.loc[indices.isna(), "Label"]))
        raise ValueError(f"{unknown[0]} is not in list")
    df["label_index"] = indices.astype(int)
    return df


def main() -> None:
    """Download, clean, and save every split listed in ``split_paths``."""
    os.makedirs(output_dir, exist_ok=True)  # create output directory if needed
    for split, url in split_paths.items():
        print(f"Processing {split} split...")
        df = pd.read_csv(url)  # pandas reads the CSV straight from the URL
        df = prepare_topic_dataset(df)
        save_path = os.path.join(output_dir, f"{split}.csv")
        df.to_csv(save_path, index=False)
        print(f"Saved: {save_path}")
    print("All datasets processed and saved successfully!")


if __name__ == "__main__":
    main()