File size: 1,994 Bytes
43a1d8a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
import os
import pandas as pd
import datasets

# Topic labels for the CDP Cities classification task; a row's "Label" value
# is mapped to its position in this list, so ORDER MATTERS — reordering
# changes every saved label_index.
labels = ["Adaptation", "Buildings", "Climate Hazards", "Emissions", "Energy", "Food",
          "Governance and Data Management", "Opportunities", "Strategy", "Transport",
          "Waste", "Water"]

# Metadata columns stripped from the raw CSV before saving; only the text
# columns and the derived label_index are kept downstream.
columns_to_drop = ["id", "Year Reported to CDP", "Organization", "Parent Section",
                   "Section", "Question Name", "Row Name", "Comments", "Response Answer"]

# Remote CSVs for each split, hosted on the ClimaBench Hugging Face dataset repo.
split_paths = {
    "train": "https://huggingface.co/datasets/iceberg-nlp/climabench/resolve/main/all_data/CDP/Cities/Cities%20Responses/train.csv",
    "val": "https://huggingface.co/datasets/iceberg-nlp/climabench/resolve/main/all_data/CDP/Cities/Cities%20Responses/val.csv",
    "test": "https://huggingface.co/datasets/iceberg-nlp/climabench/resolve/main/all_data/CDP/Cities/Cities%20Responses/test.csv"
}

# Local directory where the processed per-split CSVs are written.
output_dir = "cdp_topic/cities"
os.makedirs(output_dir, exist_ok=True)  # Create directory if it doesn't exist


def label_to_id(example):
    """Attach a numeric label index to a single example.

    Looks up the example's "Label" value in the module-level ``labels``
    list and stores its position under "label_index". Mutates *example*
    in place and returns it (datasets-style map callback).

    Raises:
        ValueError: if the label is not present in ``labels``.
    """
    category = example["Label"]
    example["label_index"] = labels.index(category)
    return example


def prepare_topic_dataset(df: pd.DataFrame) -> pd.DataFrame:
    """Prepare a raw CDP Cities split for topic classification.

    Drops the metadata columns listed in the module-level ``columns_to_drop``
    (silently skipping any that are absent) and adds a "label_index" column
    holding each row's position of "Label" in the module-level ``labels`` list.

    Args:
        df: Raw split as read from the source CSV; must contain a "Label" column.

    Returns:
        A new DataFrame with metadata columns removed and "label_index" added.

    Raises:
        ValueError: if any "Label" value is not present in ``labels``.
    """
    df = df.drop(columns=columns_to_drop, errors="ignore")  # Remove unnecessary columns

    # Build the lookup once: dict lookup is O(1) per row, whereas the previous
    # per-row labels.index() call was a linear scan of the labels list.
    label_to_index = {label: i for i, label in enumerate(labels)}

    # Fail loudly with the offending values instead of an opaque
    # "'X' is not in list" from list.index (same ValueError type as before).
    unknown = set(df["Label"]) - label_to_index.keys()
    if unknown:
        raise ValueError(f"Unknown label(s) not in labels list: {sorted(unknown)}")

    df["label_index"] = df["Label"].map(label_to_index)
    return df


# Process and save each split
# Download, clean, and persist every split defined in split_paths.
for split, source_url in split_paths.items():
    print(f"Processing {split} split...")

    # Fetch the remote CSV and apply column pruning + label indexing in one go.
    prepared = prepare_topic_dataset(pd.read_csv(source_url))

    # Write the cleaned split next to its siblings under output_dir.
    destination = os.path.join(output_dir, f"{split}.csv")
    prepared.to_csv(destination, index=False)
    print(f"Saved: {destination}")

print("All datasets processed and saved successfully!")