|
|
import os |
|
|
import pandas as pd |
|
|
import datasets |
|
|
|
|
|
|
|
|
# The 12 CDP topic categories. A label's position in this list is its
# integer class id (see prepare_topic_dataset / label_to_id).
labels = [
    "Adaptation",
    "Buildings",
    "Climate Hazards",
    "Emissions",
    "Energy",
    "Food",
    "Governance and Data Management",
    "Opportunities",
    "Strategy",
    "Transport",
    "Waste",
    "Water",
]

# Metadata columns stripped from the raw CDP response CSVs before saving.
columns_to_drop = [
    "id",
    "Year Reported to CDP",
    "Organization",
    "Parent Section",
    "Section",
    "Question Name",
    "Row Name",
    "Comments",
    "Response Answer",
]
|
|
|
|
|
|
|
|
# Remote locations of the train/val/test CSVs on the Hugging Face hub.
_BASE_URL = (
    "https://huggingface.co/datasets/iceberg-nlp/climabench/resolve/main/"
    "all_data/CDP/Cities/Cities%20Responses"
)
split_paths = {split: f"{_BASE_URL}/{split}.csv" for split in ("train", "val", "test")}
|
|
|
|
|
|
|
|
# Directory where the cleaned per-split CSVs are written.
output_dir = "cdp_topic/cities"
# Create it up front; exist_ok makes re-runs a no-op instead of an error.
os.makedirs(output_dir, exist_ok=True)
|
|
|
|
|
|
|
|
def label_to_id(example):
    """Attach ``label_index``, the position of the example's Label in ``labels``.

    Mutates and returns *example* (a mapping with a "Label" key). Raises
    ValueError if the label is not one of the known categories.
    """
    index = labels.index(example["Label"])
    example["label_index"] = index
    return example
|
|
|
|
|
|
|
|
def prepare_topic_dataset(df: pd.DataFrame, *, label_names=None, drop_columns=None) -> pd.DataFrame:
    """Prepare a topic-classification split from a raw CDP responses frame.

    Drops the metadata columns and adds a ``label_index`` column holding each
    row's position of ``Label`` in the label list.

    Args:
        df: Raw split with at least a "Label" column.
        label_names: Ordered label list defining the id mapping; defaults to
            the module-level ``labels``.
        drop_columns: Columns to remove (missing ones ignored); defaults to the
            module-level ``columns_to_drop``.

    Returns:
        A new DataFrame (the input is not modified).

    Raises:
        ValueError: If any row's "Label" is not in ``label_names``.
    """
    if label_names is None:
        label_names = labels
    if drop_columns is None:
        drop_columns = columns_to_drop

    df = df.drop(columns=drop_columns, errors="ignore")

    # Precompute the mapping once: O(1) per row instead of list.index's
    # O(len(labels)) scan for every row.
    label_to_index = {name: i for i, name in enumerate(label_names)}
    mapped = df["Label"].map(label_to_index)
    if mapped.isna().any():
        unknown = sorted(df["Label"][mapped.isna()].unique())
        raise ValueError(f"Unknown label(s) not in label list: {unknown}")
    df["label_index"] = mapped.astype(int)
    return df
|
|
|
|
|
|
|
|
|
|
|
# Download each split, clean it, and write the result under output_dir.
for split_name, csv_url in split_paths.items():
    print(f"Processing {split_name} split...")

    # Fetch the raw split straight from the Hugging Face hub.
    raw_df = pd.read_csv(csv_url)

    # Strip metadata columns and attach integer label ids.
    cleaned_df = prepare_topic_dataset(raw_df)

    destination = os.path.join(output_dir, f"{split_name}.csv")
    cleaned_df.to_csv(destination, index=False)
    print(f"Saved: {destination}")

print("All datasets processed and saved successfully!")
|
|
|