"""Convert Exeter/CARDS claim-annotated CSV splits into claim and sub-claim datasets.

For every CSV in the input directory (train/test/validation splits), this script
maps numeric (sub-)claim codes onto their human-readable CARDS taxonomy labels
and writes two derived CSVs per input file: one with top-level claims and one
with sub-claims.
"""
import pandas as pd
import ast
import numpy as np
import os
from pathlib import Path
import argparse

# path="/home/rjalota/climabench_data/CARDS2_multisource_multilabel_data.csv"
# path = "/home/rjalota/climabench_data/data/training"

# Top-level claim code -> human-readable claim label (CARDS taxonomy).
claim_mapping = {
    "0": "No claim",
    "1": "Global warming is not happening",
    "2": "Human greenhouse gases are not causing climate change",
    "3": "Climate impacts/global warming is beneficial/not bad",
    "4": "Climate solutions won’t work",
    "5": "Climate movement/science is unreliable"
}

# Sub-claim code ("<claim>_<sub>") -> human-readable sub-claim label.
# NOTE(review): some codes (e.g. "1_5", "2_2") are absent here; rows carrying
# them map to NaN labels downstream — presumably intentional, confirm.
subclaim_mapping = {
    "0_0": "No claim",
    "1_1": "Ice/permafrost/snow cover isn’t melting",
    "1_2": "We’re heading into an ice age/global cooling",
    "1_3": "Weather is cold/snowing",
    "1_4": "Climate hasn’t warmed/changed over the last (few) decade(s)",
    "1_6": "Sea level rise is exaggerated/not accelerating",
    "1_7": "Extreme weather isn’t increasing/has happened before/isn’t linked to climate change",
    "2_1": "It’s natural cycles/variation",
    "2_3": "There’s no evidence for greenhouse effect/carbon dioxide driving climate change",
    "3_1": "Climate sensitivity is low/negative feedbacks reduce warming",
    "3_2": "Species/plants/reefs aren’t showing climate impacts/are benefiting from climate change",
    "3_3": "CO2 is beneficial/not a pollutant",
    "4_1": "Climate policies (mitigation or adaptation) are harmful",
    "4_2": "Climate policies are ineffective/flawed",
    "4_4": "Clean energy technology/biofuels won’t work",
    "4_5": "People need energy (e.g. from fossil fuels/nuclear)",
    "5_1": "Climate-related science is unreliable/uncertain/unsound (data, methods & models)",
    "5_2": "Climate movement is unreliable/alarmist/corrupt"
}


def parse_args():
    """Parse CLI arguments: input split directory and output directory."""
    parser = argparse.ArgumentParser(description='run binary classifer')
    # BUG FIX: the help string previously contained a raw line break, which
    # made the file unparseable; it is now a single string literal.
    parser.add_argument(
        "--path",
        default="data/training",
        help="path to exeter training dir containing train, test, validation splits",
    )
    parser.add_argument("--out", default="climaEval/exeter/", help="output directory path")
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()
    for filename in os.listdir(args.path):
        print(filename)
        f = os.path.join(args.path, filename)
        df = pd.read_csv(f, header=0)
        # print(df.head())
        # The raw files store the sub-claim code in a column named "claim";
        # rename it, then derive the top-level claim code (prefix before "_")
        # and the two human-readable label columns.
        df.rename(columns={"claim": "sub_claim_code"}, inplace=True)
        df["claim_code"] = df["sub_claim_code"].str.split("_").str[0]
        df["claim"] = df["claim_code"].map(claim_mapping)
        df["sub_claim"] = df["sub_claim_code"].map(subclaim_mapping)

        claim_df = df[['text', 'claim_code', 'claim']]
        subclaim_df = df[['text', 'sub_claim_code', 'sub_claim']]
        # Only the claim-level frame drops unmapped rows; subclaim_df keeps
        # NaN-labelled rows so unknown codes still show in the report below.
        # NOTE(review): asymmetry looks deliberate — confirm.
        claim_df = claim_df.dropna()

        Path(f"{args.out}/claim/").mkdir(parents=True, exist_ok=True)
        Path(f"{args.out}/sub_claim/").mkdir(parents=True, exist_ok=True)
        # BUG FIX: outputs previously went to a literal placeholder path
        # ("(unknown)"), so every input overwrote the same file; each derived
        # CSV is now written under its source file's name.
        claim_df.to_csv(f"{args.out}/claim/{filename}", index=False)
        subclaim_df.to_csv(f"{args.out}/sub_claim/{filename}", index=False)

        # print(claim_df.claim.value_counts())
        print("---")
        print(claim_df.claim_code.value_counts())
        print(sorted(list(subclaim_df["sub_claim_code"].unique())))