Upload folder using huggingface_hub
Browse files- .gitattributes +3 -0
- Fake.csv +3 -0
- README +41 -0
- True.csv +3 -0
- combined_preprocessed_news.csv +3 -0
- preprocess.py +297 -0
- test.tsv +0 -0
- train.tsv +0 -0
- valid.tsv +0 -0
.gitattributes
CHANGED
|
@@ -57,3 +57,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
| 57 |
# Video files - compressed
|
| 58 |
*.mp4 filter=lfs diff=lfs merge=lfs -text
|
| 59 |
*.webm filter=lfs diff=lfs merge=lfs -text
|
| 60 |
+
combined_preprocessed_news.csv filter=lfs diff=lfs merge=lfs -text
|
| 61 |
+
Fake.csv filter=lfs diff=lfs merge=lfs -text
|
| 62 |
+
True.csv filter=lfs diff=lfs merge=lfs -text
|
Fake.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bebf8bcfe95678bf2c732bf413a2ce5f621af0102c82bf08083b2e5d3c693d0c
|
| 3 |
+
size 62789876
|
README
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
LIAR: A BENCHMARK DATASET FOR FAKE NEWS DETECTION
|
| 2 |
+
|
| 3 |
+
William Yang Wang, "Liar, Liar Pants on Fire": A New Benchmark Dataset for Fake News Detection, to appear in Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (ACL 2017), short paper, Vancouver, BC, Canada, July 30-August 4, ACL.
|
| 4 |
+
=====================================================================
|
| 5 |
+
Description of the TSV format:
|
| 6 |
+
|
| 7 |
+
Column 1: the ID of the statement ([ID].json).
|
| 8 |
+
Column 2: the label.
|
| 9 |
+
Column 3: the statement.
|
| 10 |
+
Column 4: the subject(s).
|
| 11 |
+
Column 5: the speaker.
|
| 12 |
+
Column 6: the speaker's job title.
|
| 13 |
+
Column 7: the state info.
|
| 14 |
+
Column 8: the party affiliation.
|
| 15 |
+
Column 9-13: the total credit history count, including the current statement.
|
| 16 |
+
9: barely true counts.
|
| 17 |
+
10: false counts.
|
| 18 |
+
11: half true counts.
|
| 19 |
+
12: mostly true counts.
|
| 20 |
+
13: pants on fire counts.
|
| 21 |
+
Column 14: the context (venue / location of the speech or statement).
|
| 22 |
+
|
| 23 |
+
Note that we do not provide the full-text verdict report in this current version of the dataset,
|
| 24 |
+
but you can use the following command to access the full verdict report and links to the source documents:
|
| 25 |
+
wget http://www.politifact.com//api/v/2/statement/[ID]/?format=json
|
| 26 |
+
|
| 27 |
+
======================================================================
|
| 28 |
+
The original sources retain the copyright of the data.
|
| 29 |
+
|
| 30 |
+
Note that there are absolutely no guarantees with this data,
|
| 31 |
+
and we provide this dataset "as is",
|
| 32 |
+
but you are welcome to report the issues of the preliminary version
|
| 33 |
+
of this data.
|
| 34 |
+
|
| 35 |
+
You are allowed to use this dataset for research purposes only.
|
| 36 |
+
|
| 37 |
+
For more questions about the dataset, please contact:
|
| 38 |
+
William Wang, william@cs.ucsb.edu
|
| 39 |
+
|
| 40 |
+
v1.0 04/23/2017
|
| 41 |
+
|
True.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ba0844414a65dc6ae7402b8eee5306da24b6b56488d6767135af466c7dcb2775
|
| 3 |
+
size 53582940
|
combined_preprocessed_news.csv
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1ca3c8fc7ff82cdcaa41fe00b7f71d02758537fe67ccbd771b1a467895498e7b
|
| 3 |
+
size 201324100
|
preprocess.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# preprocess_data.py
|
| 2 |
+
import pandas as pd
|
| 3 |
+
import numpy as np
|
| 4 |
+
import re
|
| 5 |
+
import nltk
|
| 6 |
+
from nltk.corpus import stopwords
|
| 7 |
+
import string # To remove punctuation more easily
|
| 8 |
+
import os # To check for file existence
|
| 9 |
+
|
| 10 |
+
# --- Configuration ---
# Adjust these paths if your files are located elsewhere.
TRAIN_TSV_PATH = 'train.tsv'
TEST_TSV_PATH = 'test.tsv'
FAKE_CSV_PATH = 'Fake.csv'   # Adjust filename if needed (e.g., fake.csv)
TRUE_CSV_PATH = 'True.csv'   # Adjust filename if needed (e.g., true.csv)
OUTPUT_CSV_PATH = 'combined_preprocessed_news.csv'

print("--- Data Preprocessing Script Started ---")

# --- Check that every required input file exists before doing any work ---
required_files = [TRAIN_TSV_PATH, TEST_TSV_PATH, FAKE_CSV_PATH, TRUE_CSV_PATH]
missing_files = [f_path for f_path in required_files if not os.path.exists(f_path)]
for f_path in missing_files:
    print(f"Error: Required input file not found: {f_path}")

if missing_files:
    print("\nPlease ensure all required input files are in the same directory as the script.")
    print("Script aborted.")
    # Exit with a non-zero status so shells/CI see the failure; the
    # original bare `exit()` reported success (status 0) on this error path.
    raise SystemExit(1)
else:
    print("All required input files found.")
|
| 34 |
+
|
| 35 |
+
# Column names for the LIAR TSV files, in file order (the TSVs carry no header row).
tsv_column_names = [
    'id',
    'label',
    'statement',
    'subject',
    'speaker',
    'speaker_job_title',
    'state_info',
    'party_affiliation',
    'barely_true_counts',
    'false_counts',
    'half_true_counts',
    'mostly_true_counts',
    'pants_on_fire_counts',
    'context',
]

# Target schema of the unified output dataset.
# Credit-history count columns can be appended here if desired:
#   'barely_true_counts', 'false_counts', 'half_true_counts',
#   'mostly_true_counts', 'pants_on_fire_counts'
final_columns = [
    'text',
    'label',
    'subject',
    'speaker',
    'speaker_job_title',
    'state_info',
    'party_affiliation',
    'context',
    'date',
    'source',
    'processed_text',
    'encoded_label',
]
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
# --- Download NLTK data (if not already present) ---
try:
    # nltk.data.find raises LookupError when the resource is absent.
    nltk.data.find('corpora/stopwords')
    print("\nNLTK stopwords found.")
except LookupError:
    # BUGFIX: the original caught nltk.downloader.DownloadError, which
    # nltk.data.find never raises, so a missing corpus crashed the script
    # with an unhandled LookupError instead of triggering this download.
    print("\nNLTK stopwords not found. Downloading...")
    try:
        nltk.download('stopwords')
        print("NLTK stopwords downloaded.")
    except Exception as e:
        print(f"Error downloading NLTK stopwords: {e}")
        print("Please check your internet connection or firewall settings.")
        print("Script aborted.")
        raise SystemExit(1)  # non-zero status: the script cannot proceed

# English stopword set (used only by the optional stopword-removal step
# in preprocess_text, which is currently commented out).
stop_words = set(stopwords.words('english'))
|
| 69 |
+
|
| 70 |
+
# --- Text Preprocessing Function ---
def preprocess_text(text):
    """Normalize one document of raw news text.

    Steps: lowercase; strip URLs, HTML tags and square-bracketed metadata
    (e.g. "[Reuters]"); remove all ASCII punctuation; collapse whitespace.
    Non-string input (such as NaN) yields an empty string.

    Optional extras (number removal, stopword filtering via `stop_words`,
    lemmatization/stemming) are intentionally not applied.
    """
    if not isinstance(text, str):
        return ""

    cleaned = text.lower()

    # Strip URLs, then HTML tags, then bracketed metadata, in that order.
    for pattern in (r'http\S+|www\S+|https\S+', r'<.*?>', r'\[.*?\]'):
        cleaned = re.sub(pattern, '', cleaned)

    # Drop every ASCII punctuation character in a single C-level pass.
    cleaned = cleaned.translate(str.maketrans('', '', string.punctuation))

    # Collapse runs of whitespace and trim the ends.
    return re.sub(r'\s+', ' ', cleaned).strip()
|
| 106 |
+
|
| 107 |
+
# --- Load and Process LIAR Dataset (TSV files) ---
print("\nProcessing LIAR dataset (train.tsv, test.tsv)...")
try:
    train_part = pd.read_csv(TRAIN_TSV_PATH, sep='\t', header=None, names=tsv_column_names)
    test_part = pd.read_csv(TEST_TSV_PATH, sep='\t', header=None, names=tsv_column_names)
    df_liar = pd.concat([train_part, test_part], ignore_index=True)
    print(f"LIAR dataset loaded. Shape: {df_liar.shape}")

    # Collapse the six LIAR truthfulness ratings into a binary label:
    # REAL = true / mostly-true / half-true; FAKE = false / barely-true / pants-fire.
    label_mapping_liar = {
        'true': 'REAL',
        'mostly-true': 'REAL',
        'half-true': 'REAL',
        'false': 'FAKE',
        'barely-true': 'FAKE',
        'pants-fire': 'FAKE',
    }
    df_liar['label'] = df_liar['label'].map(label_mapping_liar)

    # Align with the unified schema: the claim text lives in 'text'.
    df_liar = df_liar.rename(columns={'statement': 'text'})

    # Tag rows with their origin so combined data stays traceable.
    df_liar['source'] = 'LIAR'

    # Keep only the columns the unified dataset uses; credit-history
    # counts ('barely_true_counts', 'false_counts', 'half_true_counts',
    # 'mostly_true_counts', 'pants_on_fire_counts') could be added here.
    liar_cols_to_keep = [
        'text', 'label', 'subject', 'speaker', 'speaker_job_title',
        'state_info', 'party_affiliation', 'context', 'source',
    ]
    df_liar = df_liar[[col for col in liar_cols_to_keep if col in df_liar.columns]]
    print(f"LIAR dataset processed. Shape: {df_liar.shape}")

except FileNotFoundError as e:
    print(f"Error loading TSV file: {e}. Please check file paths.")
    df_liar = pd.DataFrame()  # empty frame so the combine step can proceed
except Exception as e:
    print(f"An error occurred processing TSV files: {e}")
    df_liar = pd.DataFrame()
|
| 153 |
+
|
| 154 |
+
# --- Load and Process Kaggle Fake/True News Dataset (CSV files) ---
print("\nProcessing Kaggle dataset (Fake.csv, True.csv)...")
dfs_kaggle = []
try:
    # Fake half: every row is labelled FAKE.
    fake_part = pd.read_csv(FAKE_CSV_PATH)
    fake_part['label'] = 'FAKE'
    fake_part['source'] = 'KaggleFake'
    # Fold the headline into the body text; fillna guards against missing cells.
    fake_part['text'] = fake_part['title'].fillna('') + " " + fake_part['text'].fillna('')
    dfs_kaggle.append(fake_part[['text', 'label', 'subject', 'date', 'source']])
    print(f"Loaded Fake.csv. Shape: {fake_part.shape}")

    # True half: every row is labelled REAL.
    true_part = pd.read_csv(TRUE_CSV_PATH)
    true_part['label'] = 'REAL'
    true_part['source'] = 'KaggleTrue'
    true_part['text'] = true_part['title'].fillna('') + " " + true_part['text'].fillna('')
    dfs_kaggle.append(true_part[['text', 'label', 'subject', 'date', 'source']])
    print(f"Loaded True.csv. Shape: {true_part.shape}")

    df_kaggle = pd.concat(dfs_kaggle, ignore_index=True)
    print(f"Kaggle dataset processed. Shape: {df_kaggle.shape}")

except FileNotFoundError as e:
    print(f"Error loading CSV file: {e}. Please check file paths.")
    df_kaggle = pd.DataFrame()  # empty frame so the combine step can proceed
except Exception as e:
    print(f"An error occurred processing CSV files: {e}")
    df_kaggle = pd.DataFrame()
|
| 185 |
+
|
| 186 |
+
# --- Combine Datasets ---
# Runs only if at least one of the two sources loaded; otherwise falls
# through to the `else` at the bottom and saves nothing.
print("\nCombining LIAR and Kaggle datasets...")
if not df_liar.empty or not df_kaggle.empty:
    df_combined = pd.concat([df_liar, df_kaggle], ignore_index=True, sort=False)
    print(f"Initial combined shape: {df_combined.shape}")

    # Rows without text are useless for training: drop them rather than fill.
    initial_text_na = df_combined['text'].isna().sum()
    if initial_text_na > 0:
        print(f"Warning: Found {initial_text_na} missing values in 'text' column. Dropping these rows.")
        df_combined.dropna(subset=['text'], inplace=True)

    # Likewise drop rows whose label did not survive mapping/loading.
    initial_label_na = df_combined['label'].isna().sum()
    if initial_label_na > 0:
        print(f"Warning: Found {initial_label_na} missing values in 'label' column. Dropping these rows.")
        df_combined.dropna(subset=['label'], inplace=True)

    # Metadata columns that only one source provides get a placeholder so
    # the combined frame carries no NaNs in them.
    fill_values = {
        'subject': 'Unknown',
        'speaker': 'Unknown',
        'speaker_job_title': 'Unknown',
        'state_info': 'Unknown',
        'party_affiliation': 'Unknown',
        'context': 'Unknown',
        'date': 'Unknown',  # or pd.NaT if a proper date null is preferred
    }
    for col, value in fill_values.items():
        if col in df_combined.columns:
            # BUGFIX: assign back instead of df[col].fillna(..., inplace=True).
            # The inplace form on a column selection is chained assignment,
            # deprecated in pandas 2.x and a silent no-op under copy-on-write.
            df_combined[col] = df_combined[col].fillna(value)

    print(f"Shape after initial combining & handling critical NAs: {df_combined.shape}")

    # --- Apply Text Preprocessing ---
    print("\nApplying text preprocessing to 'text' column...")
    # Cast to str first so preprocess_text never sees residual non-strings.
    df_combined['text'] = df_combined['text'].astype(str)
    df_combined['processed_text'] = df_combined['text'].apply(preprocess_text)
    print("Text preprocessing complete.")

    # --- Encode Labels ---
    # IMPORTANT: downstream models depend on this exact mapping.
    print("Encoding labels ('FAKE': 0, 'REAL': 1)...")
    label_encoding_map = {'FAKE': 0, 'REAL': 1}
    df_combined['encoded_label'] = df_combined['label'].map(label_encoding_map)

    # Any label outside the map becomes NaN: report and drop those rows.
    label_encoding_na = df_combined['encoded_label'].isna().sum()
    if label_encoding_na > 0:
        invalid_labels = df_combined[df_combined['encoded_label'].isna()]['label'].unique()
        print(f"Warning: {label_encoding_na} rows could not be label encoded.")
        print(f"Invalid or unexpected labels found: {invalid_labels}. Dropping these rows.")
        df_combined.dropna(subset=['encoded_label'], inplace=True)
    # Safe to cast now: every remaining encoded_label is 0 or 1.
    df_combined['encoded_label'] = df_combined['encoded_label'].astype(int)

    print("Label encoding complete.")
    print(f"\nValue counts for 'label':\n{df_combined['label'].value_counts()}")
    print(f"\nValue counts for 'encoded_label':\n{df_combined['encoded_label'].value_counts()}")

    # --- Final Touches ---
    # Drop rows whose text was cleaned away entirely.
    empty_processed = (df_combined['processed_text'] == '').sum()
    if empty_processed > 0:
        print(f"Dropping {empty_processed} rows where processed_text became empty after cleaning.")
        df_combined = df_combined[df_combined['processed_text'] != '']

    # Keep only (and order by) the target-schema columns that exist.
    final_columns_present = [col for col in final_columns if col in df_combined.columns]
    df_final = df_combined[final_columns_present].copy()

    # De-duplicate identical articles by their cleaned text.
    initial_rows = len(df_final)
    df_final.drop_duplicates(subset=['processed_text'], keep='first', inplace=True)
    rows_dropped = initial_rows - len(df_final)
    if rows_dropped > 0:
        print(f"Dropped {rows_dropped} duplicate rows based on 'processed_text'.")

    print(f"\nFinal dataset shape: {df_final.shape}")
    print(f"Final dataset columns: {df_final.columns.tolist()}")

    # --- Save the Preprocessed Data ---
    try:
        df_final.to_csv(OUTPUT_CSV_PATH, index=False)
        print(f"\nSuccessfully saved combined and preprocessed data to '{OUTPUT_CSV_PATH}'")
    except Exception as e:
        print(f"\nError saving the final CSV file: {e}")

else:
    print("\nCould not process input files. Combined dataset is empty. Nothing to save.")

print("\n--- Data Preprocessing Script Finished ---")
|
test.tsv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
train.tsv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
valid.tsv
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|