# russian-spam-fork / join_datasets.py
# (from benzlokzik's Hugging Face fork; commit 01db8b7,
#  "join datasets and use bool instead other ways")
# /// script
# requires-python = ">=3.12"
# dependencies = [
# "pandas",
# "pyarrow",
# ]
# ///
"""
Runs with uv
"""
import pandas as pd
from pathlib import Path
def parse_txt_file(txt_path: str) -> pd.DataFrame:
    """Parse a fastText-style labeled text file into a DataFrame.

    Each non-empty line is expected to start with ``__label__ham`` or
    ``__label__spam`` followed by the message text. Lines with any other
    prefix are skipped, as are lines whose text is empty once the label
    prefix is removed.

    Args:
        txt_path: Path to a UTF-8 encoded label file.

    Returns:
        DataFrame with a ``text`` (str) column and a boolean ``label``
        column, where ``True`` means spam and ``False`` means ham.
    """
    texts: list[str] = []
    labels: list[bool] = []
    with open(txt_path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            if line.startswith("__label__ham"):
                label = False
                text = line.removeprefix("__label__ham").strip()
            elif line.startswith("__label__spam"):
                label = True
                text = line.removeprefix("__label__spam").strip()
            else:
                # Unlabeled / malformed line: ignore.
                continue
            if text:  # drop lines that carried a label but no content
                texts.append(text)
                labels.append(label)
    return pd.DataFrame({"text": texts, "label": labels})
def join_datasets(
    txt_files: list[str],
    parquet_files: list[str],
    output_path: str,
) -> None:
    """Combine fastText-style txt datasets and parquet datasets into one file.

    Missing input files are skipped silently. Parquet inputs may name the
    text column ``message`` (it is renamed to ``text``); inputs lacking a
    ``text`` or ``label`` column are skipped with a message. The combined
    frame is written to ``output_path`` with a boolean ``label`` column.

    Args:
        txt_files: Paths to fastText-style label files (see parse_txt_file).
        parquet_files: Paths to parquet files with text/label columns.
        output_path: Destination parquet path. Overwritten if it exists.
    """
    dfs: list[pd.DataFrame] = []
    for txt_file in txt_files:
        if Path(txt_file).exists():
            df = parse_txt_file(txt_file)
            dfs.append(df)
            print(f"Loaded {len(df)} rows from {txt_file}")
    for parquet_file in parquet_files:
        if Path(parquet_file).exists():
            df = pd.read_parquet(parquet_file)
            if "message" in df.columns:
                df = df.rename(columns={"message": "text"})
            if "text" not in df.columns or "label" not in df.columns:
                print(f"Skipping {parquet_file}: missing 'text' or 'label' column")
                continue
            # .copy() so the astype assignment below writes to an owned
            # frame, avoiding pandas chained-assignment warnings.
            df = df[["text", "label"]].copy()
            df["label"] = df["label"].astype("bool")
            dfs.append(df)
            print(f"Loaded {len(df)} rows from {parquet_file}")
    if not dfs:
        print("No data to combine")
        return
    combined_df = pd.concat(dfs, ignore_index=True)
    combined_df.to_parquet(output_path, index=False)
    print(f"\nCombined dataset saved to {output_path}")
    print(f"Total rows: {len(combined_df)}")
    print(f"Label column dtype: {combined_df['label'].dtype}")
    print(f"Label distribution:\n{combined_df['label'].value_counts().sort_index()}")
if __name__ == "__main__":
txt_files = [
"train_data.txt",
"labeled_dataset_from_hf.txt",
]
parquet_files = [
"processed_combined.parquet",
# hf://datasets/darkQibit/russian-spam-detection/processed_combined.parquet
]
output_path = "processed_combined.parquet"
join_datasets(txt_files, parquet_files, output_path)