"""
text_utils.py - Shared text cleaning helpers for the Fake News Detection pipeline.

Provides functions for normalizing, cleaning, and featurizing raw text
before it enters any model stage.
"""

import re
import logging
from typing import Optional

import pandas as pd

logger = logging.getLogger(__name__)


def clean_text(text: str) -> str:
    """Clean a single text string for downstream processing.

    Steps applied (in order):
        1. Lowercase
        2. Remove HTML tags
        3. Remove URLs
        4. Remove special characters (keep alphanumeric + basic punctuation)
        5. Normalize whitespace

    Note: Stopwords are **not** removed because LSTM / BERT models need them.

    Args:
        text: Raw input text.

    Returns:
        Cleaned text string.
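
    Example (illustrative; the output simply reflects the steps above):
        >>> clean_text("<p>Visit https://example.com NOW!!!</p>")
        'visit now!!!'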
    """
    if not isinstance(text, str) or len(text.strip()) == 0:
        return ""

    # Lowercase
    text = text.lower()

    # Remove HTML tags
    text = re.sub(r"<[^>]+>", " ", text)

    # Remove URLs
    text = re.sub(r"https?://\S+|www\.\S+", " ", text)

    # Remove special characters (keep letters, digits, spaces, basic punctuation)
    text = re.sub(r"[^a-z0-9\s.,!?;:'\"-]", " ", text)

    # Collapse multiple whitespace into one
    text = re.sub(r"\s+", " ", text).strip()

    return text


def build_full_text(title: Optional[str], text: Optional[str]) -> str:
    """Concatenate title and body text with a period separator.

    Args:
        title: Article title (may be None or empty).
        text:  Article body  (may be None or empty).

    Returns:
        Combined string in the form ``"title. text"`` with graceful handling
        of missing parts.
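
    Example (illustrative):
        >>> build_full_text("Breaking News", "Markets rallied today.")
        'Breaking News. Markets rallied today.'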
    """
    if pd.isna(title):
        title = ""
    if pd.isna(text):
        text = ""

    title = str(title).strip()
    text = str(text).strip()

    if title and text:
        return f"{title}. {text}"
    elif title:
        return title
    elif text:
        return text
    return ""


def word_count(text: str) -> int:
    """Return the number of whitespace-delimited tokens in *text*.

    Args:
        text: Input string (cleaned or raw).

    Returns:
        Integer word count.
    """
    if not text:
        return 0
    return len(text.split())


def text_length_bucket(wc: int) -> str:
    """Classify a word count into a length bucket.

    Args:
        wc: Word count (non-negative integer).

    Returns:
        One of ``"short"`` (< 50), ``"medium"`` (50–300), ``"long"`` (> 300).
    """
    if wc < 50:
        return "short"
    elif wc <= 300:
        return "medium"
    else:
        return "long"


def clean_empty_texts(
    df: pd.DataFrame,
    min_word_count: int = 3,
) -> pd.DataFrame:
    """Remove rows with missing or near-empty text content.

    Rules:
        - Fill NaN in ``title`` and ``text`` columns with empty strings.
        - Build the combined text via ``build_full_text`` (``"title. text"``).
        - Drop rows whose combined word count is below *min_word_count*.
        - Overwrite the ``text`` column with the combined text in the result.
        - Reset the index after dropping.

    Args:
        df:             Input DataFrame (must contain ``title`` and ``text``).
        min_word_count: Minimum number of words required to keep a row.

    Returns:
        Cleaned DataFrame with empty/near-empty rows removed.
        Logs how many rows were dropped.
    """
    before = len(df)

    df = df.copy()
    df["title"] = df["title"].fillna("").astype(str)
    df["text"] = df["text"].fillna("").astype(str)

    # Build combined text for word-count check
    full = df.apply(
        lambda r: build_full_text(r["title"], r["text"]), axis=1
    )
    wc = full.apply(word_count)

    keep_mask = wc >= min_word_count
    dropped = (~keep_mask).sum()

    df_out = df.loc[keep_mask].reset_index(drop=True)

    # Overwrite 'text' with the combined full text so model tokenization
    # and the validation script do not encounter empty strings in 'text'.
    df_out["text"] = full.loc[keep_mask].reset_index(drop=True)

    logger.info(
        "clean_empty_texts: dropped %d / %d rows with word_count < %d",
        dropped, before, min_word_count,
    )
    return df_out


# ─── standalone test ────────────────────────────────────────
if __name__ == "__main__":
    sample = (
        '<p>WASHINGTON (Reuters) – The U.S. military said on Friday '
        'https://example.com/article that it would begin accepting '
        'transgender recruits &amp; more…</p>'
    )
    cleaned = clean_text(sample)
    print(f"Cleaned : {cleaned}")
    full = build_full_text("Breaking News", cleaned)
    print(f"Full    : {full}")
    wc = word_count(full)
    print(f"Words   : {wc}")
    print(f"Bucket  : {text_length_bucket(wc)}")