import re
import string


class TextProcessor:
    """Handles normalization, cleaning, and phrase detection."""

    STOPWORDS = {
        'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', "you're", "you've", "you'll", "you'd",
        'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she', "she's", 'her', 'hers',
        'herself', 'it', "it's", 'its', 'itself', 'they', 'them', 'their', 'theirs', 'themselves', 'what', 'which',
        'who', 'whom', 'this', 'that', "that'll", 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been',
        'being', 'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if',
        'or', 'because', 'as', 'until', 'while', 'of', 'at', 'by', 'for', 'with', 'about', 'against', 'between',
        'into', 'through', 'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out',
        'on', 'off', 'over', 'under', 'again', 'further', 'then', 'once', 'here', 'there', 'when', 'where', 'why',
        'how', 'all', 'any', 'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor', 'not',
        'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can', 'will', 'just', 'don', "don't", 'should',
        "should've", 'now', 'd', 'll', 'm', 'o', 're', 've', 'y', 'ain', 'aren', "aren't", 'couldn', "couldn't",
        'didn', "didn't", 'doesn', "doesn't", 'hadn', "hadn't", 'hasn', "hasn't", 'haven', "haven't", 'isn', "isn't",
        'ma', 'mightn', "mightn't", 'mustn', "mustn't", 'needn', "needn't", 'shan', "shan't", 'shouldn', "shouldn't",
        'wasn', "wasn't", 'weren', "weren't", 'won', "won't", 'wouldn', "wouldn't"
    }

    @staticmethod
    def clean_html(text):
        """Strip HTML tags and collapse whitespace."""
        if not text:
            return ""
        # Remove HTML tags
        clean = re.sub(r'<[^>]+>', ' ', text)
        # Collapse the extra whitespace left behind by removed tags
        clean = re.sub(r'\s+', ' ', clean).strip()
        return clean

    @classmethod
    def normalize(cls, text):
        """Lowercase, strip punctuation, and drop stopwords."""
        if not text:
            return ""
        # Lowercase
        text = text.lower()
        # Remove punctuation
        text = text.translate(str.maketrans('', '', string.punctuation))
        # Remove stopwords
        tokens = [t for t in text.split() if t not in cls.STOPWORDS]
        return " ".join(tokens)

    @staticmethod
    def detect_phrases(text):
        """Simple bigram detection for important PPD concepts."""
        phrases = [
            "postpartum depression", "maternal mental health", "sleep disturbance",
            "crying spells", "suicidal ideation", "mood swings", "baby blues"
        ]
        for p in phrases:
            # Join each phrase with underscores so a downstream TF-IDF vectorizer
            # treats it as a single token; scikit-learn's ngram_range could capture
            # such bigrams without this step.
            if p in text.lower():
                underscored = p.replace(" ", "_")
                text = re.sub(re.escape(p), underscored, text, flags=re.IGNORECASE)
        return text
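

# The comment in detect_phrases notes that scikit-learn's ngram_range can capture
# these bigrams directly instead of underscoring them. A minimal sketch of that
# alternative, assuming scikit-learn is installed; the helper name and corpus are
# illustrative and not part of the original module.
def tfidf_with_bigrams(corpus):
    from sklearn.feature_extraction.text import TfidfVectorizer

    vectorizer = TfidfVectorizer(ngram_range=(1, 2))  # unigrams and bigrams
    matrix = vectorizer.fit_transform(corpus)
    # Feature names now include bigrams such as "crying spells" and "mood swings".
    return matrix, vectorizer.get_feature_names_out()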
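

# A minimal usage sketch, added for illustration; the sample text and the call
# order are assumptions, not part of the original module.
if __name__ == "__main__":
    raw = "<p>I have been having crying spells and mood swings lately.</p>"
    cleaned = TextProcessor.clean_html(raw)
    print(TextProcessor.normalize(cleaned))
    # -> "crying spells mood swings lately"
    print(TextProcessor.detect_phrases(cleaned))
    # -> "I have been having crying_spells and mood_swings lately."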