#!/usr/bin/env python3
"""
QUANTUM-GRADE DATA PREPROCESSING PIPELINE
Bleeding Edge Deduplication, Normalization & Tokenization
Aurora - ETL Systems Specialist
"""
import concurrent.futures
import hashlib
import json
import os
import re
import threading
from pathlib import Path

import ftfy
import html2text
import nltk
import numpy as np
import spacy
from bs4 import BeautifulSoup
from datasketch import MinHash, MinHashLSH
from langdetect import detect
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
from tqdm import tqdm
from transformers import AutoTokenizer
from unidecode import unidecode
# Download NLTK data
nltk.download('punkt', quiet=True)
nltk.download('stopwords', quiet=True)
class QuantumPreprocessor:
    """Text-preprocessing pipeline: clean, filter, deduplicate, tokenize.

    Stages (applied in order by process_document):
      1. HTML -> plain text conversion and Markdown residue removal
      2. Unicode repair (ftfy) and ASCII transliteration (unidecode)
      3. Regex cleaning (emails, URLs, unusual symbols, whitespace)
      4. Language filter (English only)
      5. Heuristic quality scoring and thresholding
      6. MinHash-LSH near-duplicate filtering
      7. Multi-strategy tokenization (GPT subwords, spaCy, NLTK)
    """

    def __init__(self):
        # Linguistic models; spaCy parser/NER disabled since only
        # tokenization is used, which keeps loading and calls cheap.
        self.nlp = spacy.load("en_core_web_sm", disable=['parser', 'ner'])
        self.tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
        self.stemmer = PorterStemmer()
        self.stop_words = set(stopwords.words('english'))
        # MinHash LSH index for near-duplicate detection. num_perm must be
        # identical for the index and every signature, so it lives in one place.
        self.num_perm = 128
        self.lsh = MinHashLSH(threshold=0.8, num_perm=self.num_perm)
        self.minhashes = {}
        # process_batch runs documents in worker threads; this lock makes the
        # query-then-insert in is_duplicate atomic so two near-identical
        # documents processed concurrently cannot both pass the dedup check.
        self._lsh_lock = threading.Lock()
        # Quality thresholds
        self.min_length = 50  # characters; shorter docs are rejected outright
        self.max_length = 10000  # NOTE(review): currently unused by the pipeline
        self.min_quality_score = 0.7  # minimum quality_scoring() result to keep

    def advanced_html_cleaning(self, text):
        """Convert HTML to plain text and strip residual Markdown markup.

        Returns "" for falsy input.
        """
        if not text:
            return ""
        # html2text converters keep parser state; build a fresh one per call.
        h = html2text.HTML2Text()
        h.ignore_links = False
        h.ignore_images = True
        h.ignore_emphasis = False
        h.body_width = 0  # disable hard line wrapping
        cleaned = h.handle(text)
        # Remove Markdown residue left by the conversion. Note the bold /
        # italic / code patterns drop the enclosed text as well, not just
        # the markers — intentional aggressive cleaning.
        cleaned = re.sub(r'\[.*?\]\(.*?\)', '', cleaned)  # Markdown links
        cleaned = re.sub(r'\*\*.*?\*\*', '', cleaned)     # Bold
        cleaned = re.sub(r'\*.*?\*', '', cleaned)         # Italic
        cleaned = re.sub(r'`.*?`', '', cleaned)           # Inline code
        return cleaned.strip()

    def unicode_normalization(self, text):
        """Repair mojibake with ftfy, then transliterate to ASCII.

        NOTE: unidecode is lossy — non-Latin scripts come out approximated.
        """
        text = ftfy.fix_text(text)
        text = unidecode(text)  # Convert to ASCII
        return text

    def aggressive_cleaning(self, text):
        """Strip emails, URLs and unusual symbols; normalize whitespace."""
        # Remove emails (token containing '@' plus one trailing space).
        text = re.sub(r'\S*@\S*\s?', '', text)
        # Remove URLs.
        text = re.sub(r'http\S+|www\.\S+', '', text)
        # Keep word characters, whitespace and basic punctuation only.
        text = re.sub(r'[^\w\s.,!?;:\-\'\"()]', '', text)
        # Collapse runs of whitespace to a single space.
        text = re.sub(r'\s+', ' ', text)
        return text.strip()

    def language_detection(self, text):
        """Return True iff the text (first 500 chars) is detected as English."""
        try:
            # 500 chars is plenty for a reliable langdetect guess.
            return detect(text[:500]) == 'en'
        except Exception:
            # langdetect raises LangDetectException for empty or
            # undetectable input; treat those documents as non-English.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt.)
            return False

    def quality_scoring(self, text):
        """Score text quality in [0, 1].

        Combines word diversity, stopword ratio (peaking at the ~0.3
        typical of natural English prose) and length. Returns 0.0 for
        too-short text and a flat 0.3 for single-sentence text.
        """
        if len(text) < self.min_length:
            return 0.0
        # Require at least two sentences for a full score.
        sentences = sent_tokenize(text)
        if len(sentences) < 2:
            return 0.3
        # Word diversity (type/token ratio).
        words = word_tokenize(text.lower())
        unique_words = len(set(words))
        diversity = unique_words / len(words) if words else 0
        # Stopword ratio — natural prose sits near 0.3.
        stopword_count = sum(1 for word in words if word in self.stop_words)
        stopword_ratio = stopword_count / len(words) if words else 0
        # Length score saturates at 1000 characters.
        length_score = min(1.0, len(text) / 1000)
        # Weighted composite (weights sum to 1.0); final clamp handles the
        # stopword term going negative for extreme ratios.
        score = (diversity * 0.3 +
                 (0.5 - abs(stopword_ratio - 0.3)) * 0.3 +
                 length_score * 0.4)
        return max(0.0, min(1.0, score))

    def minhash_signature(self, text):
        """Build a MinHash signature over the lowercased whitespace tokens."""
        m = MinHash(num_perm=self.num_perm)
        for word in text.lower().split():
            m.update(word.encode('utf8'))
        return m

    def is_duplicate(self, text, doc_id):
        """Return True if `text` near-duplicates an indexed document.

        Non-duplicates are registered in the LSH index under `doc_id` as a
        side effect. Documents under 100 characters are never treated as
        duplicates (too short for a reliable signature).
        """
        if len(text) < 100:
            return False
        m = self.minhash_signature(text)
        # The query and the insert must be one atomic step: without the
        # lock, two concurrent near-duplicates can both query before
        # either inserts, and both slip through.
        with self._lsh_lock:
            if self.lsh.query(m):
                return True
            self.lsh.insert(doc_id, m)
            self.minhashes[doc_id] = m
        return False

    def advanced_tokenization(self, text):
        """Tokenize with three strategies for downstream ensemble use.

        Returns a dict with keys 'gpt' (subword tokens), 'spacy', 'nltk'
        (word tokens) and 'raw_text' (the input unchanged).
        """
        gpt_tokens = self.tokenizer.tokenize(text)
        doc = self.nlp(text)
        spacy_tokens = [token.text for token in doc]
        nltk_tokens = word_tokenize(text)
        return {
            'gpt': gpt_tokens,
            'spacy': spacy_tokens,
            'nltk': nltk_tokens,
            'raw_text': text
        }

    def process_document(self, text, doc_id):
        """Run the full pipeline on one document.

        Returns a result dict (id, lengths, quality score, tokens, cleaned
        text) or None when the document is rejected: too short,
        non-English, below the quality threshold, or a near-duplicate.
        """
        if not text or len(text.strip()) < self.min_length:
            return None
        # Step 1: HTML cleaning
        cleaned = self.advanced_html_cleaning(text)
        # Step 2: Unicode normalization
        cleaned = self.unicode_normalization(cleaned)
        # Step 3: Aggressive regex cleaning
        cleaned = self.aggressive_cleaning(cleaned)
        # Step 4: Language filter — drop non-English documents.
        if not self.language_detection(cleaned):
            return None
        # Step 5: Quality filter.
        quality_score = self.quality_scoring(cleaned)
        if quality_score < self.min_quality_score:
            return None
        # Step 6: Dedup filter (also registers this doc in the LSH index).
        if self.is_duplicate(cleaned, doc_id):
            return None
        # Step 7: Tokenization.
        tokens = self.advanced_tokenization(cleaned)
        return {
            'id': doc_id,
            'original_length': len(text),
            'cleaned_length': len(cleaned),
            'quality_score': quality_score,
            'tokens': tokens,
            'cleaned_text': cleaned,
            'token_count': len(tokens['gpt'])
        }

    def process_batch(self, texts, doc_ids):
        """Process documents concurrently; return only accepted results.

        Result order follows completion order, not input order.
        NOTE(review): worker threads share the spaCy/HF model objects —
        the LSH index is lock-protected, but confirm the models tolerate
        concurrent calls before raising max_workers.
        """
        results = []
        with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
            futures = [executor.submit(self.process_document, text, doc_id)
                       for text, doc_id in zip(texts, doc_ids)]
            for future in tqdm(concurrent.futures.as_completed(futures),
                               total=len(futures), desc="Processing documents"):
                result = future.result()
                if result:
                    results.append(result)
        return results
def main():
    """Demo entry point: run the pipeline over a tiny built-in corpus."""
    print("🚀 QUANTUM-GRADE PREPROCESSING PIPELINE INITIALIZED")
    print("=" * 60)

    pipeline = QuantumPreprocessor()

    # The last sample repeats the first so the dedup stage has work to do.
    sample_texts = [
        "Quantum computing represents the next frontier in computational power.",
        "Machine learning models require massive datasets for effective training.",
        "The quick brown fox jumps over the lazy dog.",
        "Quantum computing represents the next frontier in computational power.",
    ]
    sample_ids = [f"test_{idx}" for idx in range(len(sample_texts))]

    kept = pipeline.process_batch(sample_texts, sample_ids)

    print(f"\n✅ Processed {len(kept)}/{len(sample_texts)} documents")
    print(f"📊 Deduplication removed {len(sample_texts) - len(kept)} duplicates")
    for doc in kept:
        print(f"\n📄 Document {doc['id']}:")
        print(f" Quality: {doc['quality_score']:.3f}")
        print(f" Tokens: {doc['token_count']}")
        print(f" Preview: {doc['cleaned_text'][:100]}...")


if __name__ == "__main__":
    main()