|
|
|
|
|
|
|
|
""" |
|
|
QUANTUM-GRADE DATA PREPROCESSING PIPELINE |
|
|
Bleeding Edge Deduplication, Normalization & Tokenization |
|
|
Aurora - ETL Systems Specialist |
|
|
""" |
|
|
|
|
|
import concurrent.futures
import hashlib
import json
import os
import re
import threading
from pathlib import Path

import ftfy
import html2text
import nltk
import numpy as np
import spacy
from bs4 import BeautifulSoup
from datasketch import MinHash, MinHashLSH
from langdetect import detect
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
from tqdm import tqdm
from transformers import AutoTokenizer
from unidecode import unidecode
|
|
|
|
|
|
|
|
nltk.download('punkt', quiet=True) |
|
|
nltk.download('stopwords', quiet=True) |
|
|
|
|
|
class QuantumPreprocessor:
    """Text preprocessing pipeline for building training corpora.

    Stages, applied in order by ``process_document``:
      1. HTML/markup stripping        (advanced_html_cleaning)
      2. Unicode repair + ASCII fold  (unicode_normalization)
      3. Regex-based noise removal    (aggressive_cleaning)
      4. English-language filtering   (language_detection)
      5. Heuristic quality scoring    (quality_scoring)
      6. MinHash-LSH deduplication    (is_duplicate)
      7. Multi-strategy tokenization  (advanced_tokenization)
    """

    def __init__(self):
        # Heavyweight NLP resources; spaCy parser/NER are disabled because
        # only tokenization is used (see advanced_tokenization).
        self.nlp = spacy.load("en_core_web_sm", disable=['parser', 'ner'])
        self.tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
        self.stemmer = PorterStemmer()
        self.stop_words = set(stopwords.words('english'))

        # Near-duplicate index: Jaccard threshold 0.8 over 128 permutations.
        self.lsh = MinHashLSH(threshold=0.8, num_perm=128)
        self.minhashes = {}
        # Serializes query+insert on the LSH index: process_batch calls
        # process_document from a thread pool, and MinHashLSH is not
        # documented as thread-safe. Without this lock two identical
        # documents processed concurrently can both pass the dedup check.
        self._lsh_lock = threading.Lock()

        # Document filters: lengths in characters, score in [0, 1].
        # NOTE(review): max_length is defined but never enforced anywhere
        # in this class — confirm whether long documents should be dropped.
        self.min_length = 50
        self.max_length = 10000
        self.min_quality_score = 0.7

    def advanced_html_cleaning(self, text):
        """Strip HTML to plain text, then remove leftover markdown markup.

        Returns "" for falsy input.
        """
        if not text:
            return ""

        # html2text converts markup into markdown-flavored plain text.
        h = html2text.HTML2Text()
        h.ignore_links = False
        h.ignore_images = True
        h.ignore_emphasis = False
        h.body_width = 0  # do not hard-wrap output lines

        cleaned = h.handle(text)

        # Drop markdown artifacts html2text leaves behind.
        cleaned = re.sub(r'\[.*?\]\(.*?\)', '', cleaned)  # [text](url) links
        cleaned = re.sub(r'\*\*.*?\*\*', '', cleaned)     # bold spans
        cleaned = re.sub(r'\*.*?\*', '', cleaned)         # italic spans
        cleaned = re.sub(r'`.*?`', '', cleaned)           # inline code

        return cleaned.strip()

    def unicode_normalization(self, text):
        """Repair mojibake with ftfy, then transliterate to plain ASCII."""
        text = ftfy.fix_text(text)
        text = unidecode(text)
        return text

    def aggressive_cleaning(self, text):
        """Remove emails, URLs, and exotic characters; collapse whitespace."""
        # Email addresses (any token containing '@').
        text = re.sub(r'\S*@\S*\s?', '', text)
        # http(s)/www URLs.
        text = re.sub(r'http\S+|www\.\S+', '', text)
        # Keep only word chars, whitespace, and common punctuation.
        text = re.sub(r'[^\w\s.,!?;:\-\'\"()]', '', text)
        # Collapse whitespace runs to a single space.
        text = re.sub(r'\s+', ' ', text)
        return text.strip()

    def language_detection(self, text):
        """Return True iff the text (first 500 chars) is detected as English.

        Detection failures (e.g. empty or featureless text) count as
        non-English instead of crashing the pipeline.
        """
        try:
            lang = detect(text[:500])
            return lang == 'en'
        except Exception:
            # langdetect raises LangDetectException when it cannot extract
            # features; the bare except it replaced also swallowed
            # KeyboardInterrupt/SystemExit, which must propagate.
            return False

    def quality_scoring(self, text):
        """Score text quality in [0, 1].

        Components: lexical diversity (weight 0.3), stopword ratio's
        closeness to 0.3 (weight 0.3, peaking at 0.5), and length
        saturating at 1000 chars (weight 0.4).
        """
        if len(text) < self.min_length:
            return 0.0

        sentences = sent_tokenize(text)
        if len(sentences) < 2:
            return 0.3  # single-sentence fragments get a flat low score

        words = word_tokenize(text.lower())
        unique_words = len(set(words))
        diversity = unique_words / len(words) if words else 0

        stopword_count = sum(1 for word in words if word in self.stop_words)
        stopword_ratio = stopword_count / len(words) if words else 0

        length_score = min(1.0, len(text) / 1000)

        score = (diversity * 0.3 +
                 (0.5 - abs(stopword_ratio - 0.3)) * 0.3 +
                 length_score * 0.4)

        return max(0.0, min(1.0, score))

    def minhash_signature(self, text):
        """Build a 128-permutation MinHash over the lowercased words."""
        words = text.lower().split()
        m = MinHash(num_perm=128)
        for word in words:
            m.update(word.encode('utf8'))
        return m

    def is_duplicate(self, text, doc_id):
        """Return True if text near-duplicates a previously seen document.

        Non-duplicates are registered in the LSH index under doc_id.
        Texts shorter than 100 chars are never treated as duplicates.
        """
        if len(text) < 100:
            return False

        m = self.minhash_signature(text)

        # Query and insert atomically so concurrent workers cannot both
        # miss each other's insert and admit the same document twice.
        with self._lsh_lock:
            if self.lsh.query(m):
                return True
            self.lsh.insert(doc_id, m)
            self.minhashes[doc_id] = m
        return False

    def advanced_tokenization(self, text):
        """Tokenize with GPT-NeoX BPE, spaCy, and NLTK; return all three."""
        gpt_tokens = self.tokenizer.tokenize(text)

        doc = self.nlp(text)
        spacy_tokens = [token.text for token in doc]

        nltk_tokens = word_tokenize(text)

        return {
            'gpt': gpt_tokens,
            'spacy': spacy_tokens,
            'nltk': nltk_tokens,
            'raw_text': text
        }

    def process_document(self, text, doc_id):
        """Run the full pipeline on one document.

        Returns a result dict, or None when the document is rejected
        (too short, non-English, low quality, or a near-duplicate).
        """
        if not text or len(text.strip()) < self.min_length:
            return None

        cleaned = self.advanced_html_cleaning(text)
        cleaned = self.unicode_normalization(cleaned)
        cleaned = self.aggressive_cleaning(cleaned)

        if not self.language_detection(cleaned):
            return None

        quality_score = self.quality_scoring(cleaned)
        if quality_score < self.min_quality_score:
            return None

        if self.is_duplicate(cleaned, doc_id):
            return None

        tokens = self.advanced_tokenization(cleaned)

        return {
            'id': doc_id,
            'original_length': len(text),
            'cleaned_length': len(cleaned),
            'quality_score': quality_score,
            'tokens': tokens,
            'cleaned_text': cleaned,
            'token_count': len(tokens['gpt'])
        }

    def process_batch(self, texts, doc_ids):
        """Process paired texts/doc_ids on a thread pool; keep non-None results.

        Result order follows completion order, not input order.
        """
        results = []

        with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
            futures = [executor.submit(self.process_document, text, doc_id)
                       for text, doc_id in zip(texts, doc_ids)]

            for future in tqdm(concurrent.futures.as_completed(futures),
                               total=len(futures), desc="Processing documents"):
                result = future.result()
                if result:
                    results.append(result)

        return results
|
|
|
|
|
def main():
    """Smoke-test the pipeline on a tiny in-memory corpus with one duplicate."""
    print("🚀 QUANTUM-GRADE PREPROCESSING PIPELINE INITIALIZED")
    print("=" * 60)

    pipeline = QuantumPreprocessor()

    # The first and last entries are identical on purpose, to exercise
    # the MinHash deduplication path.
    sample_texts = [
        "Quantum computing represents the next frontier in computational power.",
        "Machine learning models require massive datasets for effective training.",
        "The quick brown fox jumps over the lazy dog.",
        "Quantum computing represents the next frontier in computational power.",
    ]
    sample_ids = [f"test_{idx}" for idx, _ in enumerate(sample_texts)]

    processed = pipeline.process_batch(sample_texts, sample_ids)

    kept, total = len(processed), len(sample_texts)
    print(f"\n✅ Processed {kept}/{total} documents")
    print(f"📊 Deduplication removed {total - kept} duplicates")

    for doc in processed:
        print(f"\n📄 Document {doc['id']}:")
        print(f"   Quality: {doc['quality_score']:.3f}")
        print(f"   Tokens: {doc['token_count']}")
        print(f"   Preview: {doc['cleaned_text'][:100]}...")


if __name__ == "__main__":
    main()