# adaptai/platform/aiml/etl/quantum_preprocessing_pipeline.py
#!/usr/bin/env python3
"""
QUANTUM-GRADE DATA PREPROCESSING PIPELINE
Bleeding Edge Deduplication, Normalization & Tokenization
Aurora - ETL Systems Specialist
"""
# Standard library
import concurrent.futures
import hashlib
import json
import os
import re
import threading
from pathlib import Path

# Third-party
import ftfy
import html2text
import nltk
import numpy as np
import spacy
from bs4 import BeautifulSoup
from datasketch import MinHash, MinHashLSH
from langdetect import detect
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize, sent_tokenize
from tqdm import tqdm
from transformers import AutoTokenizer
from unidecode import unidecode
# Fetch NLTK resources used by this module: 'punkt' backs word_tokenize /
# sent_tokenize, 'stopwords' backs the English stopword set (quiet=True
# suppresses download progress output).
nltk.download('punkt', quiet=True)
nltk.download('stopwords', quiet=True)
class QuantumPreprocessor:
    """Text-corpus preprocessing pipeline: clean, filter, dedupe, tokenize.

    Stage order (see process_document): HTML stripping -> unicode repair ->
    aggressive regex cleaning -> English-language filter -> quality-score
    filter -> MinHash-LSH near-duplicate filter -> multi-strategy
    tokenization.
    """

    def __init__(self):
        # Heavyweight NLP models, loaded once per instance.
        self.nlp = spacy.load("en_core_web_sm", disable=['parser', 'ner'])
        self.tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
        self.stemmer = PorterStemmer()
        self.stop_words = set(stopwords.words('english'))
        # MinHash LSH index for near-duplicate detection (Jaccard >= 0.8).
        self.lsh = MinHashLSH(threshold=0.8, num_perm=128)
        self.minhashes = {}
        # Serializes LSH query/insert: process_batch calls is_duplicate from
        # multiple ThreadPoolExecutor workers, and the index is mutable state.
        self._lock = threading.Lock()
        # Quality thresholds
        self.min_length = 50  # characters
        # NOTE(review): max_length is defined but never enforced anywhere in
        # this class — confirm whether truncation/rejection was intended.
        self.max_length = 10000
        self.min_quality_score = 0.7

    def advanced_html_cleaning(self, text):
        """Convert HTML to plain text and strip residual markdown markup.

        Returns "" for falsy input. NOTE: the markdown regexes below remove
        the *content* inside links/bold/italic/code spans, not just the
        markers — this is the intended aggressive behavior.
        """
        if not text:
            return ""
        # Convert HTML to clean text via html2text.
        h = html2text.HTML2Text()
        h.ignore_links = False
        h.ignore_images = True
        h.ignore_emphasis = False
        h.body_width = 0  # disable hard line wrapping
        cleaned = h.handle(text)
        # Remove residual markdown emitted by html2text.
        cleaned = re.sub(r'\[.*?\]\(.*?\)', '', cleaned)  # Markdown links
        cleaned = re.sub(r'\*\*.*?\*\*', '', cleaned)     # Bold
        cleaned = re.sub(r'\*.*?\*', '', cleaned)         # Italic
        cleaned = re.sub(r'`.*?`', '', cleaned)           # Code
        return cleaned.strip()

    def unicode_normalization(self, text):
        """Repair mojibake (ftfy) then transliterate to plain ASCII."""
        text = ftfy.fix_text(text)
        text = unidecode(text)  # lossy: non-ASCII is approximated to ASCII
        return text

    def aggressive_cleaning(self, text):
        """Strip emails, URLs and exotic characters; collapse whitespace."""
        # Remove emails (the pattern also eats one trailing whitespace char).
        text = re.sub(r'\S*@\S*\s?', '', text)
        # Remove URLs
        text = re.sub(r'http\S+|www\.\S+', '', text)
        # Remove special characters but keep basic punctuation
        text = re.sub(r'[^\w\s.,!?;:\-\'\"()]', '', text)
        # Normalize whitespace
        text = re.sub(r'\s+', ' ', text)
        return text.strip()

    def language_detection(self, text):
        """Return True iff the first 500 chars are detected as English."""
        try:
            return detect(text[:500]) == 'en'
        except Exception:
            # langdetect raises LangDetectException on empty/undetectable
            # input; treat that as "not English" rather than crashing.
            # (Was a bare except:, which also swallowed KeyboardInterrupt.)
            return False

    def quality_scoring(self, text):
        """Compute a heuristic text-quality score in [0, 1].

        Combines word diversity, stopword-ratio sanity (ideal ~0.3) and a
        length score. NOTE(review): the composite maxes out at 0.85
        (0.3 + 0.15 + 0.4), so min_quality_score=0.7 is a very strict bar —
        confirm the weights are as intended.
        """
        if len(text) < self.min_length:
            return 0.0
        # Sentence length variety: single-sentence docs get a flat low score.
        sentences = sent_tokenize(text)
        if len(sentences) < 2:
            return 0.3
        # Word diversity (type/token ratio).
        words = word_tokenize(text.lower())
        unique_words = len(set(words))
        diversity = unique_words / len(words) if words else 0
        # Stopword ratio (should be reasonable; ~0.3 is treated as ideal).
        stopword_count = sum(1 for word in words if word in self.stop_words)
        stopword_ratio = stopword_count / len(words) if words else 0
        # Length score saturates at 1000 characters.
        length_score = min(1.0, len(text) / 1000)
        # Composite score, clamped to [0, 1].
        score = (diversity * 0.3 +
                 (0.5 - abs(stopword_ratio - 0.3)) * 0.3 +
                 length_score * 0.4)
        return max(0.0, min(1.0, score))

    def minhash_signature(self, text):
        """Build a 128-permutation MinHash over lowercased word shingles."""
        words = text.lower().split()
        m = MinHash(num_perm=128)
        for word in words:
            m.update(word.encode('utf8'))
        return m

    def is_duplicate(self, text, doc_id):
        """Return True if text is a near-duplicate of an indexed document.

        Non-duplicates are inserted into the LSH index under doc_id as a
        side effect. Texts under 100 characters are never flagged (MinHash
        is unreliable on tiny token sets).
        """
        if len(text) < 100:  # Too short for reliable deduplication
            return False
        m = self.minhash_signature(text)
        # query + insert must be atomic: process_batch calls this from
        # several worker threads, and a check-then-insert race would let two
        # identical docs both pass.
        with self._lock:
            if self.lsh.query(m):
                return True
            # Add to index if not duplicate.
            self.lsh.insert(doc_id, m)
            self.minhashes[doc_id] = m
        return False

    def advanced_tokenization(self, text):
        """Tokenize with three strategies (BPE, spaCy, NLTK) for ensembles."""
        # GPT-style (BPE) tokenization.
        gpt_tokens = self.tokenizer.tokenize(text)
        # SpaCy linguistic tokenization (parser/NER disabled in __init__).
        doc = self.nlp(text)
        spacy_tokens = [token.text for token in doc]
        # NLTK tokenization.
        nltk_tokens = word_tokenize(text)
        # Return all for ensemble processing downstream.
        return {
            'gpt': gpt_tokens,
            'spacy': spacy_tokens,
            'nltk': nltk_tokens,
            'raw_text': text
        }

    def process_document(self, text, doc_id):
        """Run the full pipeline on one document.

        Returns a result dict, or None when the document is rejected at any
        stage (too short, non-English, low quality, or duplicate).
        """
        if not text or len(text.strip()) < self.min_length:
            return None
        # Step 1: HTML cleaning
        cleaned = self.advanced_html_cleaning(text)
        # Step 2: Unicode normalization
        cleaned = self.unicode_normalization(cleaned)
        # Step 3: Aggressive cleaning
        cleaned = self.aggressive_cleaning(cleaned)
        # Step 4: Language detection
        if not self.language_detection(cleaned):
            return None
        # Step 5: Quality scoring
        quality_score = self.quality_scoring(cleaned)
        if quality_score < self.min_quality_score:
            return None
        # Step 6: Deduplication (also registers the doc in the LSH index)
        if self.is_duplicate(cleaned, doc_id):
            return None
        # Step 7: Tokenization
        tokens = self.advanced_tokenization(cleaned)
        return {
            'id': doc_id,
            'original_length': len(text),
            'cleaned_length': len(cleaned),
            'quality_score': quality_score,
            'tokens': tokens,
            'cleaned_text': cleaned,
            'token_count': len(tokens['gpt'])
        }

    def process_batch(self, texts, doc_ids):
        """Process a batch of documents concurrently; rejected docs dropped.

        Uses threads (not processes): the heavy lifting happens in C
        extensions, and is_duplicate is lock-protected for this use.
        """
        results = []
        with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
            futures = []
            for text, doc_id in zip(texts, doc_ids):
                futures.append(executor.submit(self.process_document, text, doc_id))
            for future in tqdm(concurrent.futures.as_completed(futures),
                               total=len(futures), desc="Processing documents"):
                result = future.result()
                if result:
                    results.append(result)
        return results
def main():
    """Smoke-test the pipeline on a tiny sample corpus and print a summary."""
    print("🚀 QUANTUM-GRADE PREPROCESSING PIPELINE INITIALIZED")
    print("=" * 60)
    pipeline = QuantumPreprocessor()
    # Sample corpus; the last entry repeats the first to exercise dedup.
    sample_docs = [
        "Quantum computing represents the next frontier in computational power.",
        "Machine learning models require massive datasets for effective training.",
        "The quick brown fox jumps over the lazy dog.",
        "Quantum computing represents the next frontier in computational power.",  # Duplicate
    ]
    sample_ids = [f"test_{index}" for index, _ in enumerate(sample_docs)]
    processed = pipeline.process_batch(sample_docs, sample_ids)
    total, kept = len(sample_docs), len(processed)
    print(f"\n✅ Processed {kept}/{total} documents")
    # NOTE: this count also includes docs dropped by the language/quality
    # filters, not only LSH duplicates.
    print(f"📊 Deduplication removed {total - kept} duplicates")
    for item in processed:
        print(f"\n📄 Document {item['id']}:")
        print(f" Quality: {item['quality_score']:.3f}")
        print(f" Tokens: {item['token_count']}")
        print(f" Preview: {item['cleaned_text'][:100]}...")


if __name__ == "__main__":
    main()