"""
MASTER QUANTUM PIPELINE CONTROLLER
Orchestrates Bleeding Edge Preprocessing + Knowledge Acquisition
Aurora - ETL Systems Specialist
"""
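# Example invocation (a minimal sketch; assumes this file is importable as
# `master_pipeline` and that the companion modules imported below are on the
# Python path):
#
#     from master_pipeline import MasterPipeline
#     pipeline = MasterPipeline()
#     pipeline.process_dataset_batch(
#         "/data/adaptai/corpus-data/public-datasets/pile-hf", source_type="pile")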

import hashlib
import json
import os
import time
from datetime import datetime
from pathlib import Path
from typing import Iterator, Tuple

from quantum_preprocessing_pipeline import QuantumPreprocessor
from knowledge_base_scraper import KnowledgeBaseScraper
from database_integration import DatabaseIntegrator
from sentence_transformers import SentenceTransformer


class MasterPipeline:
    def __init__(self):
        self.preprocessor = QuantumPreprocessor()
        self.scraper = KnowledgeBaseScraper()
        self.integrator = DatabaseIntegrator()

        # Sentence embedding model shared by every document that passes through
        self.embedding_model = SentenceTransformer('all-MiniLM-L6-v2')

        self.output_dir = Path("/data/adaptai/corpus-data/processed")
        self.output_dir.mkdir(exist_ok=True, parents=True)

    def generate_embeddings(self, text):
        """Generate embeddings for text"""
        return self.embedding_model.encode(text).tolist()

    def process_and_integrate(self, text, doc_id, source_type="dataset"):
        """Full processing and database integration"""
        start_time = time.time()

        # Quantum preprocessing
        processed = self.preprocessor.process_document(text, doc_id)
        if not processed:
            return None

        # Embedding generation
        embeddings = self.generate_embeddings(processed['cleaned_text'])

        # Attach processing metadata
        processed['processing_time'] = time.time() - start_time
        processed['source'] = source_type

        # Multi-database integration
        self.integrator.integrate_document(doc_id, processed, embeddings)

        return processed

    def process_dataset_batch(self, dataset_path, source_type="dataset"):
        """Process an entire dataset batch from files under `dataset_path`.

        Supports JSONL (.jsonl) with keys "text" or "content", JSON arrays of
        objects, plain text/markdown files (each file as one document), and
        Parquet files with a "text" or "content" column.
        """
| print(f"🚀 Processing dataset from path: {dataset_path}") |
|
|
| def iter_docs(path: str, limit: int = 100000) -> Iterator[Tuple[str, str]]: |
| count = 0 |
| for root, _, files in os.walk(path): |
| for fname in files: |
| if count >= limit: |
| return |
| fpath = os.path.join(root, fname) |
| lower = fname.lower() |
| try: |
| if lower.endswith('.jsonl'): |
| with open(fpath, 'r', encoding='utf-8') as f: |
| for idx, line in enumerate(f): |
| if count >= limit: |
| return |
| line = line.strip() |
| if not line: |
| continue |
| obj = json.loads(line) |
| text = obj.get('text') or obj.get('content') |
| if text: |
| doc_id = f"{source_type}:{os.path.relpath(fpath, path)}#{idx}" |
| yield doc_id, text |
| count += 1 |
| elif lower.endswith('.json'): |
| with open(fpath, 'r', encoding='utf-8') as f: |
| data = json.load(f) |
| if isinstance(data, list): |
| for idx, obj in enumerate(data): |
| if count >= limit: |
| return |
| if isinstance(obj, dict): |
| text = obj.get('text') or obj.get('content') |
| if text: |
| doc_id = f"{source_type}:{os.path.relpath(fpath, path)}#{idx}" |
| yield doc_id, text |
| count += 1 |
| elif isinstance(data, dict): |
| text = data.get('text') or data.get('content') |
| if text: |
| doc_id = f"{source_type}:{os.path.relpath(fpath, path)}" |
| yield doc_id, text |
| count += 1 |
| elif lower.endswith(('.txt', '.md')): |
| with open(fpath, 'r', encoding='utf-8') as f: |
| text = f.read() |
| if text.strip(): |
| doc_id = f"{source_type}:{os.path.relpath(fpath, path)}" |
| yield doc_id, text |
| count += 1 |
| elif lower.endswith('.parquet'): |
| try: |
| import pandas as pd |
| except Exception as _: |
| raise RuntimeError("Parquet support requires pandas/pyarrow installed") |
| df = pd.read_parquet(fpath) |
| text_col = 'text' if 'text' in df.columns else ('content' if 'content' in df.columns else None) |
| if text_col is None: |
| raise ValueError(f"No 'text' or 'content' column in {fpath}") |
| for row_idx, text in enumerate(df[text_col].astype(str).tolist()): |
| if count >= limit: |
| return |
| if text and text.strip(): |
| doc_id = f"{source_type}:{os.path.relpath(fpath, path)}#{row_idx}" |
| yield doc_id, text |
| count += 1 |
| except Exception as e: |
| print(f" ⚠️ Skipping {fpath}: {e}") |
|
|
        processed_count = 0
        total_count = 0

        for idx, (doc_id, content) in enumerate(iter_docs(dataset_path)):
            total_count += 1
            result = self.process_and_integrate(content, doc_id, source_type)
            if result:
                processed_count += 1
            if (idx + 1) % 100 == 0:
                print(f"  Processed {idx + 1} documents so far…")

        print(f"  Completed path: {dataset_path}")
        return processed_count, total_count

    def acquire_knowledge_base(self):
        """Acquire and integrate knowledge base content"""
| print("🌐 ACQUIRING KNOWLEDGE BASE CONTENT") |
| print("=" * 50) |
| |
| |
| knowledge_data = self.scraper.scrape_all_sources() |
| |
| |
| self.integrator.integrate_knowledge_base(knowledge_data) |
| |
| |
| processed_knowledge = [] |
| for category, items in knowledge_data.items(): |
| for item in items: |
| content = item.get('content') or item.get('abstract', '') |
| if content: |
| doc_id = f"knowledge_{hash(content) % 1000000000}" |
| processed = self.process_and_integrate( |
| content, doc_id, f"knowledge_{category}" |
| ) |
| if processed: |
| processed_knowledge.append(processed) |
| |
| return len(processed_knowledge) |
| |
    def run_full_pipeline(self):
        """Execute complete quantum pipeline"""
        print("🚀 QUANTUM MASTER PIPELINE INITIATED")
        print("=" * 60)
        print("Phases: 1. Knowledge Acquisition  2. Quantum Processing  3. Multi-DB Integration")
        print()

        start_time = time.time()

        # Phase 1: knowledge acquisition
        print("📚 PHASE 1: KNOWLEDGE ACQUISITION")
        print("-" * 40)
        knowledge_count = self.acquire_knowledge_base()
        print(f"✅ Acquired {knowledge_count} knowledge items")

        # Phase 2: quantum processing of the public datasets
        print("\n⚡ PHASE 2: QUANTUM PROCESSING")
        print("-" * 40)

        datasets = [
            ('redpajama', '/data/adaptai/corpus-data/public-datasets/redpajama-hf'),
            ('pile', '/data/adaptai/corpus-data/public-datasets/pile-hf'),
            ('thestack', '/data/adaptai/corpus-data/public-datasets/the-stack-hf'),
            ('openassistant', '/data/adaptai/corpus-data/public-datasets/openassistant-hf'),
            ('ultrafeedback', '/data/adaptai/corpus-data/public-datasets/ultrafeedback-hf'),
        ]

        total_processed = 0
        total_documents = 0

        for dataset_name, dataset_path in datasets:
            if Path(dataset_path).exists():
                processed, total = self.process_dataset_batch(dataset_path, dataset_name)
                total_processed += processed
                total_documents += total
                print(f"✅ {dataset_name}: {processed}/{total} documents processed")
            else:
                print(f"⏩ {dataset_name}: Dataset not available yet")

        # Phase 3: report what landed in each database
        print("\n💾 PHASE 3: DATABASE INTEGRATION")
        print("-" * 40)

        stats = self.integrator.get_database_stats()
        print("📊 FINAL DATABASE STATISTICS:")
        for db, count in stats.items():
            print(f"  {db}: {count}")

        # Persist a run report alongside the processed corpus
        self.save_pipeline_report({
            'total_processed': total_processed,
            'total_documents': total_documents,
            'knowledge_items': knowledge_count,
            'processing_time': time.time() - start_time,
            'completion_time': datetime.now().isoformat(),
            'database_stats': stats,
        })

        print(f"\n🎯 PIPELINE COMPLETE: {total_processed} documents processed")
        print(f"⏱️ Total time: {time.time() - start_time:.2f} seconds")
        print("=" * 60)

    def save_pipeline_report(self, metrics):
        """Save pipeline execution report"""
        report = {
            'pipeline_version': 'quantum_v1.0',
            'execution_date': datetime.now().isoformat(),
            'metrics': metrics,
            'components': {
                'preprocessor': 'QuantumPreprocessor',
                'scraper': 'KnowledgeBaseScraper',
                'integrator': 'DatabaseIntegrator',
                'embedding_model': 'all-MiniLM-L6-v2'
            },
            'databases_integrated': [
                'Redis (18000)', 'PostgreSQL', 'SQLite', 'Qdrant (6333)', 'ChromaDB'
            ]
        }

        report_file = self.output_dir / f"pipeline_report_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
        with open(report_file, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2, ensure_ascii=False)

        print(f"💾 Pipeline report saved to {report_file}")

def main():
    pipeline = MasterPipeline()
    pipeline.run_full_pipeline()


if __name__ == "__main__":
    main()