---
license: cc-by-sa-4.0
task_categories:
- text-retrieval
- feature-extraction
language:
- en
viewer: false
tags:
- wikipedia
- qdrant
- sparse-embeddings
- splade
- bm25
- vector-database
- semantic-search
size_categories:
- 1M
wikipedia-en-qdrant-2025-09-03.snapshot
```

3. **Restore to Qdrant**: Follow the [Qdrant Snapshot Restore Documentation](https://qdrant.tech/documentation/concepts/snapshots/#restore-snapshot) to restore the snapshot to your Qdrant instance.

### Usage Example

Here's a complete example showing how to search the Wikipedia embeddings using SPLADE, BM25, and hybrid approaches:

```python
import os
from typing import List, Optional

from dotenv import load_dotenv
from fastembed import SparseTextEmbedding
from fastembed.sparse.bm25 import Bm25
from qdrant_client import QdrantClient, models

COLLECTION_NAME = 'WIKIPEDIA_ENGLISH'  # existing Qdrant collection
SPLADE_MODEL = 'prithivida/Splade_PP_en_v1'  # original: 'naver/splade-v3'
BM25_MODEL = 'Qdrant/bm25'  # fastembed BM25 model card
BM25_LANGUAGE = 'english'

TOPK_SPLADE = 3
TOPK_BM25 = 3
TOPK_FUSED = 5
PREFETCH_PER_MODEL = 20  # how many raw candidates each model contributes before fusion

load_dotenv()


def build_qdrant_client() -> QdrantClient:
    """Build a QdrantClient from the QDRANT_URL / QDRANT_API_KEY env vars.

    Local instances (localhost / 127.0.0.1) connect with defaults; any other
    host is assumed remote and gets HTTPS plus a longer timeout.
    """
    url = os.getenv('QDRANT_URL', 'http://localhost:6333')
    api_key = os.getenv('QDRANT_API_KEY')  # may be None / empty for local
    if 'localhost' in url or '127.0.0.1' in url:
        return QdrantClient(url=url, api_key=api_key)
    return QdrantClient(url=url, https=True, timeout=60, api_key=api_key)


class HybridSparseSearcher:
    """Encapsulates SPLADE, BM25, and hybrid (RRF) query logic."""

    def __init__(self) -> None:
        self.client = build_qdrant_client()
        self.splade = SparseTextEmbedding(model_name=SPLADE_MODEL, device='cpu')
        self.bm25 = Bm25(BM25_MODEL, language=BM25_LANGUAGE)

    # ------------------------ Individual Model Searches ------------------
    def _splade_query_vector(self, text: str) -> models.SparseVector:
        """Encode *text* with SPLADE and wrap it as a Qdrant sparse vector."""
        sparse_obj = next(self.splade.embed(text))
        return models.SparseVector(**sparse_obj.as_object())

    def _bm25_query_vector(self, text: str) -> models.SparseVector:
        """Encode *text* with BM25 query-side weighting as a sparse vector."""
        sparse_obj = next(self.bm25.query_embed(text))
        return models.SparseVector(**sparse_obj.as_object())

    def search_splade(self, query: str, limit: int = TOPK_SPLADE):
        """Query the 'text-sparse' (SPLADE) named vector; return scored points."""
        vector = self._splade_query_vector(query)
        return self.client.query_points(
            collection_name=COLLECTION_NAME,
            query=vector,
            using='text-sparse',
            limit=limit,
        ).points

    def search_bm25(self, query: str, limit: int = TOPK_BM25):
        """Query the 'bm25' named vector; return scored points."""
        vector = self._bm25_query_vector(query)
        return self.client.query_points(
            collection_name=COLLECTION_NAME,
            query=vector,
            using='bm25',
            limit=limit,
        ).points

    def search_hybrid_rrf(
        self, query: str, limit: int = TOPK_FUSED, per_model: int = PREFETCH_PER_MODEL
    ):
        """Fuse SPLADE and BM25 candidates server-side via Reciprocal Rank Fusion.

        Each model prefetches *per_model* raw candidates; Qdrant then fuses the
        two ranked lists with RRF and returns the top *limit* points.
        """
        prefetch = [
            models.Prefetch(
                query=self._splade_query_vector(query),
                using='text-sparse',
                limit=per_model,
            ),
            models.Prefetch(
                query=self._bm25_query_vector(query), using='bm25', limit=per_model
            ),
        ]
        return self.client.query_points(
            collection_name=COLLECTION_NAME,
            prefetch=prefetch,
            query=models.FusionQuery(fusion=models.Fusion.RRF),
            limit=limit,
        ).points

    @staticmethod
    def _format(point) -> str:
        """Render one scored point as a short multi-line text summary."""
        payload = point.payload or {}
        title = payload.get('title', '')
        section = payload.get('title_section', 'Main')
        url = payload.get('url')
        content = (payload.get('content') or '').strip().replace('\n', ' ')
        if len(content) > 220:
            content = content[:220] + '...'
        lines = [f'Score: {point.score:.4f}', f'Title: {title} [{section}]']
        if url:
            lines.append(f'URL: {url}')
        lines.append(f'Snippet: {content}')
        return '\n'.join(lines)

    def pretty_print(self, header: str, points: List, limit: Optional[int] = None):
        """Print *points* under *header*, showing at most *limit* results.

        ``limit=None`` shows everything. Note: ``limit=0`` now shows nothing;
        the previous ``points[: limit or len(points)]`` slice treated 0 as
        "show all".
        """
        print('\n' + header)
        print('-' * len(header))
        if not points:
            print('(no results)')
            return
        # A plain slice handles both cases: [:None] keeps all, [:k] caps at k.
        for idx, p in enumerate(points[:limit], 1):
            print(f'\n{idx}. {self._format(p)}')


def run(query: str):
    """Execute the full demo: SPLADE, BM25, Hybrid fused."""
    searcher = HybridSparseSearcher()

    # Individual models
    splade_points = searcher.search_splade(query)
    bm25_points = searcher.search_bm25(query)

    # Hybrid
    hybrid_points = searcher.search_hybrid_rrf(query)

    # Output
    searcher.pretty_print(f'Top {TOPK_SPLADE} SPLADE Results', splade_points)
    searcher.pretty_print(f'Top {TOPK_BM25} BM25 Results', bm25_points)
    searcher.pretty_print(f'Top {TOPK_FUSED} Hybrid (RRF) Results', hybrid_points)


def main():
    query = 'Albert Einstein'
    run(query)


if __name__ == '__main__':
    main()
```

## Dataset Creation

### Curation Rationale

This dataset was created to provide a comprehensive, ready-to-use vector database for Wikipedia English content that supports both traditional lexical search (BM25) and modern semantic search (SPLADE) capabilities. The hybrid approach enables more accurate and diverse search results by combining the strengths of both methods.

### Source Data

#### Data Collection and Processing

- **Source**: Wikipedia English Cirrus dump (JSON format)
- **Processing Pipeline**:
  1. Downloaded Wikipedia Cirrus dump containing structured article data
  2. Extracted and split articles into chunks for granular search
  3. Generated SPLADE sparse embeddings using `naver/splade-v3`
  4. Generated BM25 sparse embeddings using `Qdrant/bm25` with English language settings
  5.
Stored in Qdrant vector database with optimized disk indexing

## Bias, Risks, and Limitations

### Known Limitations

- **Temporal Snapshot**: This dataset represents Wikipedia at a specific point in time and may not reflect current information
- **Language Limitation**: Only English Wikipedia content is included

### Recommendations

Users should be aware of these limitations and consider:

- Supplementing with real-time data sources for current information
- Being mindful of potential cultural and geographic biases in search results
- Validating critical information from primary sources
- Considering the vintage of the data when making time-sensitive queries

## Technical Details

### Requirements

- **Qdrant**: Version 1.15+ recommended
- **Python Dependencies**: `qdrant-client`, `fastembed`, `python-dotenv`
- **Hardware**: Minimum 12GB RAM recommended
- **Storage**: Approximately 380GB for the complete database

## Citation

If you use this dataset in your research or applications, please cite:

```bibtex
@dataset{wikipedia_english_qdrant_2025,
  title={Wikipedia English Embeddings Dataset (Qdrant Snapshot)},
  author={Emergent Methods},
  year={2025},
  url={https://huggingface.co/datasets/EmergentMethods/en_qdrant_wikipedia},
  note={Wikipedia content under CC-BY-SA-4.0 license}
}
```

## Dataset Card Contact

For questions, issues, or contributions, please contact [Emergent Methods](https://emergentmethods.ai).