ZhuofengLi commited on
Commit
89efa1d
·
verified ·
1 Parent(s): 218a2b4

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +52 -1
README.md CHANGED
@@ -52,4 +52,55 @@ Each row in the dataset contains the following fields:
52
  + **url** (string): The source URL where the document was retrieved from.
53
 
54
  ## How to use this dataset?
55
- You can use this dataset together with its embeddings to build an offline search engine.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  + **url** (string): The source URL where the document was retrieved from.
53
 
54
  ## How to use this dataset?
55
+ You can use this dataset together with its embeddings to build an offline search engine. Below is pseudo code for **demonstration only** (for production use, consider [Faiss-GPU](https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU)).
56
+ ```python
57
+ # download the index first:
58
+ # huggingface-cli download OpenResearcher/OpenResearcher-Corpus --repo-type=dataset --include="qwen3-embedding-8b/*" --local-dir ./indexes
59
+ import glob
60
+ import pickle
61
+ import faiss
62
+ import numpy as np
63
+ from datasets import load_dataset
64
+ from sentence_transformers import SentenceTransformer
65
+
66
+ # 1. Load corpus
67
+ corpus = load_dataset("OpenResearcher/OpenResearcher-Corpus", split="train")
68
+ docid_to_doc = {str(doc["docid"]): doc for doc in corpus}
69
+
70
+ # 2. Load all embedding shards from OpenResearcher-Indexes
71
+ index_files = sorted(glob.glob("path/to/indexes/*.pkl"))
72
+ all_embeddings = []
73
+ all_lookup = []
74
+
75
+ for file_path in index_files:
76
+ with open(file_path, "rb") as f:
77
+ embeddings, lookup = pickle.load(f)
78
+ all_embeddings.append(embeddings)
79
+ all_lookup.extend(lookup)
80
+
81
+ all_embeddings = np.vstack(all_embeddings).astype(np.float32)
82
+ faiss.normalize_L2(all_embeddings) # Normalize for cosine similarity
83
+
84
+ # 3. Build FAISS index
85
+ index = faiss.IndexFlatIP(all_embeddings.shape[1])
86
+ index.add(all_embeddings)
87
+
88
+ # 4. Load model and encode query
89
+ model = SentenceTransformer("Qwen/Qwen3-Embedding-8B")
90
+ query = "What is machine learning?"
91
+ query_embedding = model.encode([query], prompt_name="query")
92
+
93
+ # 5. Search in FAISS
94
+ scores, indices = index.search(query_embedding, k=5)
95
+
96
+ # 6. Print results
97
+ for idx, score in zip(indices[0], scores[0]):
98
+ docid = str(all_lookup[idx])
99
+ doc = docid_to_doc.get(docid)
100
+ if doc:
101
+ print(f"Score: {score:.4f}")
102
+ print(f"URL: {doc['url']}")
103
+ print(f"Text: {doc['text'][:200]}...\n")
104
+ ```
105
+
106
+