import json
import os

import faiss
import numpy as np

from FlagEmbedding import FlagAutoModel, FlagLLMModel


def create_index(embeddings: np.ndarray):
    """Build a flat inner-product (cosine, for L2-normalized embeddings) FAISS index."""
    # faiss expects a contiguous float32 matrix.
    embeddings = np.asarray(embeddings, dtype=np.float32)
    index = faiss.IndexFlatIP(embeddings.shape[1])
    index.add(embeddings)
    return index


def move_index_to_gpu(index):
    """Shard the index across all available GPUs; fall back to CPU if faiss-gpu is missing."""
    try:
        co = faiss.GpuMultipleClonerOptions()
        co.shard = True        # split the index across GPUs instead of replicating it
        co.useFloat16 = True   # halve GPU memory usage
        index = faiss.index_cpu_to_all_gpus(index, co=co)
    except Exception:
        print('faiss-gpu is not available; keeping the index on CPU')
    return index


def load_model_util(previous_model, model_path):
    """Load an embedding model, reusing previous_model if it already matches model_path."""
    self_model_path = '/share/chaofan/models/bge-multilingual-gemma2'
    if model_path == 'BAAI/bge-multilingual-gemma2':
        if previous_model is not None and previous_model.model_name_or_path == self_model_path:
            return previous_model
        model = FlagLLMModel(
            self_model_path,
            query_instruction_for_retrieval="Given a question, retrieve Wikipedia passages that answer the question.",
            query_instruction_format="<instruct>{}\n<query>{}",
            use_fp16=True,
            devices=['cuda:0'],
        )
    else:
        if previous_model is not None and previous_model.model_name_or_path == model_path:
            return previous_model
        model = FlagAutoModel.from_finetuned(
            model_path,
            use_fp16=True,
            devices=['cuda:0'],
        )
    # Drop the reference to the old model before moving the new one to the GPU.
    if previous_model is not None:
        del previous_model
    model.model.half()
    model.model = model.model.to('cuda:0')
    return model


def load_corpus_util(base_dir, lang):
    """Load the full corpus and the first five dev queries for the given language."""
    corpus_path = os.path.join(base_dir, lang, 'corpus.jsonl')
    data = []
    with open(corpus_path) as f:
        for line in f:
            data.append(json.loads(line))

    # Only the first five dev queries are kept.
    queries = []
    queries_path = os.path.join(base_dir, lang, 'dev_queries.jsonl')
    with open(queries_path) as f:
        for line in f:
            queries.append(json.loads(line)['text'])
            if len(queries) >= 5:
                break

    return data, queries


def build_index_util(emb_dir, lang, model, data):
    """Return a FAISS index for the corpus, using cached embeddings/index when available."""
    emb_path = os.path.join(emb_dir, lang, 'corpus.npy')
    index_path = os.path.join(emb_dir, lang, 'faiss.index')
    if os.path.exists(index_path):
        faiss_index = faiss.read_index(index_path)
        return move_index_to_gpu(faiss_index)

    if os.path.exists(emb_path):
        doc_emb = np.load(emb_path)
    else:
        doc_emb = model.encode_corpus(data, batch_size=256)
        os.makedirs(os.path.dirname(emb_path), exist_ok=True)
        np.save(emb_path, doc_emb)
    faiss_index = create_index(doc_emb)
    # Persist the CPU index so the existence check above can hit on later runs.
    faiss.write_index(faiss_index, index_path)

    faiss_index = move_index_to_gpu(faiss_index)
    return faiss_index


def search_util(model, query, corpus, faiss_index, topk):
    """Embed a single query and return the top-k scores and matching corpus entries."""
    query_emb = model.encode_queries(query)
    # faiss expects a float32 matrix of shape (num_queries, dim).
    query_emb = np.asarray(query_emb, dtype=np.float32).reshape(1, -1)
    scores, idxs = faiss_index.search(query_emb, k=topk)
    data = [corpus[idx] for idx in idxs[0]]
    return scores[0], data
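

# A minimal usage sketch of the utilities above. The data/embedding
# directories and the language code below are placeholders, not values from
# this module; adjust them to your own layout.
if __name__ == '__main__':
    model = load_model_util(None, 'BAAI/bge-multilingual-gemma2')
    corpus, queries = load_corpus_util('/path/to/data', 'en')              # hypothetical base_dir
    index = build_index_util('/path/to/embeddings', 'en', model, corpus)   # hypothetical emb_dir
    scores, docs = search_util(model, queries[0], corpus, index, topk=5)
    for score, doc in zip(scores, docs):
        print(f'{score:.4f}', doc.get('text', '')[:100])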