import os
import json

import faiss
import numpy as np
from FlagEmbedding import FlagLLMModel, FlagAutoModel


def create_index(embeddings: np.ndarray):
    # Faiss expects float32 input; cast before building the inner-product index.
    embeddings = np.asarray(embeddings, dtype=np.float32)
    index = faiss.IndexFlatIP(embeddings.shape[1])
    index.add(embeddings)
    return index


def move_index_to_gpu(index):
    try:
        # Shard the index across all visible GPUs, storing vectors in fp16.
        co = faiss.GpuMultipleClonerOptions()
        co.shard = True
        co.useFloat16 = True
        index = faiss.index_cpu_to_all_gpus(index, co=co)
    except Exception:
        # faiss-gpu is not installed (or no GPU is usable); keep the CPU index.
        print('faiss-gpu not available, keeping index on CPU')
    return index


def load_model_util(previous_model, model_path):
    """Load an embedding model, reusing the previous one when the path matches."""
    self_model_path = '/share/chaofan/models/bge-multilingual-gemma2'
    if model_path == 'BAAI/bge-multilingual-gemma2':
        # Serve bge-multilingual-gemma2 from the local copy.
        if previous_model is not None and previous_model.model_name_or_path == self_model_path:
            return previous_model
        model = FlagLLMModel(self_model_path,
                             query_instruction_for_retrieval="Given a question, retrieve Wikipedia passages that answer the question.",
                             query_instruction_format="<instruct>{}\n<query>{}",
                             use_fp16=True,
                             devices=['cuda:0'])
    else:
        if previous_model is not None and previous_model.model_name_or_path == model_path:
            return previous_model
        model = FlagAutoModel.from_finetuned(model_path,
                                             use_fp16=True,
                                             devices=['cuda:0'])
    # Release the old model before moving the new one onto the GPU.
    if previous_model is not None:
        del previous_model
    model.model.half()
    model.model = model.model.to('cuda:0')
    return model


def load_corpus_util(base_dir, lang):
    """Load the corpus for a language plus its first five dev queries."""
    corpus_path = os.path.join(base_dir, lang, 'corpus.jsonl')
    data = []
    with open(corpus_path) as f:
        for line in f:
            data.append(json.loads(line))

    # Keep only the first five dev queries as examples.
    queries = []
    queries_path = os.path.join(base_dir, lang, 'dev_queries.jsonl')
    with open(queries_path) as f:
        for line in f:
            queries.append(json.loads(line)['text'])
            if len(queries) >= 5:
                break

    return data, queries


def build_index_util(emb_dir, lang, model, data):
    emb_path = os.path.join(emb_dir, lang, 'corpus.npy')
    index_path = os.path.join(emb_dir, lang, 'faiss.index')
    # Reuse a serialized index if one already exists.
    if os.path.exists(index_path):
        faiss_index = faiss.read_index(index_path)
        return move_index_to_gpu(faiss_index)

    # Reuse cached embeddings; otherwise encode the corpus and cache the result.
    if os.path.exists(emb_path):
        doc_emb = np.load(emb_path)
    else:
        doc_emb = model.encode_corpus(data, batch_size=256)
        os.makedirs(os.path.dirname(emb_path), exist_ok=True)
        np.save(emb_path, doc_emb)
    faiss_index = create_index(doc_emb)
    # faiss.write_index(faiss_index, index_path)
    faiss_index = move_index_to_gpu(faiss_index)
    return faiss_index


def search_util(model, query, corpus, faiss_index, topk):
    # Encode a single query and shape it as a (1, dim) float32 batch for faiss.
    query_emb = model.encode_queries(query)
    query_emb = np.asarray(query_emb, dtype=np.float32).reshape(1, -1)
    scores, inxs = faiss_index.search(query_emb, k=topk)
    # Map the returned row indices back to corpus entries.
    data = [corpus[idx] for idx in inxs[0]]
    return scores[0], data
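

# A minimal usage sketch of the utilities above, showing how they compose:
# load a model, load a corpus, build (or reuse) the index, then search.
# The directory layout ('/path/to/data', '/path/to/emb') and the language
# code 'en' are illustrative assumptions, not paths from this repository.
if __name__ == '__main__':
    base_dir = '/path/to/data'  # hypothetical: expects <base_dir>/<lang>/corpus.jsonl
    emb_dir = '/path/to/emb'    # hypothetical: cache dir for embeddings / index
    lang = 'en'

    model = load_model_util(None, 'BAAI/bge-multilingual-gemma2')
    corpus, queries = load_corpus_util(base_dir, lang)
    faiss_index = build_index_util(emb_dir, lang, model, corpus)

    # Retrieve the top-5 passages for the first dev query.
    scores, hits = search_util(model, queries[0], corpus, faiss_index, topk=5)
    for score, doc in zip(scores, hits):
        print(round(float(score), 4), doc.get('text', '')[:80])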