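# convert.py: embed dadjokes JSONL records with intfloat/e5-base-v2, drop
# near-duplicates via an HNSW cosine index, filter link-containing outputs,
# and export Alpaca-style instruction records.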
from argparse import ArgumentParser
import json
from dataclasses import dataclass
from typing import Dict, List, Set

import hnswlib
from sentence_transformers import SentenceTransformer
from tqdm import tqdm

@dataclass
class Doc:
    """One input/output record from the source JSONL file."""

    input: str
    output: str

    @staticmethod
    def from_json(doc: Dict) -> "Doc":
        return Doc(input=doc["input"], output=doc["output"])

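# Each input line is a JSON object shaped like (values illustrative):
#   {"input": "setup text", "output": "punchline"}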

if __name__ == "__main__":
    parser = ArgumentParser(prog="convert.py", description="dadjokes reddit JSONL deduplicator")
    parser.add_argument("--data", action="store", help="path to input JSONL file", required=True)
    parser.add_argument("--out", action="store", help="path to output JSONL file", required=True)
    parser.add_argument("--inst", action="store", help="Alpaca instruction attached to every record", required=True)

    args = parser.parse_args()
    print(args)

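    # e5 models expect "passage: "/"query: " prefixes on their inputs.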
    model = SentenceTransformer("intfloat/e5-base-v2", device="cuda")

    with open(args.data, "r") as infile:
        # One JSON document per line.
        docs: List[Doc] = []
        for line in tqdm(infile):
            docs.append(Doc.from_json(json.loads(line)))
        # Embed each "input output" pair as an e5 passage; one vector per doc.
        embeddings = model.encode(
            [f"passage: {doc.input} {doc.output}" for doc in docs],
            batch_size=512,
            show_progress_bar=True,
        )
        # Approximate nearest-neighbour index over the embeddings (cosine distance).
        p = hnswlib.Index(space="cosine", dim=embeddings.shape[1])
        print("building index")
        p.init_index(max_elements=len(docs), ef_construction=200, M=16)
        p.add_items(embeddings, list(range(len(docs))))
        print("computing similarity")
        # For each doc, fetch its 10 nearest neighbours; the doc itself is
        # included at distance 0.
        labels, distances = p.knn_query(embeddings, k=min(10, len(docs)))
        # Greedy dedup: keep the first occurrence and skip anything within
        # 0.07 cosine distance of an already-seen doc.
        skips: Set[int] = set()
        print("search done, exporting")
        dupe_count = 0
        broken_count = 0
        with open(args.out, "w") as output:
            for (index, doc), label_list, dist_list in zip(enumerate(docs), labels.tolist(), distances.tolist()):
                if index not in skips:
                    # Treat outputs containing links as broken and drop them.
                    if "http" not in doc.output:
                        jdoc = {"input": doc.input, "output": doc.output, "instruction": args.inst}
                        output.write(json.dumps(jdoc) + "\n")
                    else:
                        broken_count += 1
                else:
                    dupe_count += 1
                # Mark this doc and all of its near-duplicates as seen.
                skips.add(index)
                for label, dist in zip(label_list, dist_list):
                    if dist < 0.07:
                        skips.add(label)

        print(f"done: dupes={dupe_count} broken={broken_count}")