File size: 3,419 Bytes
545c4d5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
from typing import Dict
import torch
from cde_benchmark.formatters.data_formatter import BaseDataFormatter
from cde_benchmark.evaluators.eval_utils import CustomRetrievalEvaluator


class Embedder:
    """Base class for embedding models evaluated on retrieval benchmarks.

    Subclasses implement ``embed_queries`` / ``embed_documents``; this base
    class handles pulling inputs from a data formatter, scoring
    query/document similarity with a dot product, and computing MTEB-style
    retrieval metrics.
    """

    def __init__(
        self,
        is_contextual_model: bool = False,
    ):
        # Contextual models embed documents grouped per context (nested
        # lists); non-contextual models receive one flat document list.
        self.is_contextual_model = is_contextual_model
        self.evaluator = CustomRetrievalEvaluator()

    def embed_queries(self, queries):
        """Embed a list of queries. Must be overridden by subclasses."""
        raise NotImplementedError

    def embed_documents(self, documents):
        """Embed documents (flat list, or list-of-lists for contextual
        models — same nesting must be preserved in the returned embeddings).
        Must be overridden by subclasses."""
        raise NotImplementedError

    def process_queries(self, data_formatter):
        """Embed every query supplied by *data_formatter*.

        Returns:
            Tuple ``(query_embeddings, document_ids)`` where
            ``document_ids[i]`` is the gold document id for query ``i``.
        """
        queries, document_ids = data_formatter.get_queries()
        query_embeddings = self.embed_queries(queries)

        # make into a contiguous tensor, and map position to document_ids
        return query_embeddings, document_ids

    def process_documents(self, data_formatter):
        """Embed every document supplied by *data_formatter*.

        For contextual models the formatter yields nested lists (documents
        grouped by context); both ids and embeddings are flattened
        afterwards so the return shape matches the non-contextual case.

        Returns:
            Tuple ``(doc_embeddings, document_ids)`` aligned by position.
        """
        if self.is_contextual_model:
            documents, document_ids = data_formatter.get_nested()
            # Contextual embedders receive a list of lists of documents and
            # should return embeddings in the same nested shape.
            doc_embeddings = self.embed_documents(documents)
            # Flatten ids and embeddings in lockstep so positions stay aligned.
            document_ids = [id_ for nested_ids in document_ids for id_ in nested_ids]
            doc_embeddings = [
                embed_ for nested_embeds in doc_embeddings for embed_ in nested_embeds
            ]

        else:
            documents, document_ids = data_formatter.get_flattened()
            doc_embeddings = self.embed_documents(documents)

        # make into a contiguous tensor, and map position to document_ids
        return doc_embeddings, document_ids

    @staticmethod
    def _to_tensor(embeddings):
        """Coerce *embeddings* into a 2D tensor without redundant copies.

        Accepts an existing tensor (returned as-is), a list/tuple of 1D
        tensors (stacked), or a nested list of floats (converted). The
        previous unconditional ``torch.tensor(...)`` call warns-and-copies
        when handed a tensor and raises on a list of tensors — both are
        plausible outputs of subclass ``embed_*`` implementations.
        """
        if isinstance(embeddings, torch.Tensor):
            return embeddings
        if (
            isinstance(embeddings, (list, tuple))
            and len(embeddings) > 0
            and isinstance(embeddings[0], torch.Tensor)
        ):
            return torch.stack(list(embeddings))
        return torch.tensor(embeddings)

    def get_similarities(self, query_embeddings, doc_embeddings):
        """Return the ``(num_queries, num_docs)`` dot-product score matrix."""
        query_embeddings = self._to_tensor(query_embeddings)
        doc_embeddings = self._to_tensor(doc_embeddings)
        scores = torch.mm(query_embeddings, doc_embeddings.t())
        return scores

    def get_metrics(self, scores, all_document_ids, label_documents_id):
        """Compute MTEB retrieval metrics from a similarity score matrix.

        Args:
            scores: 2D tensor of shape ``(num_queries, num_docs)``.
            all_document_ids: ids of all documents, in score-column order.
            label_documents_id: gold document id per query, in row order.

        Raises:
            ValueError: if the score matrix shape disagrees with the id
                lists, or a label id is missing from the corpus. (Explicit
                raises replace ``assert``, which is silently stripped
                under ``python -O``.)
        """
        if scores.shape[1] != len(all_document_ids):
            raise ValueError(
                f"scores has {scores.shape[1]} columns but "
                f"{len(all_document_ids)} document ids were given"
            )
        if scores.shape[0] != len(label_documents_id):
            raise ValueError(
                f"scores has {scores.shape[0]} rows but "
                f"{len(label_documents_id)} label ids were given"
            )
        if not set(label_documents_id).issubset(set(all_document_ids)):
            raise ValueError("some label document ids are not in the corpus")

        # qrels: each query has exactly one gold document with relevance 1.
        relevant_docs = {}
        for idx, label in enumerate(label_documents_id):
            relevant_docs[str(idx)] = {label: 1}

        # run results: per-query mapping of document id -> similarity score.
        results = {}
        for idx, scores_per_query in enumerate(scores):
            results[str(idx)] = {
                str(doc_id): score.item()
                for doc_id, score in zip(all_document_ids, scores_per_query)
            }

        metrics: Dict[str, float] = self.evaluator.compute_mteb_metrics(
            relevant_docs, results
        )
        return metrics

    def compute_metrics_e2e(self, data_formatter):
        """Embed, score, and evaluate in one call; return the metrics dict."""
        queries_embeddings, label_ids = self.process_queries(data_formatter)
        documents_embeddings, all_doc_ids = self.process_documents(data_formatter)

        scores = self.get_similarities(queries_embeddings, documents_embeddings)
        metrics = self.get_metrics(scores, all_doc_ids, label_ids)
        return metrics