import os
from collections import defaultdict

import pandas as pd
from datasets import DatasetDict
from mteb import MTEB
from mteb.abstasks.AbsTaskRetrieval import AbsTaskRetrieval
from sentence_transformers import SentenceTransformer
def load_dataset(path):
    """Read the parquet file at *path* into a DataFrame via the pyarrow engine."""
    return pd.read_parquet(path, engine="pyarrow")
| |
|
def load_retrieval_data(path, eval_split='dev'):
    """Load a retrieval dataset (corpus, queries, qrels) from *path*.

    Parameters
    ----------
    path : str
        Root directory containing a ``data`` subfolder with
        ``corpus.parquet.gz``, ``queries.parquet.gz`` and ``qrels.parquet.gz``.
    eval_split : str, optional
        Split name the data is registered under (default ``'dev'``,
        matching the task's ``eval_splits``).

    Returns
    -------
    tuple of DatasetDict
        ``(corpus, queries, relevant_docs)`` in MTEB retrieval layout:
        ``corpus[split][cid] -> {'text': ...}``,
        ``queries[split][qid] -> text``,
        ``relevant_docs[split][qid][cid] -> score``.
    """
    # os.path.join keeps the loader portable; the original hard-coded
    # backslash separators and only worked on Windows.
    data_dir = os.path.join(path, 'data')

    corpus = {
        row['cid']: {'text': row['text']}
        for _, row in load_dataset(os.path.join(data_dir, 'corpus.parquet.gz')).iterrows()
    }
    queries = {
        row['qid']: row['text']
        for _, row in load_dataset(os.path.join(data_dir, 'queries.parquet.gz')).iterrows()
    }
    relevant_docs = defaultdict(dict)
    for _, row in load_dataset(os.path.join(data_dir, 'qrels.parquet.gz')).iterrows():
        relevant_docs[row['qid']][row['cid']] = row['score']

    # MTEB expects each artifact keyed by split name.
    corpus = DatasetDict({eval_split: corpus})
    queries = DatasetDict({eval_split: queries})
    relevant_docs = DatasetDict({eval_split: relevant_docs})
    return corpus, queries, relevant_docs
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
# Load the local Dmeta embedding checkpoint onto the first GPU.
model = SentenceTransformer(r'D:\models\Dmeta', device='cuda:0')

# Quick sanity check: embeddings are L2-normalized, so the dot product
# of the two matrices is the cosine-similarity matrix.
demo_queries = ["胡子长得太快怎么办?", "在香港哪里买手表好"]
demo_candidates = ["胡子长得快怎么办?", "怎样使胡子不浓密!", "香港买手表哪里好", "在杭州手机到哪里买"]
query_embs = model.encode(demo_queries, normalize_embeddings=True)
cand_embs = model.encode(demo_candidates, normalize_embeddings=True)
print(query_embs @ cand_embs.T)
| |
|
class H2Retrieval(AbsTaskRetrieval):
    """MTEB retrieval task over the H2Retrieval (h-corpus) dataset."""

    @property
    def description(self):
        """Task metadata consumed by the MTEB runner."""
        return dict(
            name='H2Retrieval',
            hf_hub_name='Limour/H2Retrieval',
            reference='https://huggingface.co/datasets/a686d380/h-corpus-2023',
            description='h-corpus 领域的 Retrieval 评价数据集。',
            type='Retrieval',
            category='s2p',
            eval_splits=['dev'],
            eval_langs=['zh'],
            main_score='ndcg_at_10',
        )

    def load_data(self, **kwargs):
        """Populate corpus/queries/qrels from disk; no-op when already loaded."""
        if self.data_loaded:
            return
        loaded = load_retrieval_data(r'D:\datasets\H2Retrieval')
        self.corpus, self.queries, self.relevant_docs = loaded
        self.data_loaded = True
| |
|
# Run the H2Retrieval benchmark against the loaded model.
evaluation = MTEB(tasks=[H2Retrieval()])
evaluation.run(model)
| | |
| | |
| | |