import torch
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from datasets import load_dataset
from transformers import BertTokenizer

def load_chatbot_data(file_path, sample_frac=0.05):
    """Sample the chatbot-arena CSV and split it into train/test sets with integer labels."""
    df = pd.read_csv(file_path).sample(frac=sample_frac, random_state=42)
    # argmax over the one-hot winner columns: 0 = model A wins, 1 = model B wins, 2 = tie.
    labels = np.argmax(df[['winner_model_a', 'winner_model_b', 'winner_tie']].values, axis=1)
    # Returns (train_df, test_df, train_labels, test_labels).
    return train_test_split(df, labels, test_size=0.2, random_state=42)

def load_personas():
    """Collect (input persona, synthesized text) pairs from several PersonaHub subsets."""
    subsets = ["instruction", "npc", "math", "tool", "reasoning", "knowledge"]
    all_personas = []
    for subset in subsets:
        data = load_dataset("proj-persona/PersonaHub", subset, split="train")
        all_personas.extend([(p['input persona'], p['synthesized text']) for p in data])
    return all_personas

class ChatbotDataset(torch.utils.data.Dataset):
    """Pairs each prompt with two candidate responses and a preference label."""

    def __init__(self, prompts, responses_a, responses_b, labels, tokenizer, max_length=128):
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.prompts = prompts
        self.responses_a = responses_a
        self.responses_b = responses_b
        self.labels = labels

    def __len__(self):
        return len(self.labels)

    def _encode(self, text):
        # Tokenize to a fixed length so examples batch cleanly.
        return self.tokenizer.encode_plus(
            text,
            add_special_tokens=True,
            max_length=self.max_length,
            padding='max_length',
            truncation=True,
            return_tensors='pt'
        )

    def __getitem__(self, idx):
        return {
            'prompt': self._encode(self.prompts[idx]),
            'response_a': self._encode(self.responses_a[idx]),
            'response_b': self._encode(self.responses_b[idx]),
            'label': torch.tensor(self.labels[idx], dtype=torch.long)
        }
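

# --- Usage sketch (not part of the original pipeline) ---
# A minimal example of wiring the pieces together. It assumes the CSV follows
# the LMSYS chatbot-arena schema with 'prompt', 'response_a', and 'response_b'
# columns alongside the winner columns used above; the 'train.csv' path is
# hypothetical.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    train_df, test_df, train_labels, test_labels = load_chatbot_data('train.csv')

    train_dataset = ChatbotDataset(
        train_df['prompt'].tolist(),
        train_df['response_a'].tolist(),
        train_df['response_b'].tolist(),
        train_labels,
        tokenizer,
    )
    train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)

    batch = next(iter(train_loader))
    # Each encoded field is a dict of tensors shaped (batch, 1, max_length),
    # since encode_plus with return_tensors='pt' adds a leading dim per example.
    print(batch['prompt']['input_ids'].shape, batch['label'].shape)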