import os
import pickle

import pandas as pd
import torch
from torch.optim import AdamW  # transformers.AdamW was deprecated and later removed; use torch's
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer, BertForSequenceClassification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

class TextDataset(Dataset):
    """Wraps raw texts and integer labels as a PyTorch Dataset of BERT encodings."""

    def __init__(self, texts, labels, tokenizer, max_len):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, idx):
        text = self.texts[idx]
        label = self.labels[idx]
        # Pad/truncate every example to the same length so the DataLoader can stack batches.
        encoding = self.tokenizer(text, truncation=True, padding='max_length',
                                  max_length=self.max_len, return_tensors='pt')
        return {
            # squeeze(0) drops the extra batch dimension added by return_tensors='pt'
            'input_ids': encoding['input_ids'].squeeze(0),
            'attention_mask': encoding['attention_mask'].squeeze(0),
            'labels': torch.tensor(label, dtype=torch.long)
        }
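
# Illustrative sanity check (commented out; 'demo_ds' is a made-up name for this sketch):
# demo_ds = TextDataset(["hello world"], [0],
#                       BertTokenizer.from_pretrained("bert-base-uncased"), max_len=8)
# demo_ds[0]['input_ids'].shape  # -> torch.Size([8]); the batch dim is squeezed away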

# Sample data: a tiny toy corpus; swap in a real labeled dataset for actual training.
data = pd.DataFrame({
    "text": ["I love this", "I hate this", "This is amazing", "This is terrible"],
    "label": ["positive", "negative", "positive", "negative"]
})

# Preprocess: map string labels to integer ids (e.g. negative -> 0, positive -> 1).
le = LabelEncoder()
data["label_enc"] = le.fit_transform(data["label"])

# A fixed random_state makes the train/validation split reproducible across runs.
train_texts, val_texts, train_labels, val_labels = train_test_split(
    data["text"], data["label_enc"], test_size=0.2, random_state=42
)

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
train_dataset = TextDataset(train_texts.tolist(), train_labels.tolist(), tokenizer, max_len=32)
train_loader = DataLoader(train_dataset, batch_size=2, shuffle=True)

# Derive num_labels from the encoder so it stays in sync with the data (2 here).
model = BertForSequenceClassification.from_pretrained("bert-base-uncased",
                                                      num_labels=len(le.classes_))
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
optimizer = AdamW(model.parameters(), lr=5e-5)

model.train()
for epoch in range(1):
    for batch in train_loader:
        # Move the batch onto the same device as the model.
        batch = {k: v.to(device) for k, v in batch.items()}
        optimizer.zero_grad()
        # Passing labels makes the model compute cross-entropy loss internally.
        outputs = model(input_ids=batch['input_ids'],
                        attention_mask=batch['attention_mask'],
                        labels=batch['labels'])
        loss = outputs.loss
        loss.backward()
        optimizer.step()
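
# Held-out evaluation: a sketch added here, since the validation split created
# above is otherwise never used by this script.
val_dataset = TextDataset(val_texts.tolist(), val_labels.tolist(), tokenizer, max_len=32)
val_loader = DataLoader(val_dataset, batch_size=2)
model.eval()
correct = 0
with torch.no_grad():
    for batch in val_loader:
        batch = {k: v.to(device) for k, v in batch.items()}
        logits = model(input_ids=batch['input_ids'],
                       attention_mask=batch['attention_mask']).logits
        correct += (logits.argmax(dim=-1) == batch['labels']).sum().item()
print(f"Validation accuracy: {correct / len(val_dataset):.2f}")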

# Ensure the output directory exists before writing artifacts.
os.makedirs("app", exist_ok=True)

torch.save(model.state_dict(), "app/bert_model.pth")

with open("app/tokenizer.pkl", "wb") as f:
    pickle.dump(tokenizer, f)

with open("app/label_encoder.pkl", "wb") as f:
    pickle.dump(le, f)
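
# Usage sketch: reload the saved artifacts and classify one sentence. This is an
# illustration, not part of the original pipeline; 'predict' is a hypothetical helper.
def predict(text, max_len=32):
    with open("app/tokenizer.pkl", "rb") as f:
        tok = pickle.load(f)
    with open("app/label_encoder.pkl", "rb") as f:
        enc = pickle.load(f)
    clf = BertForSequenceClassification.from_pretrained("bert-base-uncased",
                                                        num_labels=len(enc.classes_))
    clf.load_state_dict(torch.load("app/bert_model.pth", map_location="cpu"))
    clf.eval()
    inputs = tok(text, truncation=True, padding='max_length',
                 max_length=max_len, return_tensors='pt')
    with torch.no_grad():
        logits = clf(**inputs).logits
    # Map the winning class index back to its original string label.
    return enc.inverse_transform([logits.argmax(dim=-1).item()])[0]

# Example: print(predict("I really enjoyed this"))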