amg2118 committed on
Commit
d9edc89
·
1 Parent(s): aef5d1f

Remove automodel as it was not working

Browse files
config.json → BertRegressorModel/config.json RENAMED
File without changes
model.safetensors → BertRegressorModel/model.safetensors RENAMED
File without changes
modelling_bert_regression.py DELETED
@@ -1,37 +0,0 @@
1
- import torch
2
- import torch.nn as nn
3
- from transformers import BertTokenizer, BertModel, BertPreTrainedModel, AutoModel
4
-
5
- # Configuration
6
- MODEL_NAME = "bert-base-uncased"
7
- MAX_LENGTH = 128
8
- BATCH_SIZE = 16
9
- NUM_EPOCHS = 3
10
- LEARNING_RATE = 2e-5
11
- DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
12
-
13
-
14
- tokenizer = BertTokenizer.from_pretrained(MODEL_NAME)
15
-
16
- def preprocess(arguments):
17
- encoding = tokenizer(arguments, truncation=True, padding="max_length", max_length=MAX_LENGTH)
18
- return encoding
19
-
20
- class BertRegressionModel(BertPreTrainedModel):
21
- def __init__(self, config):
22
- super().__init__(config)
23
- self.bert = BertModel(config)
24
- self.regressor = nn.Sequential(
25
- nn.Dropout(config.hidden_dropout_prob),
26
- nn.Linear(config.hidden_size, 1),
27
- nn.Sigmoid() # ensures output is in [0, 1]
28
- )
29
- self.init_weights()
30
-
31
- def forward(self, input_ids, attention_mask, labels=None):
32
- outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
33
- pooled_output = outputs.pooler_output
34
- preds = self.regressor(pooled_output).squeeze(-1)
35
- return preds
36
-
37
- AutoModel.register(BertRegressionModel)