YannickLetrefle committed
Commit ab42b99 · 1 Parent(s): 8fc7adb

Update README.md

Files changed (1)
  1. README.md +126 -0
README.md CHANGED
@@ -1,3 +1,129 @@
---
license: cc
---

This is a classifier fine-tuned from CamemBERT. It takes a text and a question as input and returns 1 if the text is helpful for answering the question and 0 otherwise.
The input should be formatted as `tokenized_paragraph + sep_token + tokenized_question`.
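For illustration, here is a minimal usage sketch (not part of the original card). It builds the `tokenized_paragraph + sep_token + tokenized_question` input for a single pair and reads the model's single logit through a sigmoid with a 0.5 threshold; the repository id, the example sentences, and the thresholding rule are assumptions.

```python
# Minimal usage sketch (assumptions noted below), not the card's own code.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_ID = "path/to/this-model"  # placeholder: substitute this model's actual repository id

tokenizer = AutoTokenizer.from_pretrained("camembert/camembert-large")
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID, num_labels=1)
model.eval()

paragraph = "Le Louvre est un musée situé à Paris."   # example sentences, not from the card
question = "Où se trouve le Louvre ?"

# Tokenize paragraph and question separately, then join them with the SEP token
par = tokenizer(paragraph, truncation=True, max_length=512, return_tensors="pt")
que = tokenizer(question, truncation=True, max_length=512, return_tensors="pt")
sep = torch.tensor([[tokenizer.sep_token_id]])

input_ids = torch.cat([par["input_ids"], sep, que["input_ids"]], dim=1)
attention_mask = torch.cat([par["attention_mask"], torch.ones_like(sep), que["attention_mask"]], dim=1)

with torch.no_grad():
    logit = model(input_ids=input_ids, attention_mask=attention_mask).logits.squeeze()

# Assumed decision rule: sigmoid on the single logit, threshold at 0.5
prediction = int(torch.sigmoid(logit) > 0.5)
print(prediction)  # 1 if the paragraph is judged helpful for the question, else 0
```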
Data preparation and model setup used for fine-tuning:

```python
import numpy as np
import torch
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import train_test_split
from transformers import AutoTokenizer, AutoModelForSequenceClassification


class QuestionAnswerDataset(Dataset):
    """Builds (paragraph + sep + question, label) pairs from a DataFrame with
    'paragraph', 'positive_questions' and 'negative_questions' columns."""

    def __init__(self, dataframe, tokenizer, max_length=512):
        self.dataframe = dataframe
        self.tokenizer = tokenizer
        self.max_length = max_length

    def __len__(self):
        return len(self.dataframe)

    def __getitem__(self, idx):
        # Extract data
        row = self.dataframe.iloc[idx]
        paragraph = row['paragraph']

        # Randomly pick a positive (helpful) or negative (unhelpful) question
        is_positive = 1 if np.random.random() > 0.5 else 0
        if is_positive:
            question = row['positive_questions'][np.random.randint(len(row['positive_questions']))]
            label = 1
        else:
            question = row['negative_questions'][np.random.randint(len(row['negative_questions']))]
            label = 0

        # Tokenize paragraph and question separately
        tokenized_paragraph = self.tokenizer(
            paragraph,
            truncation=True,
            max_length=self.max_length,
            return_tensors="pt"
        )
        tokenized_question = self.tokenizer(
            question,
            truncation=True,
            max_length=self.max_length,
            return_tensors="pt"
        )

        # Truncate the paragraph if the concatenation would exceed max_length
        total_length = tokenized_paragraph['input_ids'].shape[1] + tokenized_question['input_ids'].shape[1] + 2
        if total_length > self.max_length:
            keep = self.max_length - tokenized_question['input_ids'].shape[1] - 2
            tokenized_paragraph['input_ids'] = tokenized_paragraph['input_ids'][:, :keep]
            tokenized_paragraph['attention_mask'] = tokenized_paragraph['attention_mask'][:, :keep]

        sep_token = torch.tensor([[self.tokenizer.sep_token_id]]).to(tokenized_paragraph['input_ids'].device)

        # Concatenate: tokenized_paragraph + sep_token + tokenized_question
        tokenized_input_ids = torch.cat(
            (tokenized_paragraph['input_ids'], sep_token, tokenized_question['input_ids']), dim=1)
        tokenized_attention_mask = torch.cat(
            [tokenized_paragraph['attention_mask'], torch.ones_like(sep_token), tokenized_question['attention_mask']],
            dim=1)

        # Make sure the length does not exceed max_length
        if tokenized_input_ids.size(1) > self.max_length:
            tokenized_input_ids = tokenized_input_ids[:, :self.max_length]
            tokenized_attention_mask = tokenized_attention_mask[:, :self.max_length]

        return tokenized_input_ids.squeeze(0), tokenized_attention_mask.squeeze(0), torch.tensor(label)


# Tokenizer
tokenizer = AutoTokenizer.from_pretrained("camembert/camembert-large")

# Train/test split, test_size = 0.2 (df is the source DataFrame, not shown, with the columns used above)
train_df, test_df = train_test_split(df, test_size=0.2, random_state=42)

# Datasets
train_dataset = QuestionAnswerDataset(train_df, tokenizer)
test_dataset = QuestionAnswerDataset(test_df, tokenizer)


def custom_collate_fn(batch):
    # Pad variable-length sequences to the longest one in the batch
    input_ids = [item[0] for item in batch]
    attention_masks = [item[1] for item in batch]
    labels = torch.stack([item[2] for item in batch])

    input_ids_padded = pad_sequence(input_ids, batch_first=True, padding_value=tokenizer.pad_token_id)
    attention_masks_padded = pad_sequence(attention_masks, batch_first=True, padding_value=0)

    return input_ids_padded, attention_masks_padded, labels


# DataLoaders
train_dataloader = DataLoader(train_dataset, batch_size=64, shuffle=True, collate_fn=custom_collate_fn)
test_dataloader = DataLoader(test_dataset, batch_size=64, shuffle=True, collate_fn=custom_collate_fn)

# Inspect the first couple of batches from the train DataLoader
for i, (input_ids, attention_masks, labels) in enumerate(train_dataloader):
    print(f"Batch {i+1}")
    print("Input IDs:", input_ids)
    print("Input IDs Shape:", input_ids.shape)
    print("Attention Masks:", attention_masks)
    print("Attention Masks Shape:", attention_masks.shape)
    print("Labels:", labels)
    print("Labels Shape:", labels.shape)
    print("-" * 50)

    # Optionally, stop after the first few batches
    if i == 1:  # change this number to control how many batches to print
        break


# Model (a single output logit for binary classification)
camembertModel = AutoModelForSequenceClassification.from_pretrained("camembert/camembert-large", num_labels=1)
```
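The card stops after loading the model. As a hedged sketch of how a training step could look with this single-logit head (the optimizer, learning rate, and use of `BCEWithLogitsLoss` are assumptions, not taken from the original code):

```python
# Hypothetical training step, building on camembertModel and train_dataloader above:
# treat the single logit as a binary score and train it with BCEWithLogitsLoss.
import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
camembertModel.to(device)

optimizer = torch.optim.AdamW(camembertModel.parameters(), lr=2e-5)  # assumed hyperparameters
criterion = torch.nn.BCEWithLogitsLoss()

camembertModel.train()
for input_ids, attention_masks, labels in train_dataloader:
    input_ids = input_ids.to(device)
    attention_masks = attention_masks.to(device)
    labels = labels.float().to(device)  # BCE expects float targets

    logits = camembertModel(input_ids=input_ids, attention_mask=attention_masks).logits.squeeze(-1)
    loss = criterion(logits, labels)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```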