shivansarora committed on
Commit 239ff1d · verified · 1 Parent(s): d7332b4

Create util.py

Files changed (1)
  1. util.py +108 -0
util.py ADDED
import torch
import scipy.stats
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sns
import matplotlib.pyplot as plt


class TextDataset(torch.utils.data.Dataset):
    """Wraps tokenizer encodings so they can be served by a DataLoader."""

    def __init__(self, encodings):
        self.encodings = encodings
        self.data_len = len(encodings['input_ids'])

    def __getitem__(self, idx):
        item = {key: val[idx].clone().detach() for key, val in self.encodings.items()}
        return item

    def __len__(self):
        return self.data_len


class CEFRDataset(torch.utils.data.Dataset):
    """Encodings plus two annotator scores, stored as per-item low/high labels."""

    def __init__(self, encodings, sent_levels_a, sent_levels_b):
        self.encodings = encodings
        # Order the two annotations so 'low' <= 'high' for every sentence.
        self.slabels_low = np.minimum(sent_levels_a, sent_levels_b)
        self.slabels_high = np.maximum(sent_levels_a, sent_levels_b)

    def __getitem__(self, idx):
        item = {key: val[idx].clone().detach() for key, val in self.encodings.items()}
        item['slabels_low'] = self.slabels_low[idx].clone().detach()
        item['slabels_high'] = self.slabels_high[idx].clone().detach()
        return item

    def __len__(self):
        return len(self.slabels_high)


class ConcatDataset(torch.utils.data.Dataset):
    """Zips several datasets; each item is a tuple with one entry per dataset."""

    def __init__(self, *datasets):
        self.datasets = datasets

    def __getitem__(self, i):
        return tuple(d[i] for d in self.datasets)

    def __len__(self):
        # The shortest dataset bounds the usable length.
        return min(len(d) for d in self.datasets)


def read_corpus(path, num_labels):
    """Read a tab-separated corpus: tokenized sentence, annotator-A level, annotator-B level.

    Note: num_labels is currently unused but kept in the signature.
    """
    levels_a, levels_b, sents = [], [], []
    with open(path) as f:
        for line in f:
            array = line.strip().split('\t')
            sents.append(array[0].split(' '))
            levels_a.append(float(array[1]) - 1)  # convert 1-6 to 0-5
            levels_b.append(float(array[2]) - 1)  # convert 1-6 to 0-5

    levels_a = np.array(levels_a)
    levels_b = np.array(levels_b)

    return levels_a, levels_b, sents


def convert_numeral_to_six_levels(levels):
    """Discretize continuous 0-5 scores into six discrete levels (0..5)."""
    level_thresholds = np.array([0.0, 0.5, 1.5, 2.5, 3.5, 4.5])
    return _conversion(level_thresholds, levels)


def _conversion(level_thresholds, values):
    # Accept either a 1-D array or an (n, 1) column of scores.
    values = np.asarray(values).reshape(-1, 1)
    # For each value, count how many thresholds it meets or exceeds;
    # that count minus one is its level index, clamped at 0 from below.
    thresh_array = np.tile(level_thresholds, reps=(values.shape[0], 1))
    array = np.tile(values, reps=(1, level_thresholds.shape[0]))
    levels = np.maximum(np.zeros((values.shape[0], 1)),
                        np.count_nonzero(thresh_array <= array, axis=1, keepdims=True) - 1).astype(int)
    return levels


# Mean pooling: average token embeddings, using the attention mask so that
# padding positions do not contribute to the mean.
def mean_pooling(token_embeddings, attention_mask):
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Zero out the embeddings of padding tokens, keeping the sequence dimension intact.
def token_embeddings_filtering_padding(token_embeddings, attention_mask):
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return token_embeddings * input_mask_expanded


def eval_multiclass(out_path, labels, predictions):
    """Save a confusion-matrix heatmap and a classification report under out_path."""
    cm = confusion_matrix(labels, predictions)
    plt.figure()
    sns.heatmap(cm)
    plt.savefig(out_path + '_confusion_matrix.png')
    plt.close()  # free the figure so repeated calls don't accumulate

    report = classification_report(labels, predictions, digits=4)
    print(report)
    with open(out_path + '_test_report.txt', 'w') as fw:
        fw.write('{0}\n'.format(report))


def mean_confidence_interval(data, confidence=0.95):
    """Return (mean, half-width) of a t-based confidence interval.

    With more than five observations, the single highest and lowest
    values are dropped as outliers before computing the interval.
    """
    data = list(data)  # work on a copy so the caller's list is not mutated
    if len(data) > 5:
        data.remove(max(data))
        data.remove(min(data))
    a = 1.0 * np.array(data)
    n = len(a)
    m, se = np.mean(a), scipy.stats.sem(a)
    h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
    return m, h
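
A minimal usage sketch (not part of the commit) showing how these utilities might fit together. The tokenizer name 'bert-base-uncased', the file path 'cefr.tsv', and the batch size are illustrative assumptions, not values taken from this repository.

# usage_sketch.py -- illustrative only; model name and file path are assumptions
import torch
from transformers import AutoTokenizer

from util import (CEFRDataset, read_corpus,
                  convert_numeral_to_six_levels, mean_confidence_interval)

# Hypothetical TSV with "tokenized sentence<TAB>level_a<TAB>level_b" rows.
levels_a, levels_b, sents = read_corpus('cefr.tsv', num_labels=6)

tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')  # assumed model
enc = tokenizer([' '.join(s) for s in sents],
                padding=True, truncation=True, return_tensors='pt')

dataset = CEFRDataset(enc, torch.tensor(levels_a), torch.tensor(levels_b))
loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)

# Discretize the continuous 0-5 scores into the six discrete levels 0..5.
discrete = convert_numeral_to_six_levels(levels_a)

# Summarize repeated run scores as mean +/- half-width of a 95% CI.
mean, half_width = mean_confidence_interval([0.71, 0.69, 0.73, 0.70, 0.72])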