6023oji committed
Commit a909fa2 · 1 Parent(s): 9a2142b

Upload kpmg_(2).py

Files changed (1): kpmg_(2).py +238 -0
kpmg_(2).py ADDED
# -*- coding: utf-8 -*-
"""kpmg (2).ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1h7M0B8Uvu4c7u6iZK1VT-mAS4YydvyA3

# **Import Module**
"""

import pandas as pd
import numpy as np

!pip install mxnet
!pip install gluonnlp pandas tqdm
!pip install sentencepiece
!pip install transformers==3.0.2
!pip install torch

!pip install git+https://git@github.com/SKTBrain/KoBERT.git@master

import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import gluonnlp as nlp
from tqdm import tqdm, tqdm_notebook

from kobert.utils import get_tokenizer
from kobert.pytorch_kobert import get_pytorch_kobert_model

from transformers import AdamW
from transformers.optimization import get_cosine_schedule_with_warmup

# Use the GPU
device = torch.device("cuda:0")
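
# Editor's note -- a hedged alternative, not in the original: the hard-coded
# "cuda:0" assumes a Colab GPU runtime. A CPU fallback keeps the script
# runnable elsewhere:
#   device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")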

# Load the KoBERT model and vocabulary
bertmodel, vocab = get_pytorch_kobert_model()

import os

"""# **Load Data**"""

from google.colab import drive
drive.mount('/content/drive')

data = pd.read_csv(r'/content/drive/MyDrive/kpmg/concat.csv')

data

# Map the ESG category labels to integer classes: 중립 (neutral) = 0, e = 1, s = 2, g = 3
data.loc[(data['category'] == "중립"), 'category'] = 0
data.loc[(data['category'] == "e"), 'category'] = 1
data.loc[(data['category'] == "s"), 'category'] = 2
data.loc[(data['category'] == "g"), 'category'] = 3
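
# Editor's note (an assumption, not in the original): after this in-place
# remapping the column still has object dtype, since it held strings; an
# explicit cast would make it numeric:
#   data['category'] = data['category'].astype(int)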

# Pair each document with its label for quick inspection
data_list = []
for q, label in zip(data['contents'], data['category']):
    data1 = []
    data1.append(q)
    data1.append(str(label))

    data_list.append(data1)

print(data_list[0])
print(data_list[100])
print(data_list[250])
print(data_list[1000])
print(data_list[2500])
print(data_list[3300])
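
# Editor's note: data_list is only printed above and never used again -- the
# train/test split and BERTDataset below operate on the DataFrame directly.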

# Split into train & test sets
from sklearn.model_selection import train_test_split

dataset_train, dataset_test = train_test_split(data, test_size=0.25, random_state=0)
print(len(dataset_train))
print(len(dataset_test))

class BERTDataset(Dataset):
    def __init__(self, dataset, sent_idx, label_idx, bert_tokenizer, max_len,
                 pad, pair):
        transform = nlp.data.BERTSentenceTransform(
            bert_tokenizer, max_seq_length=max_len, pad=pad, pair=pair)

        self.sentences = [transform([dataset.iloc[i][sent_idx]]) for i in range(len(dataset))]
        self.labels = [np.int32(dataset.iloc[i][label_idx]) for i in range(len(dataset))]

    def __getitem__(self, i):
        return (self.sentences[i] + (self.labels[i], ))

    def __len__(self):
        return (len(self.labels))

# Hyperparameters
max_len = 64
batch_size = 64
warmup_ratio = 0.1
num_epochs = 10
max_grad_norm = 1
log_interval = 200
learning_rate = 5e-5

tokenizer = get_tokenizer()
tok = nlp.data.BERTSPTokenizer(tokenizer, vocab, lower=False)

data_train = BERTDataset(dataset_train, 0, 1, tok, max_len, True, False)
data_test = BERTDataset(dataset_test, 0, 1, tok, max_len, True, False)

train_dataloader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, num_workers=5, shuffle=True)
test_dataloader = torch.utils.data.DataLoader(data_test, batch_size=batch_size, num_workers=5, shuffle=True)
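
# Illustrative sanity check (editor's addition, not in the original): each
# batch unpacks to (token_ids, valid_length, segment_ids, label), the tuple
# the training loop below relies on.
#   token_ids, valid_length, segment_ids, label = next(iter(train_dataloader))
#   print(token_ids.shape)  # expected: (batch_size, max_len)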

"""# **Train KoBERT**"""

class BERTClassifier(nn.Module):
    def __init__(self,
                 bert,
                 hidden_size=768,
                 num_classes=4,
                 dr_rate=None,
                 params=None):
        super(BERTClassifier, self).__init__()
        self.bert = bert
        self.dr_rate = dr_rate

        self.classifier = nn.Linear(hidden_size, num_classes)
        if dr_rate:
            self.dropout = nn.Dropout(p=dr_rate)

    def gen_attention_mask(self, token_ids, valid_length):
        # Mark the first valid_length positions of each sequence as real tokens
        attention_mask = torch.zeros_like(token_ids)
        for i, v in enumerate(valid_length):
            attention_mask[i][:v] = 1
        return attention_mask.float()

    def forward(self, token_ids, valid_length, segment_ids):
        attention_mask = self.gen_attention_mask(token_ids, valid_length)

        _, pooler = self.bert(input_ids=token_ids, token_type_ids=segment_ids.long(), attention_mask=attention_mask.float().to(token_ids.device), return_dict=False)

        # Guard the dr_rate=None case so `out` is always defined
        out = self.dropout(pooler) if self.dr_rate else pooler
        return self.classifier(out)

# Instantiate the classifier on top of KoBERT
model = BERTClassifier(bertmodel, dr_rate=0.5).to(device)

# Set up the optimizer and learning-rate schedule
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]

optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)
loss_fn = nn.CrossEntropyLoss()

t_total = len(train_dataloader) * num_epochs
warmup_step = int(t_total * warmup_ratio)

scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_step, num_training_steps=t_total)

def calc_accuracy(X, Y):
    max_vals, max_indices = torch.max(X, 1)
    train_acc = (max_indices == Y).sum().data.cpu().numpy() / max_indices.size()[0]
    return train_acc
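
# Worked example (editor's addition, not in the original): logits whose
# argmaxes are [1, 0], scored against labels [1, 1], give accuracy 0.5.
#   calc_accuracy(torch.tensor([[0.1, 0.9], [0.8, 0.2]]), torch.tensor([1, 1]))  # -> 0.5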

"""Train"""

for e in range(num_epochs):
    train_acc = 0.0
    test_acc = 0.0
    model.train()
    for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm_notebook(train_dataloader)):
        optimizer.zero_grad()
        token_ids = token_ids.long().to(device)
        segment_ids = segment_ids.long().to(device)
        # valid_length stays on the CPU; it is only used to build the attention mask
        label = label.long().to(device)
        out = model(token_ids, valid_length, segment_ids)
        loss = loss_fn(out, label)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        optimizer.step()
        scheduler.step()
        train_acc += calc_accuracy(out, label)
    print("epoch {} train acc {}".format(e+1, train_acc / (batch_id+1)))
    model.eval()
    with torch.no_grad():
        for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm_notebook(test_dataloader)):
            token_ids = token_ids.long().to(device)
            segment_ids = segment_ids.long().to(device)
            label = label.long().to(device)
            out = model(token_ids, valid_length, segment_ids)
            test_acc += calc_accuracy(out, label)
    print("epoch {} test acc {}".format(e+1, test_acc / (batch_id+1)))

"""TEST"""

def softmax(vals, idx):
    # Turn raw logits into a percentage confidence for class idx
    valscpu = vals.cpu().detach().squeeze(0)
    a = 0
    for i in valscpu:
        a += np.exp(i)
    return ((np.exp(valscpu[idx])) / a).item() * 100
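
# Editor's note -- an equivalent one-liner using the F alias imported above,
# offered as a hedged alternative rather than a change to the author's code:
#   (F.softmax(vals.cpu().detach().squeeze(0), dim=-1)[idx] * 100).item()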

def testModel(model, seq):
    cate = ["중립", "e", "s", "g"]  # 중립 = neutral
    tmp = [seq]
    transform = nlp.data.BERTSentenceTransform(tok, max_len, pad=True, pair=False)
    tokenized = transform(tmp)

    model.eval()
    result = model(torch.tensor([tokenized[0]]).to(device), [tokenized[1]], torch.tensor(tokenized[2]).to(device))
    idx = result.argmax().cpu().item()
    print("Predicted report category:", cate[idx])
    print("Confidence: {:.2f}%".format(softmax(result, idx)))

# Sample sentences from Korean ESG/annual reports
testModel(model, "이사회 금호석유화학은 지속가능한 기업을 만들기 위해 건전한 지배구조를 구축하고 있습니다. 이사회는 이해관계자의 이익을 대변하고, 경영진에 대한 감독 역할을 하며, 장기적인 관점의 의사결정을 하기 위해 노력합니다.")

testModel(model, "금호석유화학은 시장의 변화에 적절히 대응하고 친환경 포트폴리오 전환을 위해 고부가/친환경 제품 생산, 친환경 자동차 관련 솔루션, 바이오/친환경소재 및 고부가 스페셜티 제품 연구개발 등을 계획 중입니다.")

testModel(model, "당사는 금융상품과 관련하여 신용위험, 유동성위험 및 시장위험에 노출되어 있습니다. 본 주석은 당사가 노출되어 있는 위의 위험에 대한 정보와 당사의 위험관리 목표, 정책, 위험 평가 및 관리 절차, 그리고 자본관리에 대해 공시하고 있습니다. 추가적인 계량적 정보에 대해서는 본 재무제표 전반에 걸쳐서 공시되어 있습니다.")

testModel(model, "주관하는 '2021년 자발적에너지효율목표제 시범사업' 협약을 통해 에너지 원단위 목표 개선을 위해 노력하고 있으며, 지역사회 및 에너지시민연대에서 주관하는 환경 관련 활동에 참여하며 기후변화 대응 중요성에 대한 공감과 소통을 실천하고 있습니다.")

testModel(model, "생물다양성 유지")

testModel(model, "생물다양성 유지 및 지속가능성을 추진하는 국제 비영리 환경보호단체")

testModel(model, "아울러 제품 제조, 판매 전단계에 있어서의 탄소배출절감을 위한 공급망 관리 체계를 보다 강화해 나아갈 것입니다.")

testModel(model, "개발에서 유통까지, 원료부터 제품까지, 모든 단계를 아우르는 품질안전의 확보는 필수적입니다.")

testModel(model, "롯데제과는 동반성장아카데미를 온라인으로 연중 운영하며 협력업체의 인적자원 개발을 지원하고 있습니다.")

testModel(model, "")