Upload kpmg_(2).py
kpmg_(2).py · ADDED · +238 −0
# -*- coding: utf-8 -*-
"""kpmg (2).ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1h7M0B8Uvu4c7u6iZK1VT-mAS4YydvyA3

# **Import Module**
"""

import pandas as pd
import numpy as np

!pip install mxnet
!pip install gluonnlp pandas tqdm
!pip install sentencepiece
!pip install transformers==3.0.2
!pip install torch

!pip install git+https://git@github.com/SKTBrain/KoBERT.git@master

import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import gluonnlp as nlp
from tqdm import tqdm, tqdm_notebook

from kobert.utils import get_tokenizer
from kobert.pytorch_kobert import get_pytorch_kobert_model

from transformers import AdamW
from transformers.optimization import get_cosine_schedule_with_warmup

# Use the GPU (falling back to CPU if CUDA is unavailable)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load the pretrained KoBERT model and its vocabulary
bertmodel, vocab = get_pytorch_kobert_model()

import os

"""# **Load Data**"""

from google.colab import drive
drive.mount('/content/drive')

data = pd.read_csv(r'/content/drive/MyDrive/kpmg/concat.csv')

data

# Encode the category labels as integers ("중립" = neutral)
data.loc[(data['category'] == "중립"), 'category'] = 0
data.loc[(data['category'] == "e"), 'category'] = 1
data.loc[(data['category'] == "s"), 'category'] = 2
data.loc[(data['category'] == "g"), 'category'] = 3

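# The four .loc assignments above can also be collapsed into a single
# vectorized lookup. A minimal alternative sketch (assuming every row carries
# one of the four labels, so .map never yields NaN; label_map is a name
# introduced here for illustration):
# label_map = {"중립": 0, "e": 1, "s": 2, "g": 3}
# data['category'] = data['category'].map(label_map)
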
# Build [sentence, label] pairs (used here only to spot-check the data)
data_list = []
for q, label in zip(data['contents'], data['category']):
    data1 = []
    data1.append(q)
    data1.append(str(label))
    data_list.append(data1)

print(data_list[0])
print(data_list[100])
print(data_list[250])
print(data_list[1000])
print(data_list[2500])
print(data_list[3300])

# Split into train & test sets
from sklearn.model_selection import train_test_split

dataset_train, dataset_test = train_test_split(data, test_size=0.25, random_state=0)
print(len(dataset_train))
print(len(dataset_test))

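# With four classes of uneven size, a stratified split keeps the label
# distribution identical across train and test. An optional variant of the
# call above (stratify assumes the labels were already encoded, as they are here):
# dataset_train, dataset_test = train_test_split(
#     data, test_size=0.25, random_state=0, stratify=data['category'])
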
class BERTDataset(Dataset):
    def __init__(self, dataset, sent_idx, label_idx, bert_tokenizer, max_len,
                 pad, pair):
        transform = nlp.data.BERTSentenceTransform(
            bert_tokenizer, max_seq_length=max_len, pad=pad, pair=pair)

        self.sentences = [transform([dataset.iloc[i][sent_idx]]) for i in range(len(dataset))]
        self.labels = [np.int32(dataset.iloc[i][label_idx]) for i in range(len(dataset))]

    def __getitem__(self, i):
        return (self.sentences[i] + (self.labels[i],))

    def __len__(self):
        return (len(self.labels))

# Hyperparameters
max_len = 64
batch_size = 64
warmup_ratio = 0.1
num_epochs = 10
max_grad_norm = 1
log_interval = 200
learning_rate = 5e-5

tokenizer = get_tokenizer()
tok = nlp.data.BERTSPTokenizer(tokenizer, vocab, lower=False)

data_train = BERTDataset(dataset_train, 0, 1, tok, max_len, True, False)
data_test = BERTDataset(dataset_test, 0, 1, tok, max_len, True, False)

train_dataloader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, num_workers=5, shuffle=True)
test_dataloader = torch.utils.data.DataLoader(data_test, batch_size=batch_size, num_workers=5, shuffle=True)

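# Each BERTSentenceTransform call yields (token_ids, valid_length, segment_ids),
# so every dataset item is a 4-tuple ending with the label. A quick sanity
# check (the sample_* names are introduced here for illustration):
sample_ids, sample_len, sample_segments, sample_label = data_train[0]
print(sample_ids.shape, sample_len, sample_segments.shape, sample_label)
# expected: (64,) arrays for ids/segments at max_len = 64, plus a scalar label
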
"""# **Train KoBERT**"""

class BERTClassifier(nn.Module):
    def __init__(self,
                 bert,
                 hidden_size=768,
                 num_classes=4,
                 dr_rate=None,
                 params=None):
        super(BERTClassifier, self).__init__()
        self.bert = bert
        self.dr_rate = dr_rate

        self.classifier = nn.Linear(hidden_size, num_classes)
        if dr_rate:
            self.dropout = nn.Dropout(p=dr_rate)

    def gen_attention_mask(self, token_ids, valid_length):
        # 1 for real tokens, 0 for padding
        attention_mask = torch.zeros_like(token_ids)
        for i, v in enumerate(valid_length):
            attention_mask[i][:v] = 1
        return attention_mask.float()

    def forward(self, token_ids, valid_length, segment_ids):
        attention_mask = self.gen_attention_mask(token_ids, valid_length)

        _, pooler = self.bert(input_ids=token_ids, token_type_ids=segment_ids.long(),
                              attention_mask=attention_mask.float().to(token_ids.device),
                              return_dict=False)

        # without dropout, the pooled output goes straight to the classifier
        out = self.dropout(pooler) if self.dr_rate else pooler
        return self.classifier(out)

# Instantiate the classifier on top of the pretrained BERT model
model = BERTClassifier(bertmodel, dr_rate=0.5).to(device)

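# The per-row loop in gen_attention_mask can be replaced by one vectorized
# comparison. A sketch under the assumption that valid_length arrives as a
# 1-D tensor of token counts (gen_attention_mask_vectorized is a name
# introduced here for illustration):
def gen_attention_mask_vectorized(token_ids, valid_length):
    # position j is a real token iff j < valid_length for that row
    positions = torch.arange(token_ids.size(1), device=token_ids.device)
    return (positions.unsqueeze(0) < valid_length.unsqueeze(1).to(token_ids.device)).float()
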
# Set up the optimizer and learning-rate schedule:
# bias and LayerNorm parameters are exempt from weight decay
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
    {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
    {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]

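# Sanity check on the grouping: every parameter should land in exactly one
# group (the n_decay/n_no_decay names are introduced here for illustration):
n_decay = sum(p.numel() for p in optimizer_grouped_parameters[0]['params'])
n_no_decay = sum(p.numel() for p in optimizer_grouped_parameters[1]['params'])
print(n_decay, n_no_decay)  # decayed vs. decay-exempt parameter counts
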
optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate)
loss_fn = nn.CrossEntropyLoss()

t_total = len(train_dataloader) * num_epochs
warmup_step = int(t_total * warmup_ratio)

scheduler = get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_step, num_training_steps=t_total)

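# Warmup arithmetic for intuition (the batch count is illustrative, since it
# depends on the CSV size): with e.g. 40 batches per epoch, t_total = 40 * 10
# = 400 optimizer steps, so warmup_step = int(400 * 0.1) = 40 steps of linear
# LR ramp-up before the cosine decay begins.
print(t_total, warmup_step)
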
def calc_accuracy(X, Y):
    # fraction of predictions whose argmax matches the label
    max_vals, max_indices = torch.max(X, 1)
    train_acc = (max_indices == Y).sum().data.cpu().numpy() / max_indices.size()[0]
    return train_acc

"""Train"""

for e in range(num_epochs):
    train_acc = 0.0
    test_acc = 0.0
    model.train()
    for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm_notebook(train_dataloader)):
        optimizer.zero_grad()
        token_ids = token_ids.long().to(device)
        segment_ids = segment_ids.long().to(device)
        # valid_length stays on the CPU; it is only used to build the attention mask
        label = label.long().to(device)
        out = model(token_ids, valid_length, segment_ids)
        loss = loss_fn(out, label)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
        optimizer.step()
        scheduler.step()
        train_acc += calc_accuracy(out, label)
    print("epoch {} train acc {}".format(e + 1, train_acc / (batch_id + 1)))
    model.eval()
    with torch.no_grad():  # evaluation does not need gradients
        for batch_id, (token_ids, valid_length, segment_ids, label) in enumerate(tqdm_notebook(test_dataloader)):
            token_ids = token_ids.long().to(device)
            segment_ids = segment_ids.long().to(device)
            label = label.long().to(device)
            out = model(token_ids, valid_length, segment_ids)
            test_acc += calc_accuracy(out, label)
    print("epoch {} test acc {}".format(e + 1, test_acc / (batch_id + 1)))

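# Saving the fine-tuned weights makes the testModel calls below reusable in a
# fresh session without retraining. A minimal sketch (the path is hypothetical):
# torch.save(model.state_dict(), '/content/drive/MyDrive/kpmg/kobert_esg.pt')
# model.load_state_dict(torch.load('/content/drive/MyDrive/kpmg/kobert_esg.pt', map_location=device))
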
"""TEST"""

def softmax(vals, idx):
    # convert the logits to a percentage confidence for class idx
    valscpu = vals.cpu().detach().squeeze(0)
    a = 0
    for i in valscpu:
        a += np.exp(i)
    return ((np.exp(valscpu[idx])) / a).item() * 100

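# np.exp can overflow for large logits; subtracting the max logit first is the
# standard numerically stable form (softmax_stable is a name introduced here,
# equivalent to softmax above up to floating-point error):
def softmax_stable(vals, idx):
    valscpu = vals.cpu().detach().squeeze(0).numpy()
    shifted = np.exp(valscpu - valscpu.max())
    return float(shifted[idx] / shifted.sum()) * 100
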
def testModel(model, seq):
    cate = ["중립", "e", "s", "g"]  # "중립" = neutral
    tmp = [seq]
    transform = nlp.data.BERTSentenceTransform(tok, max_len, pad=True, pair=False)
    tokenized = transform(tmp)

    model.eval()
    result = model(torch.tensor([tokenized[0]]).to(device), [tokenized[1]], torch.tensor(tokenized[2]).to(device))
    idx = result.argmax().cpu().item()
    print("Predicted report category:", cate[idx])
    print("Confidence:", "{:.2f}%".format(softmax(result, idx)))

# Spot-check predictions on sample Korean ESG-report sentences
# (mojibake in the originals repaired; English glosses in the comments).

# Board oversight and sound governance at Kumho Petrochemical
testModel(model, "이사회 금호석유화학은 지속가능한 기업을 만들기 위해 건전한 지배구조를 구축하고 있습니다. 이사회는 이해관계자의 이익을 대변하고, 경영진에 대한 감독 역할을 하며, 장기적인 관점의 의사결정을 하기 위해 노력합니다.")

# Plans for an eco-friendly product portfolio (green products, EV solutions, bio materials)
testModel(model, "금호석유화학은 시장의 변화에 적절히 대응하고 친환경 포트폴리오 전환을 위해 고부가/친환경 제품 생산, 친환경 자동차 관련 솔루션, 바이오/친환경소재 및 고부가 스페셜티 제품 연구개발 등을 계획 중입니다.")

# Boilerplate financial-risk disclosure (credit, liquidity, and market risk)
testModel(model, "당사는 금융상품과 관련하여 신용위험, 유동성위험 및 시장위험에 노출되어 있습니다. 본 주석은 당사가 노출되어 있는 위의 위험에 대한 정보와 당사의 위험관리 목표, 정책, 위험 평가 및 관리 절차, 그리고 자본관리에 대해 공시하고 있습니다. 추가적인 계량적 정보에 대해서는 본 재무제표 전반에 걸쳐서 공시되어 있습니다.")

# Voluntary 2021 energy-efficiency target agreement and climate engagement
testModel(model, "주관하는 '2021년 자발적 에너지효율목표제 시범사업' 협약을 통해 에너지 원단위 목표 개선을 위해 노력하고 있으며, 지역사회 및 에너지시민연대에서 주관하는 환경 관련 활동에 참여하며 기후변화 대응 중요성에 대한 공감과 소통을 실천하고 있습니다. ")

# Short phrase: "maintaining biodiversity"
testModel(model, "생물다양성 유지")

# International non-profit environmental group promoting biodiversity and sustainability
testModel(model, "생물다양성 유지 및 지속가능성을 추진하는 국제 비영리 환경보호단체")

# Strengthening supply-chain management to cut carbon emissions across production and sales
testModel(model, "아울러 제품 제조, 판매 전 단계에 있어서의 탄소배출 저감을 위한 공급망 관리 체계를 보다 강화해 나아갈 것입니다.")

# Quality and safety assurance across all stages, from development to distribution
testModel(model, "개발에서 유통까지, 원료부터 제품까지, 모든 단계를 아우르는 품질안전의 확보는 필수적입니다.")

# Year-round online partner academy supporting suppliers' workforce development
testModel(model, "롯데제과는 동반성장아카데미를 온라인으로 연중 운영하며 협력업체의 인적자원 개발을 지원하고 있습니다. ")

# Edge case: empty input
testModel(model, "")