add defect-detection
Browse files- Code-Code/Defect-detection/code/eval.sh +18 -0
- Code-Code/Defect-detection/code/evaluate.sh +1 -0
- Code-Code/Defect-detection/code/evaluator.py +52 -0
- Code-Code/Defect-detection/code/model.py +45 -0
- Code-Code/Defect-detection/code/run.py +598 -0
- Code-Code/Defect-detection/code/train.sh +17 -0
- Code-Code/Defect-detection/dataset.zip +3 -0
- Code-Code/Defect-detection/model/epoch_1/subject_model.pth +3 -0
- Code-Code/Defect-detection/model/epoch_2/subject_model.pth +3 -0
- Code-Code/Defect-detection/model/epoch_3/subject_model.pth +3 -0
- Code-Code/Defect-detection/model/epoch_4/subject_model.pth +3 -0
- Code-Code/Defect-detection/model/epoch_5/subject_model.pth +3 -0
Code-Code/Defect-detection/code/eval.sh
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Evaluate a fine-tuned CodeBERT defect-detection model.
# Runs run.py with --do_eval and --do_test (no --do_train) on two GPUs;
# both eval and test read dataset/valid.jsonl, and predictions are written
# under ../model.  Hyperparameters mirror train.sh so features are built
# identically (same block_size / tokenizer).
CUDA_VISIBLE_DEVICES=0,1 python run.py \
    --output_dir=../model \
    --model_type=roberta \
    --tokenizer_name=microsoft/codebert-base \
    --model_name_or_path=microsoft/codebert-base \
    --do_eval \
    --do_test \
    --train_data_file=../dataset/train.jsonl \
    --eval_data_file=../dataset/valid.jsonl \
    --test_data_file=../dataset/valid.jsonl \
    --epoch 5 \
    --block_size 400 \
    --train_batch_size 32 \
    --eval_batch_size 64 \
    --learning_rate 2e-5 \
    --max_grad_norm 1.0 \
    --evaluate_during_training \
    --seed 123456
|
Code-Code/Defect-detection/code/evaluate.sh
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
# Score the leaderboard predictions (../model/predictions.txt) against the
# gold labels in the validation set and print {'Acc': ...}.
python evaluator.py -a ../dataset/valid.jsonl -p ../model/predictions.txt
Code-Code/Defect-detection/code/evaluator.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# Licensed under the MIT license.
|
| 3 |
+
import logging
|
| 4 |
+
import sys
|
| 5 |
+
import json
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
def read_answers(filename):
    """Load gold labels from a JSONL file.

    Each line must be a JSON object with an 'idx' key (example id) and a
    'target' key (gold label).  Returns a dict mapping idx -> target.
    """
    labels = {}
    with open(filename) as handle:
        for raw_line in handle:
            record = json.loads(raw_line.strip())
            labels[record['idx']] = record['target']
    return labels
|
| 16 |
+
|
| 17 |
+
def read_predictions(filename):
    """Load leaderboard predictions from a text file.

    Each line is '<idx> <label>' separated by whitespace; both fields are
    parsed as integers.  Returns a dict mapping idx -> predicted label.
    """
    preds = {}
    with open(filename) as handle:
        for raw_line in handle:
            index_str, label_str = raw_line.strip().split()
            preds[int(index_str)] = int(label_str)
    return preds
|
| 25 |
+
|
| 26 |
+
def calculate_scores(answers, predictions):
    """Compute accuracy of *predictions* against *answers*.

    Exits the process (after logging an error) if any gold index has no
    prediction.  Returns {'Acc': mean accuracy}.
    """
    hits = []
    for key in answers:
        if key not in predictions:
            logging.error("Missing prediction for index {}.".format(key))
            sys.exit()
        hits.append(answers[key] == predictions[key])

    return {'Acc': np.mean(hits)}
|
| 37 |
+
|
| 38 |
+
def main():
    """CLI entry point: read answer/prediction files and print the score dict."""
    import argparse
    parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for Defect Detection dataset.')
    parser.add_argument('--answers', '-a', help="filename of the labels, in txt format.")
    parser.add_argument('--predictions', '-p', help="filename of the leaderboard predictions, in txt format.")
    args = parser.parse_args()

    gold = read_answers(args.answers)
    guesses = read_predictions(args.predictions)
    print(calculate_scores(gold, guesses))

if __name__ == '__main__':
    main()
|
Code-Code/Defect-detection/code/model.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Microsoft Corporation.
|
| 2 |
+
# Licensed under the MIT License.
|
| 3 |
+
import torch
|
| 4 |
+
import torch.nn as nn
|
| 5 |
+
import torch
|
| 6 |
+
from torch.autograd import Variable
|
| 7 |
+
import copy
|
| 8 |
+
from torch.nn import CrossEntropyLoss, MSELoss
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class Model(nn.Module):
    """Binary defect-detection classifier wrapping a pretrained encoder.

    The encoder's first output is passed through dropout and treated as
    logits; the probability of column 0 is used for a binary cross-entropy
    loss against the (0/1) labels.
    """
    def __init__(self, encoder,config,tokenizer,args):
        super(Model, self).__init__()
        self.encoder = encoder          # pretrained transformer (e.g. a sequence-classification head model)
        self.config=config
        self.tokenizer=tokenizer
        self.args=args

        # Define dropout layer, dropout_probability is taken from args.
        self.dropout = nn.Dropout(args.dropout_probability)


    def forward(self, input_ids=None,labels=None, return_vec=None):
        # attention_mask masks out positions equal to 1 — assumes the pad
        # token id is 1 (true for RoBERTa tokenizers) — TODO confirm.
        outputs=self.encoder(input_ids,attention_mask=input_ids.ne(1))

        if return_vec:
            # NOTE(review): assumes the encoder's output object exposes
            # `pooler_output`; sequence-classification heads may not — verify.
            return outputs.pooler_output
        # First element of the encoder output (logits for a
        # *ForSequenceClassification encoder).
        outputs = outputs[0]

        # Apply dropout
        outputs = self.dropout(outputs)

        logits=outputs
        prob=torch.sigmoid(logits)
        if labels is not None:
            labels=labels.float()
            # Binary cross-entropy on column 0 of the sigmoid output;
            # 1e-10 guards against log(0).
            loss=torch.log(prob[:,0]+1e-10)*labels+torch.log((1-prob)[:,0]+1e-10)*(1-labels)
            loss=-loss.mean()
            return loss,prob
        else:
            return prob
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
|
Code-Code/Defect-detection/code/run.py
ADDED
|
@@ -0,0 +1,598 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# coding=utf-8
|
| 2 |
+
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
|
| 3 |
+
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
|
| 4 |
+
#
|
| 5 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 6 |
+
# you may not use this file except in compliance with the License.
|
| 7 |
+
# You may obtain a copy of the License at
|
| 8 |
+
#
|
| 9 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 10 |
+
#
|
| 11 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 12 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 13 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 14 |
+
# See the License for the specific language governing permissions and
|
| 15 |
+
# limitations under the License.
|
| 16 |
+
"""
|
| 17 |
+
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
|
| 18 |
+
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
|
| 19 |
+
using a masked language modeling (MLM) loss.
|
| 20 |
+
"""
|
| 21 |
+
|
| 22 |
+
from __future__ import absolute_import, division, print_function
|
| 23 |
+
|
| 24 |
+
import argparse
|
| 25 |
+
import glob
|
| 26 |
+
import logging
|
| 27 |
+
import os
|
| 28 |
+
import pickle
|
| 29 |
+
import random
|
| 30 |
+
import re
|
| 31 |
+
import shutil
|
| 32 |
+
|
| 33 |
+
import numpy as np
|
| 34 |
+
import torch
|
| 35 |
+
from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
|
| 36 |
+
from torch.utils.data.distributed import DistributedSampler
|
| 37 |
+
import json
|
| 38 |
+
try:
|
| 39 |
+
from torch.utils.tensorboard import SummaryWriter
|
| 40 |
+
except:
|
| 41 |
+
from tensorboardX import SummaryWriter
|
| 42 |
+
|
| 43 |
+
from tqdm import tqdm, trange
|
| 44 |
+
import multiprocessing
|
| 45 |
+
from model import Model
|
| 46 |
+
cpu_cont = multiprocessing.cpu_count()
|
| 47 |
+
from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
|
| 48 |
+
BertConfig, BertForMaskedLM, BertTokenizer, BertForSequenceClassification,
|
| 49 |
+
GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
|
| 50 |
+
OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
|
| 51 |
+
RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer,
|
| 52 |
+
DistilBertConfig, DistilBertForMaskedLM, DistilBertForSequenceClassification, DistilBertTokenizer)
|
| 53 |
+
|
| 54 |
+
logger = logging.getLogger(__name__)
|
| 55 |
+
|
| 56 |
+
MODEL_CLASSES = {
|
| 57 |
+
'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
|
| 58 |
+
'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
|
| 59 |
+
'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
|
| 60 |
+
'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
|
| 61 |
+
'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer)
|
| 62 |
+
}
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
class InputFeatures(object):
    """A single training/test features for a example."""
    def __init__(self, input_tokens, input_ids, idx, label):
        # Token strings (for logging) and their vocabulary ids (model input).
        self.input_tokens = input_tokens
        self.input_ids = input_ids
        # Example index kept as a string so it can be written to
        # predictions.txt without conversion.
        self.idx = str(idx)
        self.label = label
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def convert_examples_to_features(js, tokenizer, args):
    """Turn one JSON example into padded token-id features.

    Collapses whitespace in the source function, truncates to fit
    args.block_size (reserving two slots for CLS/SEP), and right-pads with
    the tokenizer's pad token id.
    """
    normalized_code = ' '.join(js['func'].split())
    body_tokens = tokenizer.tokenize(normalized_code)[:args.block_size - 2]
    wrapped_tokens = [tokenizer.cls_token] + body_tokens + [tokenizer.sep_token]
    token_ids = tokenizer.convert_tokens_to_ids(wrapped_tokens)
    pad_count = args.block_size - len(token_ids)
    token_ids += [tokenizer.pad_token_id] * pad_count
    return InputFeatures(wrapped_tokens, token_ids, js['idx'], js['target'])
|
| 90 |
+
|
| 91 |
+
class TextDataset(Dataset):
    """Dataset of defect-detection examples loaded from a JSONL file."""
    def __init__(self, tokenizer, args, file_path=None):
        self.examples = []
        with open(file_path) as f:
            for line in f:
                record = json.loads(line.strip())
                self.examples.append(convert_examples_to_features(record, tokenizer, args))
        # For the training split, log the first few examples as a sanity check.
        if 'train' in file_path:
            for idx, example in enumerate(self.examples[:3]):
                logger.info("*** Example ***")
                logger.info("idx: {}".format(idx))
                logger.info("label: {}".format(example.label))
                logger.info("input_tokens: {}".format([x.replace('\u0120','_') for x in example.input_tokens]))
                logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, i):
        example = self.examples[i]
        return torch.tensor(example.input_ids), torch.tensor(example.label)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def set_seed(seed=42):
    """Seed every RNG (python, numpy, torch CPU/CUDA) for reproducibility.

    Also exports PYTHONHASHSEED and forces deterministic cuDNN kernels.
    Note: setting PYTHONHASHSEED at runtime only affects child processes,
    not the hash randomization of the current interpreter.
    """
    random.seed(seed)
    # Fix: the original wrote 'PYHTONHASHSEED' (transposed letters), which
    # silently set a meaningless environment variable.
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # No-op when CUDA is unavailable, so safe on CPU-only machines.
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def train(args, train_dataset, model, tokenizer):
    """Train the model, evaluating and checkpointing once per epoch.

    Side effects: mutates several fields on `args` (max_steps, save_steps,
    warmup_steps, logging_steps, num_train_epochs, train_batch_size),
    writes 'checkpoint-best-acc/model.bin' and per-epoch
    'epoch_<n>/subject_model.pth' under args.output_dir.
    """
    # Effective batch size scales with the number of visible GPUs.
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)

    train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
                                  batch_size=args.train_batch_size,num_workers=4,pin_memory=True)
    # Derive the schedule from the dataloader length: save/log once per epoch.
    args.max_steps=args.epoch*len( train_dataloader)
    args.save_steps=len( train_dataloader)
    args.warmup_steps=len( train_dataloader)
    args.logging_steps=len( train_dataloader)
    args.num_train_epochs=args.epoch
    model.to(args.device)
    # Prepare optimizer and schedule (linear warmup and decay);
    # bias and LayerNorm weights are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    # NOTE(review): warmup actually uses 10% of max_steps here, not the
    # args.warmup_steps value assigned above.
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps*0.1,
                                                num_training_steps=args.max_steps)
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank,
                                                          find_unused_parameters=True)

    # Resume optimizer/scheduler state from 'checkpoint-last' if it exists.
    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
    scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
    optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
    if os.path.exists(scheduler_last):
        scheduler.load_state_dict(torch.load(scheduler_last))
    if os.path.exists(optimizer_last):
        optimizer.load_state_dict(torch.load(optimizer_last))
    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", len(train_dataset))
    logger.info("  Num Epochs = %d", args.num_train_epochs)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
                args.train_batch_size * args.gradient_accumulation_steps * (
                    torch.distributed.get_world_size() if args.local_rank != -1 else 1))
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", args.max_steps)

    global_step = args.start_step
    tr_loss, logging_loss,avg_loss,tr_nb,tr_num,train_loss = 0.0, 0.0,0.0,0,0,0
    best_mrr=0.0
    best_acc=0.0
    # model.resize_token_embeddings(len(tokenizer))
    model.zero_grad()

    # Initialize early stopping parameters at the start of training
    early_stopping_counter = 0
    best_loss = None

    for idx in range(args.start_epoch, int(args.num_train_epochs)):
        bar = tqdm(train_dataloader,total=len(train_dataloader))
        tr_num=0
        train_loss=0
        for step, batch in enumerate(bar):
            inputs = batch[0].to(args.device)
            labels=batch[1].to(args.device)
            model.train()
            loss,logits = model(inputs,labels)


            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            # Track the running epoch loss for the progress bar.
            tr_loss += loss.item()
            tr_num+=1
            train_loss+=loss.item()
            if avg_loss==0:
                avg_loss=tr_loss
            avg_loss=round(train_loss/tr_num,5)
            bar.set_description("epoch {} loss {}".format(idx,avg_loss))


            if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
                scheduler.step()
                global_step += 1
                output_flag=True
                avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    logging_loss = tr_loss
                    tr_nb=global_step

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:

                    if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer,eval_when_training=True)
                        for key, value in results.items():
                            logger.info("  %s = %s", key, round(value,4))
                    # Save model checkpoint
                    # NOTE(review): 'results' is only bound when
                    # --evaluate_during_training is set; without that flag this
                    # lookup raises NameError — confirm intended usage.
                    if results['eval_acc']>best_acc:
                        best_acc=results['eval_acc']
                        logger.info("  "+"*"*20)
                        logger.info("  Best acc:%s",round(best_acc,4))
                        logger.info("  "+"*"*20)

                        checkpoint_prefix = 'checkpoint-best-acc'
                        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        model_to_save = model.module if hasattr(model,'module') else model
                        output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
                        torch.save(model_to_save.state_dict(), output_dir)
                        logger.info("Saving model checkpoint to %s", output_dir)

        # Record a checkpoint at the end of every epoch.
        output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        model_to_save = model.module if hasattr(model, 'module') else model
        ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
        logger.info("Saving model checkpoint to %s", ckpt_output_path)
        torch.save(model_to_save.state_dict(), ckpt_output_path)

        # Calculate average loss for the epoch
        avg_loss = train_loss / tr_num

        # Check for early stopping condition
        if args.early_stopping_patience is not None:
            if best_loss is None or avg_loss < best_loss - args.min_loss_delta:
                best_loss = avg_loss
                early_stopping_counter = 0
            else:
                early_stopping_counter += 1
                if early_stopping_counter >= args.early_stopping_patience:
                    logger.info("Early stopping")
                    break  # Exit the loop early
|
| 280 |
+
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def evaluate(args, model, tokenizer,eval_when_training=False):
    """Evaluate the model on args.eval_data_file.

    Returns a dict with 'eval_loss' (mean batch loss) and 'eval_acc'
    (accuracy of thresholding the class-0 probability at 0.5).
    """
    eval_output_dir = args.output_dir

    eval_dataset = TextDataset(tokenizer, args,args.eval_data_file)

    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,num_workers=4,pin_memory=True)

    # multi-gpu evaluate; skip re-wrapping when called from train(), where
    # the model is already wrapped in DataParallel.
    if args.n_gpu > 1 and eval_when_training is False:
        model = torch.nn.DataParallel(model)

    # Eval!
    logger.info("***** Running evaluation *****")
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()
    logits=[]
    labels=[]
    for batch in eval_dataloader:
        inputs = batch[0].to(args.device)
        label=batch[1].to(args.device)
        with torch.no_grad():
            # Model returns (loss, probabilities) when labels are supplied.
            lm_loss,logit = model(inputs,label)
            eval_loss += lm_loss.mean().item()
            logits.append(logit.cpu().numpy())
            labels.append(label.cpu().numpy())
        nb_eval_steps += 1
    logits=np.concatenate(logits,0)
    labels=np.concatenate(labels,0)
    # Threshold the probability of class 0 at 0.5 for a binary prediction.
    preds=logits[:,0]>0.5
    eval_acc=np.mean(labels==preds)
    eval_loss = eval_loss / nb_eval_steps
    # NOTE(review): named 'perplexity' but this is just the mean loss as a
    # tensor; no exp() is applied.
    perplexity = torch.tensor(eval_loss)

    result = {
        "eval_loss": float(perplexity),
        "eval_acc":round(eval_acc,4),
    }
    return result
|
| 332 |
+
|
| 333 |
+
def test(args, model, tokenizer):
    """Run inference on args.test_data_file and write predictions.

    Writes one '<idx>\\t<0|1>' line per example to
    args.output_dir/predictions.txt, in dataset order.
    """
    eval_dataset = TextDataset(tokenizer, args,args.test_data_file)


    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Eval!
    logger.info("***** Running Test *****")
    logger.info("  Num examples = %d", len(eval_dataset))
    logger.info("  Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()
    logits=[]
    labels=[]
    for batch in tqdm(eval_dataloader,total=len(eval_dataloader)):
        inputs = batch[0].to(args.device)
        label=batch[1].to(args.device)
        with torch.no_grad():
            # No labels passed, so the model returns probabilities only.
            logit = model(inputs)
            logits.append(logit.cpu().numpy())
            labels.append(label.cpu().numpy())

    logits=np.concatenate(logits,0)
    labels=np.concatenate(labels,0)
    # Threshold the probability of class 0 at 0.5 for a binary prediction.
    preds=logits[:,0]>0.5
    with open(os.path.join(args.output_dir,"predictions.txt"),'w') as f:
        for example,pred in zip(eval_dataset.examples,preds):
            if pred:
                f.write(example.idx+'\t1\n')
            else:
                f.write(example.idx+'\t0\n')
|
| 373 |
+
|
| 374 |
+
|
| 375 |
+
|
| 376 |
+
def main():
|
| 377 |
+
parser = argparse.ArgumentParser()
|
| 378 |
+
|
| 379 |
+
## Required parameters
|
| 380 |
+
parser.add_argument("--train_data_file", default=None, type=str, required=True,
|
| 381 |
+
help="The input training data file (a text file).")
|
| 382 |
+
parser.add_argument("--output_dir", default=None, type=str, required=True,
|
| 383 |
+
help="The output directory where the model predictions and checkpoints will be written.")
|
| 384 |
+
|
| 385 |
+
## Other parameters
|
| 386 |
+
parser.add_argument("--eval_data_file", default=None, type=str,
|
| 387 |
+
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
|
| 388 |
+
parser.add_argument("--test_data_file", default=None, type=str,
|
| 389 |
+
help="An optional input evaluation data file to evaluate the perplexity on (a text file).")
|
| 390 |
+
|
| 391 |
+
parser.add_argument("--model_type", default="bert", type=str,
|
| 392 |
+
help="The model architecture to be fine-tuned.")
|
| 393 |
+
parser.add_argument("--model_name_or_path", default=None, type=str,
|
| 394 |
+
help="The model checkpoint for weights initialization.")
|
| 395 |
+
|
| 396 |
+
parser.add_argument("--mlm", action='store_true',
|
| 397 |
+
help="Train with masked-language modeling loss instead of language modeling.")
|
| 398 |
+
parser.add_argument("--mlm_probability", type=float, default=0.15,
|
| 399 |
+
help="Ratio of tokens to mask for masked language modeling loss")
|
| 400 |
+
|
| 401 |
+
parser.add_argument("--config_name", default="", type=str,
|
| 402 |
+
help="Optional pretrained config name or path if not the same as model_name_or_path")
|
| 403 |
+
parser.add_argument("--tokenizer_name", default="", type=str,
|
| 404 |
+
help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
|
| 405 |
+
parser.add_argument("--cache_dir", default="", type=str,
|
| 406 |
+
help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
|
| 407 |
+
parser.add_argument("--block_size", default=-1, type=int,
|
| 408 |
+
help="Optional input sequence length after tokenization."
|
| 409 |
+
"The training dataset will be truncated in block of this size for training."
|
| 410 |
+
"Default to the model max input length for single sentence inputs (take into account special tokens).")
|
| 411 |
+
parser.add_argument("--do_train", action='store_true',
|
| 412 |
+
help="Whether to run training.")
|
| 413 |
+
parser.add_argument("--do_eval", action='store_true',
|
| 414 |
+
help="Whether to run eval on the dev set.")
|
| 415 |
+
parser.add_argument("--do_test", action='store_true',
|
| 416 |
+
help="Whether to run eval on the dev set.")
|
| 417 |
+
parser.add_argument("--evaluate_during_training", action='store_true',
|
| 418 |
+
help="Run evaluation during training at each logging step.")
|
| 419 |
+
parser.add_argument("--do_lower_case", action='store_true',
|
| 420 |
+
help="Set this flag if you are using an uncased model.")
|
| 421 |
+
|
| 422 |
+
parser.add_argument("--train_batch_size", default=4, type=int,
|
| 423 |
+
help="Batch size per GPU/CPU for training.")
|
| 424 |
+
parser.add_argument("--eval_batch_size", default=4, type=int,
|
| 425 |
+
help="Batch size per GPU/CPU for evaluation.")
|
| 426 |
+
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
|
| 427 |
+
help="Number of updates steps to accumulate before performing a backward/update pass.")
|
| 428 |
+
parser.add_argument("--learning_rate", default=5e-5, type=float,
|
| 429 |
+
help="The initial learning rate for Adam.")
|
| 430 |
+
parser.add_argument("--weight_decay", default=0.0, type=float,
|
| 431 |
+
help="Weight deay if we apply some.")
|
| 432 |
+
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
|
| 433 |
+
help="Epsilon for Adam optimizer.")
|
| 434 |
+
parser.add_argument("--max_grad_norm", default=1.0, type=float,
|
| 435 |
+
help="Max gradient norm.")
|
| 436 |
+
parser.add_argument("--num_train_epochs", default=1.0, type=float,
|
| 437 |
+
help="Total number of training epochs to perform.")
|
| 438 |
+
parser.add_argument("--max_steps", default=-1, type=int,
|
| 439 |
+
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
|
| 440 |
+
parser.add_argument("--warmup_steps", default=0, type=int,
|
| 441 |
+
help="Linear warmup over warmup_steps.")
|
| 442 |
+
|
| 443 |
+
parser.add_argument('--logging_steps', type=int, default=50,
|
| 444 |
+
help="Log every X updates steps.")
|
| 445 |
+
parser.add_argument('--save_steps', type=int, default=50,
|
| 446 |
+
help="Save checkpoint every X updates steps.")
|
| 447 |
+
parser.add_argument('--save_total_limit', type=int, default=None,
|
| 448 |
+
help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
|
| 449 |
+
parser.add_argument("--eval_all_checkpoints", action='store_true',
|
| 450 |
+
help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
|
| 451 |
+
parser.add_argument("--no_cuda", action='store_true',
|
| 452 |
+
help="Avoid using CUDA when available")
|
| 453 |
+
parser.add_argument('--overwrite_output_dir', action='store_true',
|
| 454 |
+
help="Overwrite the content of the output directory")
|
| 455 |
+
parser.add_argument('--overwrite_cache', action='store_true',
|
| 456 |
+
help="Overwrite the cached training and evaluation sets")
|
| 457 |
+
parser.add_argument('--seed', type=int, default=42,
|
| 458 |
+
help="random seed for initialization")
|
| 459 |
+
parser.add_argument('--epoch', type=int, default=42,
|
| 460 |
+
help="random seed for initialization")
|
| 461 |
+
parser.add_argument('--fp16', action='store_true',
|
| 462 |
+
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
|
| 463 |
+
parser.add_argument('--fp16_opt_level', type=str, default='O1',
|
| 464 |
+
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
|
| 465 |
+
"See details at https://nvidia.github.io/apex/amp.html")
|
| 466 |
+
parser.add_argument("--local_rank", type=int, default=-1,
|
| 467 |
+
help="For distributed training: local_rank")
|
| 468 |
+
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
|
| 469 |
+
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
|
| 470 |
+
|
| 471 |
+
# Add early stopping parameters and dropout probability parameters
|
| 472 |
+
parser.add_argument("--early_stopping_patience", type=int, default=None,
|
| 473 |
+
help="Number of epochs with no improvement after which training will be stopped.")
|
| 474 |
+
parser.add_argument("--min_loss_delta", type=float, default=0.001,
|
| 475 |
+
help="Minimum change in the loss required to qualify as an improvement.")
|
| 476 |
+
parser.add_argument('--dropout_probability', type=float, default=0, help='dropout probability')
|
| 477 |
+
|
| 478 |
+
|
| 479 |
+
|
| 480 |
+
|
| 481 |
+
args = parser.parse_args()
|
| 482 |
+
|
| 483 |
+
# Setup distant debugging if needed
|
| 484 |
+
if args.server_ip and args.server_port:
|
| 485 |
+
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
|
| 486 |
+
import ptvsd
|
| 487 |
+
print("Waiting for debugger attach")
|
| 488 |
+
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
|
| 489 |
+
ptvsd.wait_for_attach()
|
| 490 |
+
|
| 491 |
+
# Setup CUDA, GPU & distributed training
|
| 492 |
+
if args.local_rank == -1 or args.no_cuda:
|
| 493 |
+
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
|
| 494 |
+
args.n_gpu = torch.cuda.device_count()
|
| 495 |
+
else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
|
| 496 |
+
torch.cuda.set_device(args.local_rank)
|
| 497 |
+
device = torch.device("cuda", args.local_rank)
|
| 498 |
+
torch.distributed.init_process_group(backend='nccl')
|
| 499 |
+
args.n_gpu = 1
|
| 500 |
+
args.device = device
|
| 501 |
+
args.per_gpu_train_batch_size=args.train_batch_size//args.n_gpu
|
| 502 |
+
args.per_gpu_eval_batch_size=args.eval_batch_size//args.n_gpu
|
| 503 |
+
# Setup logging
|
| 504 |
+
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
|
| 505 |
+
datefmt='%m/%d/%Y %H:%M:%S',
|
| 506 |
+
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
|
| 507 |
+
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
|
| 508 |
+
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
|
| 509 |
+
|
| 510 |
+
|
| 511 |
+
|
| 512 |
+
# Set seed
|
| 513 |
+
set_seed(args.seed)
|
| 514 |
+
|
| 515 |
+
# Load pretrained model and tokenizer
|
| 516 |
+
if args.local_rank not in [-1, 0]:
|
| 517 |
+
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training download model & vocab
|
| 518 |
+
|
| 519 |
+
args.start_epoch = 0
|
| 520 |
+
args.start_step = 0
|
| 521 |
+
checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
|
| 522 |
+
if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
|
| 523 |
+
args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
|
| 524 |
+
args.config_name = os.path.join(checkpoint_last, 'config.json')
|
| 525 |
+
idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
|
| 526 |
+
with open(idx_file, encoding='utf-8') as idxf:
|
| 527 |
+
args.start_epoch = int(idxf.readlines()[0].strip()) + 1
|
| 528 |
+
|
| 529 |
+
step_file = os.path.join(checkpoint_last, 'step_file.txt')
|
| 530 |
+
if os.path.exists(step_file):
|
| 531 |
+
with open(step_file, encoding='utf-8') as stepf:
|
| 532 |
+
args.start_step = int(stepf.readlines()[0].strip())
|
| 533 |
+
|
| 534 |
+
logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
|
| 535 |
+
|
| 536 |
+
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
|
| 537 |
+
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
|
| 538 |
+
cache_dir=args.cache_dir if args.cache_dir else None)
|
| 539 |
+
config.num_labels=1
|
| 540 |
+
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,
|
| 541 |
+
do_lower_case=args.do_lower_case,
|
| 542 |
+
cache_dir=args.cache_dir if args.cache_dir else None)
|
| 543 |
+
if args.block_size <= 0:
|
| 544 |
+
args.block_size = tokenizer.max_len_single_sentence # Our input block size will be the max possible for the model
|
| 545 |
+
args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
|
| 546 |
+
if args.model_name_or_path:
|
| 547 |
+
model = model_class.from_pretrained(args.model_name_or_path,
|
| 548 |
+
from_tf=bool('.ckpt' in args.model_name_or_path),
|
| 549 |
+
config=config,
|
| 550 |
+
cache_dir=args.cache_dir if args.cache_dir else None)
|
| 551 |
+
else:
|
| 552 |
+
model = model_class(config)
|
| 553 |
+
|
| 554 |
+
model=Model(model,config,tokenizer,args)
|
| 555 |
+
if args.local_rank == 0:
|
| 556 |
+
torch.distributed.barrier() # End of barrier to make sure only the first process in distributed training download model & vocab
|
| 557 |
+
|
| 558 |
+
logger.info("Training/evaluation parameters %s", args)
|
| 559 |
+
|
| 560 |
+
# Training
|
| 561 |
+
if args.do_train:
|
| 562 |
+
if args.local_rank not in [-1, 0]:
|
| 563 |
+
torch.distributed.barrier() # Barrier to make sure only the first process in distributed training process the dataset, and the others will use the cache
|
| 564 |
+
|
| 565 |
+
train_dataset = TextDataset(tokenizer, args,args.train_data_file)
|
| 566 |
+
if args.local_rank == 0:
|
| 567 |
+
torch.distributed.barrier()
|
| 568 |
+
|
| 569 |
+
train(args, train_dataset, model, tokenizer)
|
| 570 |
+
|
| 571 |
+
|
| 572 |
+
|
| 573 |
+
# Evaluation
|
| 574 |
+
results = {}
|
| 575 |
+
if args.do_eval and args.local_rank in [-1, 0]:
|
| 576 |
+
checkpoint_prefix = 'epoch_5/subject_model.pth'
|
| 577 |
+
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
|
| 578 |
+
model.load_state_dict(torch.load(output_dir))
|
| 579 |
+
model.to(args.device)
|
| 580 |
+
result=evaluate(args, model, tokenizer)
|
| 581 |
+
logger.info("***** Eval results *****")
|
| 582 |
+
for key in sorted(result.keys()):
|
| 583 |
+
logger.info(" %s = %s", key, str(round(result[key],4)))
|
| 584 |
+
|
| 585 |
+
if args.do_test and args.local_rank in [-1, 0]:
|
| 586 |
+
checkpoint_prefix = 'epoch_5/subject_model.pth'
|
| 587 |
+
output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
|
| 588 |
+
model.load_state_dict(torch.load(output_dir))
|
| 589 |
+
model.to(args.device)
|
| 590 |
+
test(args, model, tokenizer)
|
| 591 |
+
|
| 592 |
+
return results
|
| 593 |
+
|
| 594 |
+
|
| 595 |
+
# Script entry point: run the full train/eval/test pipeline defined in main().
if __name__ == "__main__":
    main()
|
| 597 |
+
|
| 598 |
+
|
Code-Code/Defect-detection/code/train.sh
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Fine-tune microsoft/codebert-base (RoBERTa architecture) for defect
# detection. Trains for 5 epochs on the train split while evaluating on
# the validation split after each epoch (--evaluate_during_training);
# checkpoints are written under ../model.
CUDA_VISIBLE_DEVICES=0,1 python run.py \
    --output_dir=../model \
    --model_type=roberta \
    --tokenizer_name=microsoft/codebert-base \
    --model_name_or_path=microsoft/codebert-base \
    --do_train \
    --train_data_file=../dataset/train.jsonl \
    --eval_data_file=../dataset/valid.jsonl \
    --test_data_file=../dataset/test.jsonl \
    --epoch 5 \
    --block_size 400 \
    --train_batch_size 32 \
    --eval_batch_size 64 \
    --learning_rate 2e-5 \
    --max_grad_norm 1.0 \
    --evaluate_during_training \
    --seed 123456
|
Code-Code/Defect-detection/dataset.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fafb4004eda1a4e1d4392b002e3de6f542d2a2b6701ec9758f25791bc9da49d6
|
| 3 |
+
size 14533467
|
Code-Code/Defect-detection/model/epoch_1/subject_model.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b22544bd55c811a58a09ef3354f9ca0e5492967428d6bd04213e5e23f93054c5
|
| 3 |
+
size 498673198
|
Code-Code/Defect-detection/model/epoch_2/subject_model.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b27ecb25228c2630cb2449cccb5c6bcfc3b213c5cf54d4d7f92205510bbc1356
|
| 3 |
+
size 498673198
|
Code-Code/Defect-detection/model/epoch_3/subject_model.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e4187f180a07f788680cae80879e0e9db389ddda30b4842a115ea0fbc97ad5bb
|
| 3 |
+
size 498673198
|
Code-Code/Defect-detection/model/epoch_4/subject_model.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d5757d63d13eab8e1dbc45754ceb2a2b325bb3336fba6fc6005c14cea7d5d5ad
|
| 3 |
+
size 498673198
|
Code-Code/Defect-detection/model/epoch_5/subject_model.pth
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d2791e6e7dea22221eaaed6a9bf29e4bd5ef9d62ef40f4547d8bef9ed994508f
|
| 3 |
+
size 498673198
|