zyf0278 committed
Commit b9c56bc · 1 Parent(s): 6f52c7a

add Clone-detection-POJ-104
Code-Code/Clone-detection-POJ-104/code/eval.sh ADDED
@@ -0,0 +1,17 @@
+ CUDA_VISIBLE_DEVICES=0,1 python run.py \
+     --output_dir=../model \
+     --model_type=roberta \
+     --config_name=microsoft/codebert-base \
+     --model_name_or_path=microsoft/codebert-base \
+     --tokenizer_name=roberta-base \
+     --do_eval \
+     --train_data_file=../dataset/train.jsonl \
+     --eval_data_file=../dataset/valid.jsonl \
+     --epoch 2 \
+     --block_size 400 \
+     --train_batch_size 8 \
+     --eval_batch_size 16 \
+     --learning_rate 2e-5 \
+     --max_grad_norm 1.0 \
+     --evaluate_during_training \
+     --seed 123456
Code-Code/Clone-detection-POJ-104/code/evaluate.sh ADDED
@@ -0,0 +1,6 @@
+ python extract_answers.py \
+     -c ../dataset/valid.jsonl \
+     -o ../model/answers.jsonl
+ python evaluator.py \
+     -a ../model/answers.jsonl \
+     -p ../model/predictions.jsonl
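Both files passed to evaluator.py are JSON Lines: each line is an object with an "index" and a list of "answers" (for the gold file, the indices of the other programs in the same cluster; for the prediction file, the retrieved indices in ranked order). A minimal sketch of the expected format, with made-up indices (the real ones come from the dataset):

import json

# Two hypothetical lines of answers.jsonl / predictions.jsonl.
rows = [{"index": "0", "answers": ["7", "42"]},
        {"index": "7", "answers": ["0", "42"]}]
with open("answers.jsonl", "w") as f:
    for js in rows:
        f.write(json.dumps(js) + "\n")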
Code-Code/Clone-detection-POJ-104/code/evaluator.py ADDED
@@ -0,0 +1,64 @@
+ # Copyright (c) Microsoft Corporation.
+ # Licensed under the MIT license.
+ import logging
+ import sys
+ import json
+ import numpy as np
+
+ def read_answers(filename):
+     answers = {}
+     with open(filename) as f:
+         for line in f:
+             line = line.strip()
+             js = json.loads(line)
+             answers[js['index']] = js['answers']
+     return answers
+
+ def read_predictions(filename):
+     predictions = {}
+     with open(filename) as f:
+         for line in f:
+             line = line.strip()
+             js = json.loads(line)
+             predictions[js['index']] = js['answers']
+     return predictions
+
+ def calculate_scores(answers, predictions):
+     scores = []
+     for key in answers:
+         if key not in predictions:
+             logging.error("Missing prediction for index {}.".format(key))
+             sys.exit(1)
+
+         if len(answers[key]) != len(predictions[key]):
+             logging.error("Mismatched number of answers for index {}.".format(key))
+             sys.exit(1)
+
+         answer = set(answers[key])
+
+         # precision at each rank where a true clone is retrieved
+         Avep = []
+         for k, p in enumerate(predictions[key]):
+             if p in answer:
+                 Avep.append((len(Avep)+1)/(k+1))
+
+         # normalize by R, the size of the gold answer set (MAP@R)
+         scores.append(sum(Avep)/len(answer))
+
+     result = {}
+     result['MAP@R'] = round(np.mean(scores), 4)
+     return result
+
+ def main():
+     import argparse
+     parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for the POJ-104 dataset.')
+     parser.add_argument('--answers', '-a', help="filename of the gold answers, in jsonl format.")
+     parser.add_argument('--predictions', '-p', help="filename of the leaderboard predictions, in jsonl format.")
+
+     args = parser.parse_args()
+     answers = read_answers(args.answers)
+     predictions = read_predictions(args.predictions)
+     scores = calculate_scores(answers, predictions)
+     print(scores)
+
+ if __name__ == '__main__':
+     main()
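The metric is MAP@R: for each query, calculate_scores sums the precision at every rank where a true clone appears and divides by R, the size of the gold answer set. A toy sanity check with made-up indices (assumes evaluator.py is importable from the working directory):

from evaluator import calculate_scores

answers = {"0": ["1", "2"]}       # example "0" has two true clones
predictions = {"0": ["1", "3"]}   # one hit at rank 1, one miss
print(calculate_scores(answers, predictions))
# precision 1/1 at the single hit, divided by R=2 -> a MAP@R of 0.5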
Code-Code/Clone-detection-POJ-104/code/extract_answers.py ADDED
@@ -0,0 +1,39 @@
+ # Copyright (c) Microsoft Corporation.
+ # Licensed under the MIT license.
+ import json
+
+ def extract_answers(filename):
+     cluster = {}
+     with open(filename) as f:
+         for line in f:
+             line = line.strip()
+             js = json.loads(line)
+             if js['label'] not in cluster:
+                 cluster[js['label']] = set()
+             cluster[js['label']].add(js['index'])
+     answers = []
+     for key in cluster:
+         for idx1 in cluster[key]:
+             # every other member of the same cluster is a gold answer
+             temp = {}
+             temp['index'] = idx1
+             temp['answers'] = []
+             for idx2 in cluster[key]:
+                 if idx1 != idx2:
+                     temp['answers'].append(idx2)
+             answers.append(temp)
+     return answers
+
+
+ def main():
+     import argparse
+     parser = argparse.ArgumentParser(description='Extract answers from code files.')
+     parser.add_argument('--codefile', '-c', help="filename of the code examples.")
+     parser.add_argument('--outfile', '-o', help="filename of the output file.")
+     args = parser.parse_args()
+     answers = extract_answers(args.codefile)
+     with open(args.outfile, 'w') as f:
+         for line in answers:
+             f.write(json.dumps(line)+'\n')
+
+ if __name__ == '__main__':
+     main()
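extract_answers groups indices by "label" and, for each index, emits every other member of its cluster as a gold answer. A toy run on a hypothetical three-line input (file name, indices, and code snippets are made up for illustration):

import json
from extract_answers import extract_answers

with open("mini.jsonl", "w") as f:
    for js in [{"label": "1", "index": "a", "code": "int main(){}"},
               {"label": "1", "index": "b", "code": "int main(){return 0;}"},
               {"label": "2", "index": "c", "code": "void f(){}"}]:
        f.write(json.dumps(js) + "\n")

print(extract_answers("mini.jsonl"))
# e.g. [{'index': 'a', 'answers': ['b']}, {'index': 'b', 'answers': ['a']},
#       {'index': 'c', 'answers': []}]   (order may vary: clusters are sets)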
Code-Code/Clone-detection-POJ-104/code/model.py ADDED
@@ -0,0 +1,48 @@
+ # Copyright (c) Microsoft Corporation.
+ # Licensed under the MIT License.
+ import torch
+ import torch.nn as nn
+
+
+ class Model(nn.Module):
+     def __init__(self, encoder, config, tokenizer, args):
+         super(Model, self).__init__()
+         self.encoder = encoder
+         self.config = config
+         self.tokenizer = tokenizer
+         self.args = args
+
+     def forward(self, input_ids=None, p_input_ids=None, n_input_ids=None, labels=None):
+         bs, _ = input_ids.size()
+         # encode anchor, positive and negative examples in a single pass
+         input_ids = torch.cat((input_ids, p_input_ids, n_input_ids), 0)
+
+         outputs = self.encoder(input_ids, attention_mask=input_ids.ne(1))  # 1 is RoBERTa's pad token id
+         if len(outputs) > 1:
+             outputs = outputs[1]           # pooled output
+         else:
+             outputs = outputs[0][:, 0, :]  # fall back to the first ([CLS]) hidden state
+         outputs = outputs.split(bs, 0)     # (anchor, positive, negative) vectors
+
+         prob_1 = (outputs[0]*outputs[1]).sum(-1)   # anchor-positive similarity
+         prob_2 = (outputs[0]*outputs[2]).sum(-1)   # anchor-negative similarity
+         temp = torch.cat((outputs[0], outputs[1]), 0)
+         temp_labels = torch.cat((labels, labels), 0)
+         prob_3 = torch.mm(outputs[0], temp.t())    # in-batch negatives
+         mask = labels[:, None] == temp_labels[None, :]
+         prob_3 = prob_3*(1-mask.float())-1e9*mask.float()  # mask out same-label pairs
+
+         prob = torch.softmax(torch.cat((prob_1[:, None], prob_2[:, None], prob_3), -1), -1)
+         loss = torch.log(prob[:, 0]+1e-10)
+         loss = -loss.mean()
+         return loss, outputs[0]
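The forward pass is an InfoNCE-style contrastive loss: the anchor's similarity to its positive competes in a softmax against the sampled negative and all in-batch vectors, with same-label pairs masked to -1e9 so they cannot act as negatives. A self-contained sketch of the same computation on random vectors (shapes and names are chosen for illustration):

import torch

bs, h = 2, 4
anchor, pos, neg = torch.randn(bs, h), torch.randn(bs, h), torch.randn(bs, h)
labels = torch.tensor([0, 1])

prob_1 = (anchor * pos).sum(-1)                # anchor-positive similarity
prob_2 = (anchor * neg).sum(-1)                # anchor-negative similarity
temp = torch.cat((anchor, pos), 0)             # in-batch candidates
temp_labels = torch.cat((labels, labels), 0)
prob_3 = torch.mm(anchor, temp.t())            # anchor vs every in-batch vector
mask = labels[:, None] == temp_labels[None, :]
prob_3 = prob_3 * (1 - mask.float()) - 1e9 * mask.float()  # drop same-label pairs

prob = torch.softmax(torch.cat((prob_1[:, None], prob_2[:, None], prob_3), -1), -1)
loss = -torch.log(prob[:, 0] + 1e-10).mean()   # the positive should win the softmax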
Code-Code/Clone-detection-POJ-104/code/run.py ADDED
@@ -0,0 +1,632 @@
+ # coding=utf-8
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """
+ Fine-tuning a pretrained encoder (e.g. CodeBERT/RoBERTa) for clone detection on POJ-104.
+ The task is framed as retrieval: the encoder is trained with a contrastive loss over
+ (anchor, positive, negative) triplets and evaluated with MAP.
+ """
+
+ from __future__ import absolute_import, division, print_function
+
+ import argparse
+ import glob
+ import logging
+ import os
+ import pickle
+ import random
+ import re
+ import shutil
+
+ import numpy as np
+ import torch
+ from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler, TensorDataset
+ from torch.utils.data.distributed import DistributedSampler
+ import json
+ try:
+     from torch.utils.tensorboard import SummaryWriter
+ except ImportError:
+     from tensorboardX import SummaryWriter
+
+ from tqdm import tqdm, trange
+ import multiprocessing
+ from model import Model
+ cpu_cont = multiprocessing.cpu_count()
+ from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
+                           BertConfig, BertModel, BertTokenizer,
+                           GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
+                           OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
+                           RobertaConfig, RobertaModel, RobertaTokenizer,
+                           DistilBertConfig, DistilBertModel, DistilBertTokenizer)
+
+ logger = logging.getLogger(__name__)
+
+ MODEL_CLASSES = {
+     'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
+     'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
+     'bert': (BertConfig, BertModel, BertTokenizer),
+     'roberta': (RobertaConfig, RobertaModel, RobertaTokenizer),
+     'distilbert': (DistilBertConfig, DistilBertModel, DistilBertTokenizer)
+ }
+
+
+ class InputFeatures(object):
+     """A single set of training/test features for an example."""
+     def __init__(self,
+                  input_tokens,
+                  input_ids,
+                  index,
+                  label,
+                  ):
+         self.input_tokens = input_tokens
+         self.input_ids = input_ids
+         self.index = index
+         self.label = label
+
+
+ def convert_examples_to_features(js, tokenizer, args):
+     # source
+     code = ' '.join(js['code'].split())
+     code_tokens = tokenizer.tokenize(code)[:args.block_size-2]
+     source_tokens = [tokenizer.cls_token]+code_tokens+[tokenizer.sep_token]
+     source_ids = tokenizer.convert_tokens_to_ids(source_tokens)
+     padding_length = args.block_size - len(source_ids)
+     source_ids += [tokenizer.pad_token_id]*padding_length
+     return InputFeatures(source_tokens, source_ids, js['index'], int(js['label']))
+
+ class TextDataset(Dataset):
+     def __init__(self, tokenizer, args, file_path=None):
+         self.examples = []
+         data = []
+         with open(file_path) as f:
+             for line in f:
+                 line = line.strip()
+                 js = json.loads(line)
+                 data.append(js)
+         for js in data:
+             self.examples.append(convert_examples_to_features(js, tokenizer, args))
+         if 'train' in file_path:
+             for idx, example in enumerate(self.examples[:3]):
+                 logger.info("*** Example ***")
+                 logger.info("idx: {}".format(idx))
+                 logger.info("label: {}".format(example.label))
+                 logger.info("input_tokens: {}".format([x.replace('\u0120', '_') for x in example.input_tokens]))
+                 logger.info("input_ids: {}".format(' '.join(map(str, example.input_ids))))
+         self.label_examples = {}
+         for e in self.examples:
+             if e.label not in self.label_examples:
+                 self.label_examples[e.label] = []
+             self.label_examples[e.label].append(e)
+
+     def __len__(self):
+         return len(self.examples)
+
+     def __getitem__(self, i):
+         label = self.examples[i].label
+         index = self.examples[i].index
+         labels = list(self.label_examples)
+         labels.remove(label)
+         # sample a positive example: same label, different index
+         while True:
+             shuffle_example = random.sample(self.label_examples[label], 1)[0]
+             if shuffle_example.index != index:
+                 p_example = shuffle_example
+                 break
+         # sample a negative example from a different label
+         n_example = random.sample(self.label_examples[random.sample(labels, 1)[0]], 1)[0]
+
+         return (torch.tensor(self.examples[i].input_ids), torch.tensor(p_example.input_ids),
+                 torch.tensor(n_example.input_ids), torch.tensor(label))
+
+
+ def set_seed(seed=42):
+     random.seed(seed)
+     os.environ['PYTHONHASHSEED'] = str(seed)
+     np.random.seed(seed)
+     torch.manual_seed(seed)
+     torch.cuda.manual_seed(seed)
+     torch.backends.cudnn.deterministic = True
+
+
+ def train(args, train_dataset, model, tokenizer):
+     """ Train the model """
+
+     args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
+     train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
+
+     train_dataloader = DataLoader(train_dataset, sampler=train_sampler,
+                                   batch_size=args.train_batch_size, num_workers=4, pin_memory=True)
+     args.max_steps = args.epoch*len(train_dataloader)
+     args.save_steps = len(train_dataloader)
+     args.warmup_steps = len(train_dataloader)
+     args.logging_steps = len(train_dataloader)
+     args.num_train_epochs = args.epoch
+     model.to(args.device)
+     # Prepare optimizer and schedule (linear warmup and decay)
+     no_decay = ['bias', 'LayerNorm.weight']
+     optimizer_grouped_parameters = [
+         {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
+          'weight_decay': args.weight_decay},
+         {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
+     ]
+     optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
+     scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.max_steps*0.1,
+                                                 num_training_steps=args.max_steps)
+     if args.fp16:
+         try:
+             from apex import amp
+         except ImportError:
+             raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
+         model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
+
+     # multi-gpu training (should be after apex fp16 initialization)
+     if args.n_gpu > 1:
+         model = torch.nn.DataParallel(model)
+
+     # Distributed training (should be after apex fp16 initialization)
+     if args.local_rank != -1:
+         model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
+                                                           output_device=args.local_rank,
+                                                           find_unused_parameters=True)
+
+     checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
+     scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
+     optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
+     if os.path.exists(scheduler_last):
+         scheduler.load_state_dict(torch.load(scheduler_last))
+     if os.path.exists(optimizer_last):
+         optimizer.load_state_dict(torch.load(optimizer_last))
+     # Train!
+     logger.info("***** Running training *****")
+     logger.info("  Num examples = %d", len(train_dataset))
+     logger.info("  Num Epochs = %d", args.num_train_epochs)
+     logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
+     logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d",
+                 args.train_batch_size * args.gradient_accumulation_steps * (
+                     torch.distributed.get_world_size() if args.local_rank != -1 else 1))
+     logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
+     logger.info("  Total optimization steps = %d", args.max_steps)
+
+     global_step = args.start_step
+     tr_loss, logging_loss, avg_loss, tr_nb, tr_num, train_loss = 0.0, 0.0, 0.0, 0, 0, 0
+     best_acc = 0.0
+     # model.resize_token_embeddings(len(tokenizer))
+     model.zero_grad()
+     for idx in range(args.start_epoch, int(args.num_train_epochs)):
+         bar = train_dataloader
+         tr_num = 0
+         train_loss = 0
+         for step, batch in enumerate(bar):
+             inputs = batch[0].to(args.device)
+             p_inputs = batch[1].to(args.device)
+             n_inputs = batch[2].to(args.device)
+             labels = batch[3].to(args.device)
+             model.train()
+             loss, vec = model(inputs, p_inputs, n_inputs, labels)
+
+             if args.n_gpu > 1:
+                 loss = loss.mean()  # mean() to average on multi-gpu parallel training
+             if args.gradient_accumulation_steps > 1:
+                 loss = loss / args.gradient_accumulation_steps
+
+             if args.fp16:
+                 with amp.scale_loss(loss, optimizer) as scaled_loss:
+                     scaled_loss.backward()
+                 torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
+             else:
+                 loss.backward()
+                 torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
+
+             tr_loss += loss.item()
+             tr_num += 1
+             train_loss += loss.item()
+             if avg_loss == 0:
+                 avg_loss = tr_loss
+             avg_loss = round(train_loss/tr_num, 5)
+             if (step+1) % 100 == 0:
+                 logger.info("epoch {} step {} loss {}".format(idx, step+1, avg_loss))
+             # bar.set_description("epoch {} loss {}".format(idx, avg_loss))
+
+             if (step + 1) % args.gradient_accumulation_steps == 0:
+                 optimizer.step()
+                 optimizer.zero_grad()
+                 scheduler.step()
+                 global_step += 1
+                 output_flag = True
+                 avg_loss = round(np.exp((tr_loss - logging_loss) / (global_step - tr_nb)), 4)
+                 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
+                     logging_loss = tr_loss
+                     tr_nb = global_step
+
+                 if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
+
+                     if args.local_rank == -1 and args.evaluate_during_training:  # Only evaluate on a single GPU, otherwise metrics may not average well
+                         results = evaluate(args, model, tokenizer, eval_when_training=True)
+                         for key, value in results.items():
+                             logger.info("  %s = %s", key, round(value, 4))
+                         # Save model checkpoint
+                         tr_num = 0
+                         train_loss = 0
+
+                         if results['eval_map'] > best_acc:
+                             best_acc = results['eval_map']
+                             logger.info("  "+"*"*20)
+                             logger.info("  Best map:%s", round(best_acc, 4))
+                             logger.info("  "+"*"*20)
+
+                             checkpoint_prefix = 'checkpoint-best-map'
+                             output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
+                             if not os.path.exists(output_dir):
+                                 os.makedirs(output_dir)
+                             model_to_save = model.module if hasattr(model, 'module') else model
+                             output_dir = os.path.join(output_dir, '{}'.format('model.bin'))
+                             torch.save(model_to_save.state_dict(), output_dir)
+                             logger.info("Saving model checkpoint to %s", output_dir)
+
+         # Save a checkpoint at the end of every epoch
+         output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx))
+         if not os.path.exists(output_dir):
+             os.makedirs(output_dir)
+         model_to_save = model.module if hasattr(model, 'module') else model
+         ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
+         logger.info("Saving model checkpoint to %s", ckpt_output_path)
+         torch.save(model_to_save.state_dict(), ckpt_output_path)
+
+
+ eval_dataset = None
+ def evaluate(args, model, tokenizer, eval_when_training=False):
+     eval_output_dir = args.output_dir
+     global eval_dataset
+     if eval_dataset is None:
+         eval_dataset = TextDataset(tokenizer, args, args.eval_data_file)
+
+     if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
+         os.makedirs(eval_output_dir)
+
+     args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+     # Note that DistributedSampler samples randomly
+     eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
+     eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True)
+
+     # multi-gpu evaluate
+     if args.n_gpu > 1 and eval_when_training is False:
+         model = torch.nn.DataParallel(model)
+
+     # Eval!
+     logger.info("***** Running evaluation *****")
+     logger.info("  Num examples = %d", len(eval_dataset))
+     logger.info("  Batch size = %d", args.eval_batch_size)
+     eval_loss = 0.0
+     nb_eval_steps = 0
+     model.eval()
+     vecs = []
+     labels = []
+     for batch in eval_dataloader:
+         inputs = batch[0].to(args.device)
+         p_inputs = batch[1].to(args.device)
+         n_inputs = batch[2].to(args.device)
+         label = batch[3].to(args.device)
+         with torch.no_grad():
+             lm_loss, vec = model(inputs, p_inputs, n_inputs, label)
+             eval_loss += lm_loss.mean().item()
+             vecs.append(vec.cpu().numpy())
+             labels.append(label.cpu().numpy())
+         nb_eval_steps += 1
+     vecs = np.concatenate(vecs, 0)
+     labels = np.concatenate(labels, 0)
+     eval_loss = eval_loss / nb_eval_steps
+     perplexity = torch.tensor(eval_loss)
+
+     scores = np.matmul(vecs, vecs.T)
+     dic = {}
+     for i in range(scores.shape[0]):
+         scores[i, i] = -1000000  # exclude self-retrieval
+         if int(labels[i]) not in dic:
+             dic[int(labels[i])] = -1
+         dic[int(labels[i])] += 1  # dic[label] = number of *other* examples with this label (R)
+     sort_ids = np.argsort(scores, axis=-1, kind='quicksort', order=None)[:, ::-1]
+     MAP = []
+     for i in range(scores.shape[0]):
+         cont = 0
+         label = int(labels[i])
+         Avep = []
+         for j in range(dic[label]):
+             index = sort_ids[i, j]
+             if int(labels[index]) == label:
+                 Avep.append((len(Avep)+1)/(j+1))
+         MAP.append(sum(Avep)/dic[label])
+
+     result = {
+         "eval_loss": float(perplexity),
+         "eval_map": float(np.mean(MAP))
+     }
+
+     return result
+
+
+ def test(args, model, tokenizer):
+     eval_dataset = TextDataset(tokenizer, args, args.test_data_file)
+
+     args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
+     # Note that DistributedSampler samples randomly
+     eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
+     eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
+
+     # multi-gpu evaluate
+     if args.n_gpu > 1:
+         model = torch.nn.DataParallel(model)
+
+     # Eval!
+     logger.info("***** Running Test *****")
+     logger.info("  Num examples = %d", len(eval_dataset))
+     logger.info("  Batch size = %d", args.eval_batch_size)
+     eval_loss = 0.0
+     nb_eval_steps = 0
+     model.eval()
+     vecs = []
+     labels = []
+     for batch in eval_dataloader:
+         inputs = batch[0].to(args.device)
+         p_inputs = batch[1].to(args.device)
+         n_inputs = batch[2].to(args.device)
+         label = batch[3].to(args.device)
+         with torch.no_grad():
+             lm_loss, vec = model(inputs, p_inputs, n_inputs, label)
+             eval_loss += lm_loss.mean().item()
+             vecs.append(vec.cpu().numpy())
+             labels.append(label.cpu().numpy())
+         nb_eval_steps += 1
+     vecs = np.concatenate(vecs, 0)
+     labels = np.concatenate(labels, 0)
+     eval_loss = eval_loss / nb_eval_steps
+     perplexity = torch.tensor(eval_loss)
+
+     scores = np.matmul(vecs, vecs.T)
+     for i in range(scores.shape[0]):
+         scores[i, i] = -1000000  # exclude self-retrieval
+     sort_ids = np.argsort(scores, axis=-1, kind='quicksort', order=None)[:, ::-1]
+     indexs = []
+     for example in eval_dataset.examples:
+         indexs.append(example.index)
+     with open(os.path.join(args.output_dir, "predictions.jsonl"), 'w') as f:
+         for index, sort_id in zip(indexs, sort_ids):
+             js = {}
+             js['index'] = index
+             js['answers'] = []
+             for idx in sort_id[:499]:
+                 js['answers'].append(indexs[int(idx)])
+             f.write(json.dumps(js)+'\n')
+
+
+ def main():
+     parser = argparse.ArgumentParser()
+
+     ## Required parameters
+     parser.add_argument("--train_data_file", default=None, type=str, required=True,
+                         help="The input training data file (a jsonl file).")
+     parser.add_argument("--output_dir", default=None, type=str, required=True,
+                         help="The output directory where the model predictions and checkpoints will be written.")
+
+     ## Other parameters
+     parser.add_argument("--eval_data_file", default=None, type=str,
+                         help="An optional input evaluation data file (a jsonl file).")
+     parser.add_argument("--test_data_file", default=None, type=str,
+                         help="An optional input test data file (a jsonl file).")
+
+     parser.add_argument("--model_type", default="bert", type=str,
+                         help="The model architecture to be fine-tuned.")
+     parser.add_argument("--model_name_or_path", default=None, type=str,
+                         help="The model checkpoint for weights initialization.")
+
+     parser.add_argument("--mlm", action='store_true',
+                         help="Train with masked-language modeling loss instead of language modeling.")
+     parser.add_argument("--mlm_probability", type=float, default=0.15,
+                         help="Ratio of tokens to mask for masked language modeling loss")
+
+     parser.add_argument("--config_name", default="", type=str,
+                         help="Optional pretrained config name or path if not the same as model_name_or_path")
+     parser.add_argument("--tokenizer_name", default="", type=str,
+                         help="Optional pretrained tokenizer name or path if not the same as model_name_or_path")
+     parser.add_argument("--cache_dir", default="", type=str,
+                         help="Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)")
+     parser.add_argument("--block_size", default=-1, type=int,
+                         help="Optional input sequence length after tokenization. "
+                              "The training dataset will be truncated into blocks of this size for training. "
+                              "Defaults to the model max input length for single sentence inputs (taking into account special tokens).")
+     parser.add_argument("--do_train", action='store_true',
+                         help="Whether to run training.")
+     parser.add_argument("--do_eval", action='store_true',
+                         help="Whether to run eval on the dev set.")
+     parser.add_argument("--do_test", action='store_true',
+                         help="Whether to run predictions on the test set.")
+     parser.add_argument("--evaluate_during_training", action='store_true',
+                         help="Run evaluation during training at each logging step.")
+     parser.add_argument("--do_lower_case", action='store_true',
+                         help="Set this flag if you are using an uncased model.")
+
+     parser.add_argument("--train_batch_size", default=4, type=int,
+                         help="Total batch size for training (split across GPUs).")
+     parser.add_argument("--eval_batch_size", default=4, type=int,
+                         help="Total batch size for evaluation (split across GPUs).")
+     parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
+                         help="Number of update steps to accumulate before performing a backward/update pass.")
+     parser.add_argument("--learning_rate", default=5e-5, type=float,
+                         help="The initial learning rate for Adam.")
+     parser.add_argument("--weight_decay", default=0.0, type=float,
+                         help="Weight decay if we apply some.")
+     parser.add_argument("--adam_epsilon", default=1e-8, type=float,
+                         help="Epsilon for Adam optimizer.")
+     parser.add_argument("--max_grad_norm", default=1.0, type=float,
+                         help="Max gradient norm.")
+     parser.add_argument("--num_train_epochs", default=1.0, type=float,
+                         help="Total number of training epochs to perform.")
+     parser.add_argument("--max_steps", default=-1, type=int,
+                         help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.")
+     parser.add_argument("--warmup_steps", default=0, type=int,
+                         help="Linear warmup over warmup_steps.")
+
+     parser.add_argument('--logging_steps', type=int, default=50,
+                         help="Log every X update steps.")
+     parser.add_argument('--save_steps', type=int, default=50,
+                         help="Save checkpoint every X update steps.")
+     parser.add_argument('--save_total_limit', type=int, default=None,
+                         help='Limit the total number of checkpoints by deleting the older checkpoints in the output_dir; does not delete by default')
+     parser.add_argument("--eval_all_checkpoints", action='store_true',
+                         help="Evaluate all checkpoints starting with the same prefix as model_name_or_path and ending with a step number")
+     parser.add_argument("--no_cuda", action='store_true',
+                         help="Avoid using CUDA when available")
+     parser.add_argument('--overwrite_output_dir', action='store_true',
+                         help="Overwrite the content of the output directory")
+     parser.add_argument('--overwrite_cache', action='store_true',
+                         help="Overwrite the cached training and evaluation sets")
+     parser.add_argument('--seed', type=int, default=42,
+                         help="random seed for initialization")
+     parser.add_argument('--epoch', type=int, default=42,
+                         help="number of training epochs")
+     parser.add_argument('--fp16', action='store_true',
+                         help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
+     parser.add_argument('--fp16_opt_level', type=str, default='O1',
+                         help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
+                              "See details at https://nvidia.github.io/apex/amp.html")
+     parser.add_argument("--local_rank", type=int, default=-1,
+                         help="For distributed training: local_rank")
+     parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
+     parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
+
+     args = parser.parse_args()
+
+     # Setup distant debugging if needed
+     if args.server_ip and args.server_port:
+         # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
+         import ptvsd
+         print("Waiting for debugger attach")
+         ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
+         ptvsd.wait_for_attach()
+
+     # Setup CUDA, GPU & distributed training
+     if args.local_rank == -1 or args.no_cuda:
+         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
+         args.n_gpu = torch.cuda.device_count()
+     else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
+         torch.cuda.set_device(args.local_rank)
+         device = torch.device("cuda", args.local_rank)
+         torch.distributed.init_process_group(backend='nccl')
+         args.n_gpu = 1
+     args.device = device
+     # guard against n_gpu == 0 when running on CPU
+     args.per_gpu_train_batch_size = args.train_batch_size // max(args.n_gpu, 1)
+     args.per_gpu_eval_batch_size = args.eval_batch_size // max(args.n_gpu, 1)
+     # Setup logging
+     logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
+                         datefmt='%m/%d/%Y %H:%M:%S',
+                         level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
+     logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
+                    args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
+
+     # Set seed
+     set_seed(args.seed)
+
+     # Load pretrained model and tokenizer
+     if args.local_rank not in [-1, 0]:
+         torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training downloads model & vocab
+
+     args.start_epoch = 0
+     args.start_step = 0
+     checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
+     if os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
+         args.model_name_or_path = os.path.join(checkpoint_last, 'pytorch_model.bin')
+         args.config_name = os.path.join(checkpoint_last, 'config.json')
+         idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
+         with open(idx_file, encoding='utf-8') as idxf:
+             args.start_epoch = int(idxf.readlines()[0].strip()) + 1
+
+         step_file = os.path.join(checkpoint_last, 'step_file.txt')
+         if os.path.exists(step_file):
+             with open(step_file, encoding='utf-8') as stepf:
+                 args.start_step = int(stepf.readlines()[0].strip())
+
+         logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))
+
+     config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
+     config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
+                                           cache_dir=args.cache_dir if args.cache_dir else None)
+     config.num_labels = 1
+     tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name,
+                                                 do_lower_case=args.do_lower_case,
+                                                 cache_dir=args.cache_dir if args.cache_dir else None)
+     if args.block_size <= 0:
+         args.block_size = tokenizer.max_len_single_sentence  # Our input block size will be the max possible for the model
+     args.block_size = min(args.block_size, tokenizer.max_len_single_sentence)
+     if args.model_name_or_path:
+         model = model_class.from_pretrained(args.model_name_or_path,
+                                             from_tf=bool('.ckpt' in args.model_name_or_path),
+                                             config=config,
+                                             cache_dir=args.cache_dir if args.cache_dir else None)
+     else:
+         model = model_class(config)
+
+     model = Model(model, config, tokenizer, args)
+     if args.local_rank == 0:
+         torch.distributed.barrier()  # End of barrier to make sure only the first process in distributed training downloads model & vocab
+
+     logger.info("Training/evaluation parameters %s", args)
+
+     # Training
+     if args.do_train:
+         if args.local_rank not in [-1, 0]:
+             torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training processes the dataset; the others use the cache
+
+         train_dataset = TextDataset(tokenizer, args, args.train_data_file)
+         if args.local_rank == 0:
+             torch.distributed.barrier()
+
+         train(args, train_dataset, model, tokenizer)
+
+     # Evaluation
+     results = {}
+     if args.do_eval and args.local_rank in [-1, 0]:
+         checkpoint_prefix = 'epoch_1/subject_model.pth'  # 'checkpoint-best-map/model.bin'
+         output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
+         model.load_state_dict(torch.load(output_dir), strict=False)
+         model.to(args.device)
+         result = evaluate(args, model, tokenizer)
+         logger.info("***** Eval results *****")
+         for key in sorted(result.keys()):
+             logger.info("  %s = %s", key, str(round(result[key], 4)))
+
+     if args.do_test and args.local_rank in [-1, 0]:
+         checkpoint_prefix = 'epoch_1/subject_model.pth'  # 'checkpoint-best-map/model.bin'
+         output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
+         model.load_state_dict(torch.load(output_dir), strict=False)
+         model.to(args.device)
+         test(args, model, tokenizer)
+
+     return results
+
+
+ if __name__ == "__main__":
+     main()
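Note that for --do_eval and --do_test, main() loads the hard-coded epoch_1/subject_model.pth checkpoint rather than checkpoint-best-map/model.bin. A hedged sketch of loading that checkpoint outside run.py, mirroring what main() does (paths and model names are taken from the shell scripts above; args is only stored by Model, so None suffices here):

import torch
from transformers import RobertaConfig, RobertaModel, RobertaTokenizer
from model import Model

config = RobertaConfig.from_pretrained("microsoft/codebert-base")
config.num_labels = 1
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
encoder = RobertaModel.from_pretrained("microsoft/codebert-base", config=config)

model = Model(encoder, config, tokenizer, args=None)
state = torch.load("../model/epoch_1/subject_model.pth", map_location="cpu")
model.load_state_dict(state, strict=False)
model.eval()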
Code-Code/Clone-detection-POJ-104/code/test.sh ADDED
@@ -0,0 +1,17 @@
+ CUDA_VISIBLE_DEVICES=0,1 python run.py \
+     --output_dir=../model \
+     --model_type=roberta \
+     --config_name=microsoft/codebert-base \
+     --model_name_or_path=microsoft/codebert-base \
+     --tokenizer_name=roberta-base \
+     --do_test \
+     --train_data_file=../dataset/train.jsonl \
+     --test_data_file=../dataset/valid.jsonl \
+     --epoch 2 \
+     --block_size 400 \
+     --train_batch_size 8 \
+     --eval_batch_size 16 \
+     --learning_rate 2e-5 \
+     --max_grad_norm 1.0 \
+     --evaluate_during_training \
+     --seed 123456
Code-Code/Clone-detection-POJ-104/code/train.sh ADDED
@@ -0,0 +1,18 @@
+ CUDA_VISIBLE_DEVICES=0,1 python run.py \
+     --output_dir=../model \
+     --model_type=roberta \
+     --config_name=microsoft/codebert-base \
+     --model_name_or_path=microsoft/codebert-base \
+     --tokenizer_name=roberta-base \
+     --do_train \
+     --train_data_file=../dataset/train.jsonl \
+     --eval_data_file=../dataset/valid.jsonl \
+     --test_data_file=../dataset/test.jsonl \
+     --epoch 2 \
+     --block_size 400 \
+     --train_batch_size 8 \
+     --eval_batch_size 16 \
+     --learning_rate 2e-5 \
+     --max_grad_norm 1.0 \
+     --evaluate_during_training \
+     --seed 123456
Code-Code/Clone-detection-POJ-104/dataset.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4c13009574c8c3c85c4ec26f6e33e53765479f41fa20239578b473fd11df4d01
+ size 7269797
Code-Code/Clone-detection-POJ-104/model/epoch_0/subject_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:44aeb4dcda7ca079f6948aafc8038ceaad81d0e13a4e698b6587729c06ad1bc7
+ size 498665958
Code-Code/Clone-detection-POJ-104/model/epoch_1/subject_model.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cc0fa86663c23b599349322702b3aa1f2451a71b00943c7913f4f85ce98c40f4
+ size 498665958