zyf0278 committed on
Commit
10da810
·
1 Parent(s): acd0349

add CodeCompletion-token

Browse files
Code-Code/CodeCompletion-token/code/beam.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch
4
+ from torch.autograd import Variable
5
+ import copy
6
+
7
class Beam(object):
    """State of a beam search for one example.

    Tracks the top-`size` partial hypotheses: per-step scores, backpointers
    into the previous beam, emitted token ids, and hypotheses that have
    already emitted an end-of-sequence token.
    """

    def __init__(self, size, sos, eos):
        """
        Args:
            size: beam width.
            sos: start-of-sequence token id used to seed the beam.
            eos: container of token ids treated as end-of-sequence.
        """
        self.size = size
        # Generalization: fall back to CPU tensor constructors when CUDA is
        # unavailable (the original hard-coded torch.cuda and crashed on
        # CPU-only hosts); on GPU hosts behavior is unchanged.
        self.tt = torch.cuda if torch.cuda.is_available() else torch
        # The score for each translation on the beam.
        self.scores = self.tt.FloatTensor(size).zero_()
        # The backpointers at each time-step.
        self.prevKs = []
        # The outputs at each time-step; the beam starts from SOS.
        self.nextYs = [self.tt.LongTensor(size)
                       .fill_(0)]
        self.nextYs[0][:] = sos
        # Has EOS topped the beam yet.
        self._eos = eos
        self.eosTop = False
        # (score, timestep, beam-index) triples for finished hypotheses.
        self.finished = []

    def getCurrentState(self):
        "Get the outputs for the current timestep."
        batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
        return batch

    def getCurrentOrigin(self):
        "Get the backpointers for the current timestep."
        return self.prevKs[-1]

    def advance(self, wordLk):
        """
        Given prob over words for every last beam `wordLk`:
        compute and update the beam search.

        Parameters:

        * `wordLk`- probs of advancing from the last step (K x words)

        Returns: True if beam search is complete.
        """
        numWords = wordLk.size(1)

        # Sum the previous scores.
        if len(self.prevKs) > 0:
            beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)

            # Don't let EOS have children.
            for i in range(self.nextYs[-1].size(0)):
                if self.nextYs[-1][i] in self._eos:
                    beamLk[i] = -1e20
        else:
            # First step: all beams are identical, so expand only beam 0.
            beamLk = wordLk[0]
        flatBeamLk = beamLk.view(-1)
        bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)

        self.scores = bestScores

        # bestScoresId is a flattened beam x word array, so recover which
        # word and which beam each score came from.
        prevK = bestScoresId // numWords
        self.prevKs.append(prevK)
        self.nextYs.append((bestScoresId - prevK * numWords))

        # Record hypotheses that just emitted EOS.
        for i in range(self.nextYs[-1].size(0)):
            if self.nextYs[-1][i] in self._eos:
                s = self.scores[i]
                self.finished.append((s, len(self.nextYs) - 1, i))

        # End condition is when top-of-beam is EOS.
        if self.nextYs[-1][0] in self._eos:
            self.eosTop = True

    def done(self):
        """Search is complete once EOS tops the beam and `size` hypotheses finished."""
        return self.eosTop and len(self.finished) >= self.size

    def getFinal(self):
        """Return the top `size` finished hypotheses, padding with the best
        still-unfinished beams when fewer than `size` have emitted EOS."""
        if len(self.finished) == 0:
            self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
        self.finished.sort(key=lambda a: -a[0])
        if len(self.finished) != self.size:
            unfinished = []
            for i in range(self.nextYs[-1].size(0)):
                if self.nextYs[-1][i] not in self._eos:
                    s = self.scores[i]
                    unfinished.append((s, len(self.nextYs) - 1, i))
            unfinished.sort(key=lambda a: -a[0])
            self.finished += unfinished[:self.size-len(self.finished)]
        return self.finished[:self.size]

    def getHyp(self, beam_res):
        """
        Walk back through the backpointers to construct the full hypothesis
        for each (score, timestep, k) triple in `beam_res`.
        """
        hyps = []
        for _, timestep, k in beam_res:
            hyp = []
            for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
                hyp.append(self.nextYs[j+1][k])
                k = self.prevKs[j][k]
            hyps.append(hyp[::-1])
        return hyps

    def buildTargetTokens(self, preds):
        """Truncate each predicted id sequence at the first EOS token (inclusive)."""
        sentence = []
        for pred in preds:
            tokens = []
            for tok in pred:
                tokens.append(tok)
                if tok in self._eos:
                    break
            sentence.append(tokens)
        return sentence
Code-Code/CodeCompletion-token/code/dataset.py ADDED
@@ -0,0 +1,261 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT License.
3
+ from __future__ import absolute_import, division, print_function
4
+
5
+ import argparse
6
+ import glob
7
+ import logging
8
+ import os
9
+ import pickle
10
+ import random
11
+ import re
12
+ import gc
13
+ import shutil
14
+ import json
15
+
16
+ import numpy as np
17
+ import torch
18
+ from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
19
+ from torch.utils.data.distributed import DistributedSampler
20
+
21
+ from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
22
+ BertConfig, BertForMaskedLM, BertTokenizer,
23
+ GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
24
+ OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
25
+ RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
26
+ DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
27
+
28
class TextDataset(Dataset):
    """Pre-training dataset: packs tokenized functions into fixed-size blocks.

    Reads `<lang>/<file_type>.pkl` (a pickled list of dicts with a 'function'
    field), wraps each function in <s> ... </s>, tokenizes, concatenates all
    ids and slices them into `block_size` chunks. In distributed runs each
    rank keeps every `world_size`-th example. Results are cached per
    langs/blocksize/worldsize/rank under `args.output_dir`.
    """

    def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
        if args.local_rank == -1:
            local_rank = 0
            world_size = 1
        else:
            local_rank = args.local_rank
            world_size = torch.distributed.get_world_size()

        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        cached_file = os.path.join(args.output_dir, file_type+"_langs_%s"%(args.langs)+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
        if os.path.exists(cached_file) and not args.overwrite_cache:
            if file_type == 'train':
                logger.warning("Loading features from cached file %s", cached_file)
            with open(cached_file, 'rb') as handle:
                self.inputs = pickle.load(handle)

        else:
            self.inputs = []
            if args.langs == 'all':
                langs = os.listdir(args.data_dir)
            else:
                langs = [args.langs]

            data = []
            for lang in langs:
                datafile = os.path.join(args.data_dir, lang, file_type+'.pkl')
                if file_type == 'train':
                    logger.warning("Creating features from dataset file at %s", datafile)
                # BUGFIX: close the data file deterministically (was
                # pickle.load(open(...)) which leaked the handle).
                with open(datafile, 'rb') as f:
                    dataset = pickle.load(f)
                data.extend(['<s> '+' '.join(x['function'].split())+' </s>' for idx, x in enumerate(dataset) if idx % world_size == local_rank])

            length = len(data)
            logger.warning("Data size: %d"%(length))
            input_ids = []
            # BUGFIX: `length//10` is 0 for fewer than 10 examples, which made
            # the progress modulus below raise ZeroDivisionError.
            log_every = max(length // 10, 1)
            for idx, x in enumerate(data):
                try:
                    input_ids.extend(tokenizer.encode(x))
                except Exception:
                    # Best-effort: skip examples the tokenizer cannot encode.
                    pass
                if idx % log_every == 0:
                    logger.warning("Rank %d, load %d"%(local_rank, idx / log_every * 10))
            del data
            gc.collect()

            # Slice the concatenated token stream into block_size chunks;
            # a trailing partial block is dropped.
            length = len(input_ids)
            for i in range(0, length-block_size, block_size):
                self.inputs.append(input_ids[i: i + block_size])
            del input_ids
            gc.collect()

            if file_type == 'train':
                logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs)))
                logger.warning("Saving features into cached file %s", cached_file)
            with open(cached_file, 'wb') as handle:
                pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, item):
        return torch.tensor(self.inputs[item])
96
+
97
class finetuneDataset(Dataset):
    """Fine-tuning dataset: packs tokenized lines of `<file_type>.txt` into blocks.

    Each line is wrapped in <s> ... </s> (if not already), tokenized, and the
    concatenated token stream is sharded evenly across ranks, then sliced into
    `block_size` chunks. Results are cached under `args.output_dir`.
    """

    def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
        if args.local_rank == -1:
            local_rank = 0
            world_size = 1
        else:
            local_rank = args.local_rank
            world_size = torch.distributed.get_world_size()

        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
        if os.path.exists(cached_file) and not args.overwrite_cache:
            if file_type == 'train':
                logger.warning("Loading features from cached file %s", cached_file)
            with open(cached_file, 'rb') as handle:
                self.inputs = pickle.load(handle)

        else:
            self.inputs = []

            datafile = os.path.join(args.data_dir, f"{file_type}.txt")
            if file_type == 'train':
                logger.warning("Creating features from dataset file at %s", datafile)
            with open(datafile) as f:
                data = f.readlines()

            length = len(data)
            logger.info("Data size: %d"%(length))
            input_ids = []
            # BUGFIX: guard the progress modulus against length < 10
            # (the original `length//10` raised ZeroDivisionError).
            log_every = max(length // 10, 1)
            for idx, x in enumerate(data):
                x = x.strip()
                # Ensure every line is wrapped in <s> ... </s>.
                if not (x.startswith("<s>") and x.endswith("</s>")):
                    x = "<s> " + x + " </s>"
                try:
                    input_ids.extend(tokenizer.encode(x))
                except Exception:
                    # Best-effort: skip lines the tokenizer cannot encode.
                    pass
                if idx % log_every == 0:
                    logger.warning("Rank %d, load %d"%(local_rank, idx / log_every * 10))
            del data
            gc.collect()

            # Shard the token stream evenly across ranks.
            length = len(input_ids) // world_size
            logger.info(f"tokens: {length*world_size}")
            input_ids = input_ids[local_rank*length: (local_rank+1)*length]

            for i in range(0, length-block_size, block_size):
                self.inputs.append(input_ids[i: i + block_size])
            del input_ids
            gc.collect()

            if file_type == 'train':
                logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs)))
                logger.warning("Saving features into cached file %s", cached_file)
            with open(cached_file, 'wb') as handle:
                pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, item):
        return torch.tensor(self.inputs[item])
163
+
164
class EvalDataset(Dataset):
    """Token-level evaluation dataset.

    Tokenizes `<file_type>.txt`, then chops the token stream into
    `block_size` samples, cutting each block back to the last whole-word
    boundary and right-padding with the pad token. Results are cached under
    `args.output_dir`.
    """

    def __init__(self, tokenizer, args, logger, file_type='train', block_size=1024):
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size))
        if os.path.exists(cached_file) and not args.overwrite_cache:
            with open(cached_file, 'rb') as handle:
                self.inputs = pickle.load(handle)

        else:
            self.inputs = []

            datafile = os.path.join(args.data_dir, f"{file_type}.txt")
            with open(datafile) as f:
                data = f.readlines()

            length = len(data)
            logger.info("Data size: %d"%(length))
            input_ids = []
            # BUGFIX: guard the progress modulus against length < 10
            # (the original `length//10` raised ZeroDivisionError).
            log_every = max(length // 10, 1)
            for idx, x in enumerate(data):
                x = x.strip()
                # Ensure every line is wrapped in <s> ... </s>.
                if not (x.startswith("<s>") and x.endswith("</s>")):
                    x = "<s> " + x + " </s>"
                try:
                    input_ids.extend(tokenizer.encode(x))
                except Exception:
                    # Best-effort: skip lines the tokenizer cannot encode.
                    pass
                if idx % log_every == 0:
                    logger.warning("load %d"%(idx / log_every * 10))
            del data
            gc.collect()

            logger.info(f"tokens: {len(input_ids)}")
            self.split(input_ids, tokenizer, logger, block_size=block_size)
            del input_ids
            gc.collect()

            with open(cached_file, 'wb') as handle:
                pickle.dump(self.inputs, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def split(self, input_ids, tokenizer, logger, block_size=1024):
        """Slice `input_ids` into padded `block_size` samples on token boundaries."""
        sample = []
        i = 0
        while i < len(input_ids):
            sample = input_ids[i: i+block_size]
            if len(sample) == block_size:
                # Walk back from the end of the block to the last position that
                # starts a new word ('\u0120' is the GPT-2 space marker) or a
                # literal placeholder, so no word is split across samples.
                for j in range(block_size):
                    if tokenizer.convert_ids_to_tokens(sample[block_size-1-j])[0] == '\u0120' or tokenizer.convert_ids_to_tokens(sample[block_size-1-j]).startswith("<NUM_LIT"):
                        break
                    if sample[block_size-1-j] in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id]:
                        if sample[block_size-1-j] != tokenizer.bos_token_id:
                            j -= 1
                        break
                if j == block_size-1:
                    # A whole block without any boundary cannot be split safely.
                    print(tokenizer.decode(sample))
                    exit()
                sample = sample[: block_size-1-j]
            # Advance by the (possibly truncated) sample so cut tokens are
            # re-included at the start of the next block.
            i += len(sample)
            pad_len = block_size-len(sample)
            sample += [tokenizer.pad_token_id]*pad_len
            self.inputs.append(sample)

            if len(self.inputs) % 10000 == 0:
                logger.info(f"{len(self.inputs)} samples")

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, item):
        return torch.tensor(self.inputs[item])
239
+
240
+
241
+
242
class lineDataset(Dataset):
    """Line-level completion test set.

    Each JSON line holds an "input" context and a "gt" ground-truth line;
    items are (tensor of the last `block_size` context token ids, gt string).
    """

    def __init__(self, tokenizer, args, logger, file_type='test', block_size=924):
        path = os.path.join(args.data_dir, f"{file_type}.json")
        with open(path) as fp:
            raw_lines = fp.readlines()

        logger.info("Data size: %d"%(len(raw_lines)))
        self.inputs = []
        self.gts = []
        for raw in raw_lines:
            example = json.loads(raw.strip())
            # Keep only the trailing `block_size` context tokens.
            self.inputs.append(tokenizer.encode(example["input"])[-block_size:])
            self.gts.append(example["gt"])

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, item):
        return torch.tensor(self.inputs[item]), self.gts[item]
Code-Code/CodeCompletion-token/code/eval.sh ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Token-level code completion: evaluate a trained model on the javaCorpus
# test split (see run_lm.py --do_eval).
LANG=java # set python for py150
DATADIR=../dataset/javaCorpus/token_completion
LITFILE=../dataset/javaCorpus/literals.json
OUTPUTDIR=../model/javaCorpus
PRETRAINDIR=microsoft/CodeGPT-small-java # microsoft/CodeGPT-small-py for py150
LOGFILE=eval_javaCorpus.log

# NOTE(review): PRETRAINDIR is defined above but --pretrain_dir below points
# at OUTPUTDIR (the fine-tuned checkpoint) — confirm this is intentional.
CUDA_VISIBLE_DEVICES=0 python run_lm.py \
        --data_dir=$DATADIR \
        --lit_file=$LITFILE \
        --langs=$LANG \
        --output_dir=$OUTPUTDIR \
        --pretrain_dir=$OUTPUTDIR \
        --log_file=$LOGFILE \
        --model_type=gpt2 \
        --block_size=512 \
        --do_eval \
        --per_gpu_eval_batch_size=16 \
        --logging_steps=100 \
        --seed=42
Code-Code/CodeCompletion-token/code/evaluate.sh ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Score token-level predictions against the ground-truth test file
# (accuracy over non-special tokens; see evaluator.py).
python evaluator.py \
        -a=../dataset/javaCorpus/token_completion/test.txt \
        -p=../model/javaCorpus/predictions.txt
Code-Code/CodeCompletion-token/code/evaluator.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT license.
3
+ import os
4
+ import logging
5
+ import argparse
6
+
7
+ logger = logging.getLogger(__name__)
8
+ logging.basicConfig(level=logging.INFO)
9
+
10
+ def main():
11
+ parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for code completion (token level).')
12
+ parser.add_argument('--answers', '-a', required=True, help="filename of the labels, in txt format.")
13
+ parser.add_argument('--predictions', '-p', required=True, help="filename of the leaderboard predictions, in txt format.")
14
+ args = parser.parse_args()
15
+
16
+ preds = open(args.predictions, "r").readlines()
17
+ gts = open(args.answers, "r").readlines()
18
+
19
+ assert len(preds) == len(gts), f"Samples of predictions and answers are not equal, {len(preds)}: {len(gts)}"
20
+
21
+ total = 0
22
+ correct = 0.0
23
+ for pred, gt in zip(preds, gts):
24
+ pred = pred.split()
25
+ gt = gt.split()
26
+ assert len(pred) == len(gt), f"Sequence length of prediction and answer are not equal, {len(pred)}: {len(gt)}"
27
+ for x, y in zip(pred, gt):
28
+ if y not in ["<s>", "</s>", "<EOL>", "<pad>"]:
29
+ total += 1
30
+ if x == y:
31
+ correct += 1
32
+
33
+ logger.info(f"Total {total} tokens, accuracy: {round(correct/total*100, 2)}")
34
+
35
+ if __name__ == "__main__":
36
+ main()
Code-Code/CodeCompletion-token/code/model.py ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT License.
3
+ import math
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+
8
class RNNModel(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""

    def __init__(self, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
        """
        Args:
            ntoken: vocabulary size.
            ninp: embedding dimension.
            nhid: LSTM hidden size.
            nlayers: number of stacked LSTM layers.
            dropout: dropout probability (inside the LSTM and on its output).
            tie_weights: share the input embedding with the output projection
                (requires nhid == ninp).
        """
        super(RNNModel, self).__init__()
        self.ntoken = ntoken
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        self.rnn = nn.LSTM(ninp, nhid, nlayers, dropout=dropout, batch_first=True)
        self.decoder = nn.Linear(nhid, ntoken)
        self.criterion = nn.CrossEntropyLoss()

        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            if nhid != ninp:
                raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight

        self.init_weights()

        self.nhid = nhid
        self.nlayers = nlayers

    def init_weights(self):
        """Uniformly initialize embeddings and decoder weight; zero decoder bias."""
        initrange = 0.1
        nn.init.uniform_(self.encoder.weight, -initrange, initrange)
        # BUGFIX: the original zeroed decoder.weight (dead code — it is
        # immediately overwritten by the uniform init below) and left the
        # bias at its default; zero the bias instead, as in the upstream
        # PyTorch word-language-model example.
        nn.init.zeros_(self.decoder.bias)
        nn.init.uniform_(self.decoder.weight, -initrange, initrange)

    def forward(self, input, hidden=None, labels=None):
        """Run the language model.

        Returns (loss, logits, hidden) when `labels` is given, otherwise
        (logits, hidden). `input` is a (batch, seq) LongTensor of token ids.
        """
        emb = self.encoder(input)
        if hidden is not None:
            output, hidden = self.rnn(emb, hidden)
        else:
            output, hidden = self.rnn(emb)
        output = self.drop(output)
        output = self.decoder(output)
        if labels is not None:
            # Shift so that tokens < n predict token n.
            shift_logits = output[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            loss = self.criterion(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
            return loss, output, hidden
        else:
            return output, hidden

    def init_hidden(self, bsz):
        """Return a zeroed initial hidden state for batch size `bsz`.

        BUGFIX: the original consulted `self.rnn_type`, which is never set
        anywhere and raised AttributeError; dispatch on the actual module
        type instead (the LSTM needs an (h, c) pair).
        """
        weight = next(self.parameters())
        if isinstance(self.rnn, nn.LSTM):
            return (weight.new_zeros(self.nlayers, bsz, self.nhid),
                    weight.new_zeros(self.nlayers, bsz, self.nhid))
        return weight.new_zeros(self.nlayers, bsz, self.nhid)
Code-Code/CodeCompletion-token/code/run_lm.py ADDED
@@ -0,0 +1,728 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Code completion (both token level and line level) pipeline in CodeXGLUE
18
+ """
19
+
20
+ from __future__ import absolute_import, division, print_function
21
+
22
+ import argparse
23
+ import glob
24
+ import logging
25
+ import os
26
+ import pickle
27
+ import random
28
+ import re
29
+ import shutil
30
+ import json
31
+
32
+ import numpy as np
33
+ import torch
34
+ from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
35
+ from torch.utils.data.distributed import DistributedSampler
36
+ from dataset import TextDataset, finetuneDataset, EvalDataset, lineDataset
37
+ from beam import Beam
38
+
39
+ from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
40
+ BertConfig, BertForMaskedLM, BertTokenizer,
41
+ GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
42
+ OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
43
+ RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
44
+ DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
45
+ from model import RNNModel
46
+
47
+ # logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
48
+ # datefmt='%m/%d/%Y %H:%M:%S',
49
+ # level=logging.INFO)
50
# Module-level logger; configuration is left to the application entry point.
logger = logging.getLogger(__name__)

# Maps the --model_type CLI value to (config class, model class, tokenizer class).
# Note 'rnn' reuses the GPT-2 config/tokenizer but swaps in the local RNNModel.
MODEL_CLASSES = {
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
    'rnn': (GPT2Config, RNNModel, GPT2Tokenizer),
    'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
}
60
+
61
+
62
+
63
def load_and_cache_examples(args, tokenizer, evaluate=False):
    """Build the train or dev dataset for the current run.

    Uses finetuneDataset when --not_pretrain is set, TextDataset otherwise.
    """
    split = 'dev' if evaluate else 'train'
    dataset_cls = finetuneDataset if args.not_pretrain else TextDataset
    return dataset_cls(tokenizer, args, logger, file_type=split,
                       block_size=args.block_size)
71
+
72
def set_seed(args):
    """Seed every RNG (python, numpy, torch, CUDA) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
78
+
79
def update_config(args, config):
    """Overwrite the model config's vocabulary size with the run's value."""
    # config.n_positions = config.n_ctx = args.block_size
    config.vocab_size = args.vocab_size
82
+
83
def get_special_tokens(path):
    """Build the special-token list from a literals JSON file.

    The file must map "str", "num" and "char" to lists of literal values;
    the result starts with the three generic placeholders followed by one
    token per concrete literal.
    """
    # BUGFIX: close the literals file deterministically (was
    # json.load(open(path)) which leaked the handle).
    with open(path) as f:
        lits = json.load(f)
    tokens = ["<STR_LIT>", "<NUM_LIT>", "<CHAR_LIT>"]
    for lit in lits["str"]:
        tokens.append(f"<STR_LIT:{lit}>")
    for lit in lits["num"]:
        tokens.append(f"<NUM_LIT:{lit}>")
    for lit in lits["char"]:
        tokens.append(f"<CHAR_LIT:{lit}>")
    return tokens
93
+
94
+
95
+
96
def train(args, train_dataset, model, tokenizer, fh, pool):
    """ Train the model.

    Runs the standard fine-tuning loop: AdamW + linear warmup schedule,
    optional apex fp16, DataParallel / DistributedDataParallel, periodic
    logging and checkpointing (per --save_steps and per epoch).
    Returns (global_step, mean training loss).
    """
    if args.local_rank in [-1, 0]:
        args.tensorboard_dir = os.path.join(args.output_dir, 'tensorboard')
        if not os.path.exists(args.tensorboard_dir):
            os.makedirs(args.tensorboard_dir)

    args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset)

    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.batch_size, drop_last=True)
    # Effective totals account for the distributed world size.
    total_examples = len(train_dataset) * (
        torch.distributed.get_world_size() if args.local_rank != -1 else 1)
    batch_size = args.batch_size * args.gradient_accumulation_steps * (
        torch.distributed.get_world_size() if args.local_rank != -1 else 1)
    # if args.max_steps > 0:
    #     t_total = args.max_steps
    #     args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    if args.num_train_epochs > 0:
        t_total = total_examples // batch_size * args.num_train_epochs
    args.max_steps = t_total
    model.to(args.device)
    # Non-master ranks wait here so the master can set up first.
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()
    # Prepare optimizer and schedule (linear warmup and decay);
    # biases and LayerNorm weights are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
                                                num_training_steps=t_total)
    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
    # scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
    optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
    # if os.path.exists(scheduler_last):
    #     scheduler.load_state_dict(torch.load(scheduler_last, map_location="cpu"))
    # Resume the optimizer state when a previous run left a checkpoint.
    if os.path.exists(optimizer_last):
        logger.warning(f"Loading optimizer from {optimizer_last}")
        optimizer.load_state_dict(torch.load(optimizer_last, map_location="cpu"))
    if args.local_rank == 0:
        torch.distributed.barrier()
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank%args.gpu_per_node],
                                                          output_device=args.local_rank%args.gpu_per_node)

    # Train!
    logger.info("***** Running training *****")
    logger.info("  Num examples = %d", total_examples )
    logger.info("  Num epoch = %d", t_total*batch_size//total_examples)
    logger.info("  Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info("  Total train batch size (w. parallel, distributed & accumulation) = %d", batch_size)
    logger.info("  Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info("  Total optimization steps = %d", t_total)

    global_step = args.start_step
    tr_loss, logging_loss, avg_loss, tr_nb = 0.0, 0.0, 0.0, global_step
    # model.resize_token_embeddings(len(tokenizer))
    model.zero_grad()
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)

    for idx in range(args.start_epoch, int(args.num_train_epochs)):
        for step, batch in enumerate(train_dataloader):
            # Causal LM: inputs double as labels (shifting happens in the model).
            inputs, labels = (batch, batch)
            inputs = inputs.to(args.device)
            labels = labels.to(args.device)
            model.train()
            outputs = model(inputs, labels=labels)
            loss = outputs[0]

            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            tr_loss += loss.item()

            # Step the optimizer only every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
                scheduler.step()
                global_step += 1
                output_flag = True
                # avg_loss is reported as perplexity over the logging window.
                avg_loss = round(np.exp((tr_loss - logging_loss) / (global_step - tr_nb)), 4)
                if global_step % args.logging_steps == 0:
                    logger.info("  steps: %s  ppl: %s  lr: %s", global_step, round(avg_loss, 5), scheduler.get_last_lr()[0])
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    logging_loss = tr_loss
                    tr_nb = global_step

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    checkpoint_prefix = "checkpoint"
                    # Save model checkpoint
                    if args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer, eval_when_training=True)
                        for key, value in results.items():
                            logger.info("  %s = %s", key, round(value, 4))
                        output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(results['perplexity'], 4)))
                    else:
                        output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
                    if not os.path.exists(output_dir):
                        os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    # RNN checkpoints are plain state dicts; transformers use save_pretrained.
                    if args.model_type == "rnn":
                        torch.save(model_to_save.state_dict(), os.path.join(output_dir, "model.pt"))
                    else:
                        model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)

                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)

                    # _rotate_checkpoints(args, checkpoint_prefix)
                    # Also refresh the rolling 'checkpoint-last' directory used for resume.
                    last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
                    if not os.path.exists(last_output_dir):
                        os.makedirs(last_output_dir)
                    if args.model_type == "rnn":
                        torch.save(model_to_save.state_dict(), os.path.join(last_output_dir, "model.pt"))
                    else:
                        model_to_save.save_pretrained(last_output_dir)
                    tokenizer.save_pretrained(last_output_dir)
                    idx_file = os.path.join(last_output_dir, 'idx_file.txt')
                    with open(idx_file, 'w', encoding='utf-8') as idxf:
                        idxf.write(str(0) + '\n')

                    torch.save(optimizer.state_dict(), os.path.join(last_output_dir, "optimizer.pt"))
                    # torch.save(scheduler.state_dict(), os.path.join(last_output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", last_output_dir)

                    step_file = os.path.join(last_output_dir, 'step_file.txt')
                    with open(step_file, 'w', encoding='utf-8') as stepf:
                        stepf.write(str(global_step) + '\n')

            if args.max_steps > 0 and global_step > args.max_steps:
                break

        # Save a checkpoint at the end of every epoch.
        output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        model_to_save = model.module if hasattr(model, 'module') else model
        ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
        logger.info("Saving model checkpoint to %s", ckpt_output_path)
        torch.save(model_to_save.state_dict(), ckpt_output_path)

        if args.max_steps > 0 and global_step > args.max_steps:
            break

    return global_step, tr_loss / global_step
271
+
272
+
273
def evaluate(args, model, tokenizer, prefix="", eval_when_training=False):
    """Compute the validation-set loss and report it as perplexity.

    Args:
        args: parsed command-line namespace (device, batch sizes, local_rank, ...).
            NOTE: ``args.eval_batch_size`` is set in place here.
        model: the language model to evaluate; may be wrapped in DataParallel below.
        tokenizer: tokenizer passed through to dataset loading.
        prefix: sub-directory name under the output dir for the results file.
        eval_when_training: if True, skip DataParallel wrapping because the
            caller already wrapped the model for training.

    Returns:
        dict with a single key ``"perplexity"`` (float, exp of mean token loss).
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_output_dir = args.output_dir

    eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)

    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    # drop_last=True keeps every batch full, so averaging per-step losses below
    # is not skewed by a smaller final batch.
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, drop_last=True)

    # multi-gpu evaluate
    if args.n_gpu > 1 and eval_when_training is False:
        model = torch.nn.DataParallel(model)

    # Eval!
    #logger.info("***** Running evaluation {} *****".format(prefix))
    #logger.info(" Num examples = %d", len(eval_dataset))
    #logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()

    for batch in eval_dataloader:
        # Causal LM: each batch serves as its own labels (shift happens inside
        # the model's loss computation).
        inputs, labels = (batch, batch)
        inputs = inputs.to(args.device)
        labels = labels.to(args.device)

        with torch.no_grad():
            outputs = model(inputs, labels=labels)
            lm_loss = outputs[0]
            # .mean() collapses the per-device losses DataParallel returns.
            eval_loss += lm_loss.mean().item()
        nb_eval_steps += 1

    eval_loss = eval_loss / nb_eval_steps
    perplexity = torch.exp(torch.tensor(eval_loss))

    result = {
        "perplexity": float(perplexity)
    }

    # NOTE(review): assumes os.path.join(eval_output_dir, prefix) already
    # exists when prefix is non-empty — confirm against callers.
    output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        #logger.info("***** Eval results {} *****".format(prefix))
        for key in sorted(result.keys()):
            #logger.info(" %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))

    return result
def eval_acc(args, model, tokenizer, file_type='test'):
    """
    Evaluate token-level code completion on accuracy.

    This function can only be used to evaluate accuracy, but not inference,
    because the inputs are previous ground-truth sub-tokens, not predicted tokens.
    But it can be guaranteed that the accuracy in this function is the same as
    the real token-level completion.
    The reason is:
        Assuming the inputs are "context_len = 100 <EOL> masks = np . zeros (",
        and the ground truth is "context_len".
        Due to our BPE encoding, the model has to output "context", "_" and "len"
        in 3 time steps, i.e. gt0="context", gt1="_", gt2="len".
        In a real inference scenario:
            time step 0, inputs "context_len = 100 <EOL> masks = np . zeros ( ", model outputs: out0;
            time step 1, inputs: in1=out0, outputs: out1
            ... until the model outputs a complete token.
        But in this function, no matter what out0 is, in1=gt0="context".
        That is to say, in this function we feed the ground truth, not the output
        sub-token, when we predict the next piece of a token split by BPE.
        So obviously we could get different predictions from the real completion
        scenario.  However, for token-level accuracy a complete token counts as
        correct if and only if the model predicts every sub-token correctly; in
        that situation out0==gt0, out1==gt1, so it doesn't matter whether gt or
        output is fed back.  If the token is wrong, it is wrong in both settings.
        So it does not affect the token-level accuracy.

    I use this trick to speed up evaluation due to the large test set.

    Returns:
        (total, correct): number of scored tokens and number predicted exactly.
    """
    eval_dataset = EvalDataset(tokenizer, args, logger, file_type=file_type, block_size=args.block_size)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    model.to(args.device)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank%args.gpu_per_node],
                                                          output_device=args.local_rank%args.gpu_per_node)

    def DecodeIds(idxs):
        # Rebuild source text from sub-token ids: '\u0120' (GPT-2 "Ġ") marks a
        # sub-token that starts a new word; special tokens and <NUM_LIT...>
        # literals are surrounded by spaces; all other pieces concatenate.
        codes = ""
        for idx in idxs:
            to_add = tokenizer.convert_ids_to_tokens(idx)
            if tokenizer.convert_ids_to_tokens(idx)[0] == '\u0120':
                if not codes.endswith(" "):
                    codes += " " + to_add[1:]
                else:
                    codes += to_add[1:]
            elif (
                idx in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id] or
                tokenizer.convert_ids_to_tokens(idx).startswith("<NUM_LIT")
            ):
                codes += " " + to_add + " "
            else:
                codes += to_add
        return codes.strip(" ")

    model.eval()

    correct = 0.0
    total = 0

    total_pred = []
    total_gt = []

    for step, batch in enumerate(eval_dataloader):
        inputs = batch.to(args.device)

        with torch.no_grad():
            outputs = model(inputs)
            pred_scores = outputs[0]
            # Greedy decoding: logits at position i predict the token at i+1,
            # hence the pred[i-1] pairing with gt[i] below.
            pred_ids = pred_scores.argmax(-1)

        all_pred = []
        all_gt = []
        prev_pred = None
        for pred, gt in zip(pred_ids, inputs):
            pred = pred.cpu().tolist()
            gt = gt.cpu().tolist()

            # Accumulate sub-tokens (now_gt / now_pred) until a whole source
            # token has been seen, then flush one entry per complete token.
            for i, y in enumerate(gt):
                if i == 0:
                    if y in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id]:
                        now_gt = [y]
                        now_pred = [0] if prev_pred is None else [prev_pred]
                        all_pred.append(DecodeIds(now_pred).strip().split()[0])
                        all_gt.append(DecodeIds(now_gt).strip())
                        now_gt = []
                        now_pred = []
                    else:
                        now_gt = [y]
                        now_pred = [0] if prev_pred is None else [prev_pred]
                else:
                    if tokenizer.convert_ids_to_tokens(y)[0] == '\u0120':
                        # A new word begins: flush the token accumulated so far.
                        # "<SPACE>" is a placeholder when the prediction decodes
                        # to nothing (DecodeIds gave an empty string).
                        if len(now_gt) > 0:
                            try:
                                all_pred.append(DecodeIds(now_pred).strip().split()[0])
                            except IndexError:
                                all_pred.append("<SPACE>")
                            all_gt.append(DecodeIds(now_gt).strip())
                            now_gt = []
                            now_pred = []
                    if y in [tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.sep_token_id, tokenizer.pad_token_id] or tokenizer.convert_ids_to_tokens(y).startswith("<NUM_LIT"):
                        # Special tokens / numeric literals are complete tokens
                        # on their own: flush any pending token, then emit this
                        # one immediately.
                        if len(now_gt) > 0:
                            try:
                                all_pred.append(DecodeIds(now_pred).strip().split()[0])
                            except IndexError:
                                all_pred.append("<SPACE>")
                            all_gt.append(DecodeIds(now_gt).strip())
                        now_gt = [y]
                        now_pred = [pred[i-1]]
                        try:
                            all_pred.append(DecodeIds(now_pred).strip().split()[0])
                        except IndexError:
                            all_pred.append("<SPACE>")
                        all_gt.append(DecodeIds(now_gt).strip())
                        now_gt = []
                        now_pred = []
                        continue
                    now_gt.append(y)
                    now_pred.append(pred[i-1])
        assert len(all_pred) == len(all_gt)

        total_pred.extend(all_pred)
        total_gt.extend(all_gt)

        # Score only real code tokens; special markers are excluded from accuracy.
        for x, y in zip(all_pred, all_gt):
            if y not in ["<s>", "</s>", "<EOL>", "<pad>"]:
                total += 1
                if x == y:
                    correct += 1

        if step % args.logging_steps == 0:
            logger.info(f"{step} are done!")
            logger.info(f"{total}, {correct/total}")

    # pickle.dump(total_pred, open(os.path.join(args.output_dir, "preds.pkl"), "wb"))
    # pickle.dump(total_gt, open(os.path.join(args.output_dir, "gts.pkl"), "wb"))

    saved_file = os.path.join(args.output_dir, "predictions.txt")
    total_samples = post_process(args, total_pred, total_gt, open(os.path.join(args.data_dir, f"{file_type}.txt")).readlines(), saved_file)
    logger.info(f"Eval on {total_samples}, saved at {saved_file}")

    return total, correct
def post_process(args, preds, gts, true_gts, saved_file):
    """Re-assemble per-token predictions into one line per source sample.

    Walks the flat (pred, gt) token streams, skipping padding/empty ground-truth
    entries, and emits the accumulated predictions as a space-joined line each
    time the end-of-sample marker "</s>" is reached.  Each completed
    ground-truth line is checked against the corresponding raw line in
    `true_gts` to ensure the streams stayed aligned.

    Args:
        args: unused; kept for interface compatibility with callers.
        preds: flat list of predicted tokens.
        gts: flat list of ground-truth tokens, sample boundaries marked by "</s>".
        true_gts: raw lines of the original data file, one per sample.
        saved_file: path of the predictions file to write.

    Returns:
        int: number of samples written.

    Raises:
        AssertionError: if a reconstructed ground-truth line does not match
            the corresponding line of `true_gts`.
    """
    cnt = 0
    new_gt = []
    new_pred = []
    # `with` guarantees the file is closed even if the alignment assert fires
    # (the original left the handle open on that path).
    with open(saved_file, "w") as wf:
        for pred, gt in zip(preds, gts):
            # Skip padding and empty ground-truth tokens entirely.
            if gt in ["", "<pad>"]:
                continue
            new_gt.append(gt)
            # Predictions may contain stray spaces from decoding; strip them so
            # each token is a single space-free item on the output line.
            new_pred.append(pred.replace(" ", ""))
            if gt == "</s>":
                # End of one sample: verify alignment, write, and reset.
                gt_str = " ".join(new_gt)
                pred_str = " ".join(new_pred)
                assert gt_str == true_gts[cnt].strip(), f"{cnt} sample gt_str != true_gt"
                wf.write(pred_str + "\n")
                cnt += 1
                new_gt = []
                new_pred = []

    return cnt
def main():
    """Entry point: parse arguments, set up CUDA/distributed training, logging
    and seeding, load (or build) the model and tokenizer, then run training
    (--do_train) and/or token-level accuracy evaluation (--do_eval).
    """
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data path.")
    parser.add_argument("--langs", default=None, type=str, required=True,
                        help="Languages to train, if all, train all languages in data_dir")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--model_type", default="gpt2", type=str,
                        help="The model architecture to be fine-tuned.")
    parser.add_argument("--pretrain_dir", default="", type=str,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--config_dir", type=str,
                        help="config name. Required when training from scratch")
    parser.add_argument("--tokenizer_dir", type=str,
                        help="Pre-trained tokenizer dir. Required when training from scratch")
    parser.add_argument("--lit_file", type=str,
                        help="literals json file")
    parser.add_argument("--load_name", type=str, default="pretrained",
                        help="Load pretrained model name")

    parser.add_argument("--mlm", action='store_true',
                        help="Train with masked-language modeling loss instead of language modeling.")
    parser.add_argument("--mlm_probability", type=float, default=0.15,
                        help="Ratio of tokens to mask for masked language modeling loss")

    parser.add_argument("--cache_dir", default="", type=str,
                        help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
    parser.add_argument("--block_size", default=1024, type=int,
                        help="Optional input sequence length after tokenization."
                             "The training dataset will be truncated in block of this size for training."
                             "Default to the model max input length for single sentence inputs (take into account special tokens).")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--per_gpu_train_batch_size", default=4, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=12, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=1.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument('--logging_steps', type=int, default=1000,
                        help="Log every X updates steps.")
    parser.add_argument('--save_steps', type=int, default=5000,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument('--save_total_limit', type=int, default=None,
                        help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")
    parser.add_argument('--not_pretrain', action='store_true',
                        help="use different dataset")

    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument("--node_index", type=int, default=-1,
                        help="node index if multi-node running")
    parser.add_argument("--gpu_per_node", type=int, default=-1,
                        help="num of gpus per node")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")

    parser.add_argument('--log_file', type=str, default='')
    parser.add_argument('--tensorboard_dir', type=str)

    pool = None
    args = parser.parse_args()

    # args.output_dir = os.path.join(args.output_dir, args.dataset)

    if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm:
        raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
                         "flag (masked language modeling).")

    if os.path.exists(args.output_dir) and os.listdir(
            args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir))

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    logger.info("local_rank: %d, node_index: %d, gpu_per_node: %d"%(args.local_rank, args.node_index, args.gpu_per_node))
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        # Convert the node-local rank into a global rank across all nodes.
        args.local_rank += args.node_index * args.gpu_per_node
        args.n_gpu = 1
    args.device = device
    # args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)

    # Setup logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16,
                   torch.distributed.get_world_size() if args.local_rank != -1 else 1)

    # Use a FileHandler so logs are also written to args.log_file.
    fh = logging.FileHandler(args.log_file)
    logger.addHandler(fh)

    # Set seed
    set_seed(args)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training download model & vocab

    # Resume bookkeeping: if a checkpoint-last directory exists, pick up the
    # epoch and optimizer step recorded there by the training loop.
    args.start_epoch = 0
    args.start_step = 0
    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
    if args.do_train and os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
        args.pretrain_dir = os.path.join(checkpoint_last)
        args.config_name = os.path.join(checkpoint_last, 'config.json')
        idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
        with open(idx_file, encoding='utf-8') as idxf:
            args.start_epoch = int(idxf.readlines()[0].strip()) + 1

        step_file = os.path.join(checkpoint_last, 'step_file.txt')
        if os.path.exists(step_file):
            with open(step_file, encoding='utf-8') as stepf:
                args.start_step = int(stepf.readlines()[0].strip())

        logger.info("reload model from {}, resume from {} steps".format(checkpoint_last, args.start_step))

    # get special tokens
    special_tokens = get_special_tokens(args.lit_file)

    # Load pre-trained model
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    # NOTE(review): this always points at checkpoint-last, ignoring
    # --pretrain_dir (see the commented-out alternative) — confirm intended.
    pretrained = checkpoint_last #args.pretrain_dir
    if pretrained:
        tokenizer = tokenizer_class.from_pretrained(pretrained, do_lower_case=args.do_lower_case, sep_token='<EOL>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', additional_special_tokens=special_tokens)
        if args.model_type == "rnn":
            model = model_class(len(tokenizer), 768, 768, 1)
            model_last = os.path.join(pretrained, 'model.pt')
            if os.path.exists(model_last):
                logger.warning(f"Loading model from {model_last}")
                model.load_state_dict(torch.load(model_last, map_location="cpu"))
        else:
            model = model_class.from_pretrained(pretrained)
            model.resize_token_embeddings(len(tokenizer))
    else:
        # Training from scratch: build tokenizer and model from the configured dirs.
        tokenizer = tokenizer_class.from_pretrained(args.tokenizer_dir, sep_token='<EOL>', bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', additional_special_tokens=special_tokens)
        args.vocab_size = len(tokenizer)
        if args.model_type == "rnn":
            model = model_class(len(tokenizer), 768, 768, 1)
        else:
            config = config_class.from_pretrained(args.config_dir)
            model = model_class(config)
            model.resize_token_embeddings(len(tokenizer))


    model_parameters = model.parameters()
    num_params = sum([np.prod(p.size()) for p in model_parameters])
    logger.info(f"Model has a total of {num_params} trainable parameters")

    if args.local_rank == 0:
        torch.distributed.barrier()  # End of barrier to make sure only the first process in distributed training download model & vocab

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)

        global_step, tr_loss = train(args, train_dataset, model, tokenizer, fh, pool)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    # Only works on single GPU
    if args.do_eval:
        # Evaluation loads the epoch-5 checkpoint saved by the training loop.
        checkpoint_prefix = 'epoch_5/subject_model.pth'
        output_dir = os.path.join(args.output_dir, '{}'.format(checkpoint_prefix))
        model.load_state_dict(torch.load(output_dir))
        model.to(args.device)
        # Do not use the dev file here: it would fail the consistency check in
        # EvalDataset.__init__ and the process would exit.
        # dev_total, dev_cr = eval_acc(args, model, tokenizer, 'dev')
        # logger.info(f"Dev total tokens: {dev_total}, accuracy: {dev_cr/dev_total}")
        test_total, test_cr = eval_acc(args, model, tokenizer, 'test')
        logger.info(f"Test total tokens: {test_total}, accuracy: {test_cr/test_total}")
# Script entry point.
if __name__ == "__main__":
    main()
Code-Code/CodeCompletion-token/code/train.sh ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Fine-tune a GPT-2-architecture token-level code-completion model on the
# javaCorpus dataset (trained from scratch because of --not_pretrain).
# Adjust LANG/DATADIR/PRETRAINDIR for py150 as noted in the inline comments.
LANG=java                       # set python for py150
DATADIR=../dataset/javaCorpus/token_completion
LITFILE=../dataset/javaCorpus/literals.json
OUTPUTDIR=../model/javaCorpus
PRETRAINDIR=microsoft/CodeGPT-small-java        # microsoft/CodeGPT-small-py for py150
LOGFILE=train_javaCorpus.log
PER_NODE_GPU=4       # modify YOUR_GPU_NUM

CUDA_VISIBLE_DEVICES=0,1,2,3 python run_lm.py \
        --data_dir=$DATADIR \
        --lit_file=$LITFILE \
        --langs=$LANG \
        --output_dir=$OUTPUTDIR \
        --pretrain_dir=$PRETRAINDIR \
        --log_file=$LOGFILE \
        --model_type=gpt2 \
        --block_size=512 \
        --do_train \
        --gpu_per_node $PER_NODE_GPU \
        --learning_rate=8e-5 \
        --weight_decay=0.01 \
        --evaluate_during_training \
        --per_gpu_train_batch_size=1 \
        --per_gpu_eval_batch_size=4 \
        --gradient_accumulation_steps=4 \
        --num_train_epochs=5 \
        --logging_steps=100 \
        --save_steps=1000 \
        --seed=42 \
        --overwrite_output_dir \
        --not_pretrain
Code-Code/CodeCompletion-token/data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3fe81ae13261569dcb0147143f6be01900bdea8fc19394b931a2f6be720dac03
3
+ size 16149700
Code-Code/CodeCompletion-token/model/javaCorpus/epoch_1/subject_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7620d7764c8ab3ed610bd33a089895ae34640f5d8ac29ba18b3906228df3e79f
3
+ size 497840154
Code-Code/CodeCompletion-token/model/javaCorpus/epoch_2/subject_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:14cbd1d37904f6daacbe4345be5c9ebb052ff0320d6a652630e7fa2c8a14bd34
3
+ size 497840154
Code-Code/CodeCompletion-token/model/javaCorpus/epoch_3/subject_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8750f7eff3f95fea0dc69af85df906d5a4bc7387bc46f80aece0877e62d20f3d
3
+ size 497840154
Code-Code/CodeCompletion-token/model/javaCorpus/epoch_4/subject_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:614d002f472a8aa35050b56e8ccb6c5fcdeabe1bbf5f50e0c2e3d18e0dd0ed23
3
+ size 497840154
Code-Code/CodeCompletion-token/model/javaCorpus/epoch_5/subject_model.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdf5488012ceaf71409a8d129f391f4ba06a86054b63b79a8c0b4c0c41799f20
3
+ size 497840154