zyf0278 commited on
Commit
6f52c7a
·
1 Parent(s): d43c7c7

add text-to-code

Browse files
Text-code/text-to-code/code/beam.py ADDED
@@ -0,0 +1,118 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import torch
4
+ from torch.autograd import Variable
5
+ import copy
6
+
7
class Beam(object):
    """Beam-search bookkeeping for decoding a single example.

    Tracks per-hypothesis cumulative scores, backpointers and emitted
    tokens at each timestep, and collects finished hypotheses once `eos`
    is produced.

    Args:
        size: beam width (number of hypotheses kept per step).
        sos: start-of-sequence token id used to seed the beam.
        eos: end-of-sequence token id that terminates a hypothesis.
    """

    def __init__(self, size, sos, eos):
        self.size = size
        # Use CUDA tensor constructors when a GPU is present, otherwise fall
        # back to CPU ones. (The original hard-coded ``torch.cuda`` and
        # crashed on CPU-only machines.)
        self.tt = torch.cuda if torch.cuda.is_available() else torch
        # The score for each translation on the beam.
        self.scores = self.tt.FloatTensor(size).zero_()
        # The backpointers at each time-step.
        self.prevKs = []
        # The outputs at each time-step; every ray starts from <sos>.
        self.nextYs = [self.tt.LongTensor(size)
                       .fill_(0)]
        self.nextYs[0][:] = sos
        # Has EOS topped the beam yet.
        self._eos = eos
        self.eosTop = False
        # (score, time-step, beam-index) triples for finished hypotheses.
        self.finished = []

    def getCurrentState(self):
        "Get the outputs for the current timestep, shaped (size, 1)."
        batch = self.tt.LongTensor(self.nextYs[-1]).view(-1, 1)
        return batch

    def getCurrentOrigin(self):
        "Get the backpointers for the current timestep."
        return self.prevKs[-1]

    def advance(self, wordLk):
        """
        Given prob over words for every last beam `wordLk`:
        compute and update the beam search state.

        Parameters:

        * `wordLk` - probs of advancing from the last step (K x words)

        Sets `self.eosTop` when <eos> reaches the top of the beam; use
        `done()` to test for completion.
        """
        numWords = wordLk.size(1)

        # Sum the previous scores.
        if len(self.prevKs) > 0:
            beamLk = wordLk + self.scores.unsqueeze(1).expand_as(wordLk)

            # Don't let EOS have children.
            for i in range(self.nextYs[-1].size(0)):
                if self.nextYs[-1][i] == self._eos:
                    beamLk[i] = -1e20
        else:
            # First step: all rays are identical (<sos>), so expand row 0 only.
            beamLk = wordLk[0]
        flatBeamLk = beamLk.view(-1)
        bestScores, bestScoresId = flatBeamLk.topk(self.size, 0, True, True)

        self.scores = bestScores

        # bestScoresId is a flattened beam x word array, so recover which
        # word and which beam each score came from.
        prevK = bestScoresId // numWords
        self.prevKs.append(prevK)
        self.nextYs.append((bestScoresId - prevK * numWords))

        # Record any ray that just emitted <eos> as finished.
        for i in range(self.nextYs[-1].size(0)):
            if self.nextYs[-1][i] == self._eos:
                s = self.scores[i]
                self.finished.append((s, len(self.nextYs) - 1, i))

        # End condition is when top-of-beam is EOS.
        if self.nextYs[-1][0] == self._eos:
            self.eosTop = True

    def done(self):
        """True once <eos> tops the beam and `size` hypotheses finished."""
        return self.eosTop and len(self.finished) >= self.size

    def getFinal(self):
        """Return up to `size` finished (score, timestep, k) triples,
        best-scoring first, padding with unfinished rays if needed."""
        if len(self.finished) == 0:
            self.finished.append((self.scores[0], len(self.nextYs) - 1, 0))
        self.finished.sort(key=lambda a: -a[0])
        if len(self.finished) != self.size:
            unfinished = []
            for i in range(self.nextYs[-1].size(0)):
                if self.nextYs[-1][i] != self._eos:
                    s = self.scores[i]
                    unfinished.append((s, len(self.nextYs) - 1, i))
            unfinished.sort(key=lambda a: -a[0])
            self.finished += unfinished[:self.size - len(self.finished)]
        return self.finished[:self.size]

    def getHyp(self, beam_res):
        """
        Walk back through the backpointers to construct each full
        hypothesis (list of token tensors, <sos> excluded).
        """
        hyps = []
        for _, timestep, k in beam_res:
            hyp = []
            for j in range(len(self.prevKs[:timestep]) - 1, -1, -1):
                hyp.append(self.nextYs[j + 1][k])
                k = self.prevKs[j][k]
            hyps.append(hyp[::-1])
        return hyps

    def buildTargetTokens(self, preds):
        """Truncate each predicted token sequence at the first <eos>."""
        sentence = []
        for pred in preds:
            tokens = []
            for tok in pred:
                if tok == self._eos:
                    break
                tokens.append(tok)
            sentence.append(tokens)
        return sentence
Text-code/text-to-code/code/bleu.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2017 Google Inc. All Rights Reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # ==============================================================================
15
+
16
+ """Python implementation of BLEU and smooth-BLEU.
17
+
18
+ This module provides a Python implementation of BLEU and smooth-BLEU.
19
+ Smooth BLEU is computed following the method outlined in the paper:
20
+ Chin-Yew Lin, Franz Josef Och. ORANGE: a method for evaluating automatic
21
+ evaluation metrics for machine translation. COLING 2004.
22
+ """
23
+
24
+ import collections
25
+ import math
26
+
27
+
28
+ def _get_ngrams(segment, max_order):
29
+ """Extracts all n-grams upto a given maximum order from an input segment.
30
+
31
+ Args:
32
+ segment: text segment from which n-grams will be extracted.
33
+ max_order: maximum length in tokens of the n-grams returned by this
34
+ methods.
35
+
36
+ Returns:
37
+ The Counter containing all n-grams upto max_order in segment
38
+ with a count of how many times each n-gram occurred.
39
+ """
40
+ ngram_counts = collections.Counter()
41
+ for order in range(1, max_order + 1):
42
+ for i in range(0, len(segment) - order + 1):
43
+ ngram = tuple(segment[i:i+order])
44
+ ngram_counts[ngram] += 1
45
+ return ngram_counts
46
+
47
+
48
def compute_bleu(reference_corpus, translation_corpus, max_order=4,
                 smooth=False):
    """Computes BLEU score of translated segments against references.

    Args:
      reference_corpus: list of lists of references for each translation.
          Each reference should be tokenized into a list of tokens.
      translation_corpus: list of translations to score. Each translation
          should be tokenized into a list of tokens.
      max_order: Maximum n-gram order to use when computing BLEU score.
      smooth: Whether or not to apply Lin et al. 2004 smoothing.

    Returns:
      Tuple of (bleu, n-gram precisions, brevity penalty, length ratio,
      translation length, reference length).
    """
    clipped_matches = [0] * max_order
    candidate_ngrams = [0] * max_order
    reference_length = 0
    translation_length = 0
    for references, translation in zip(reference_corpus,
                                       translation_corpus):
        # Effective reference length: the shortest reference per segment.
        reference_length += min(len(r) for r in references)
        translation_length += len(translation)

        # Clip translation n-gram counts against the element-wise max
        # over all references.
        merged_ref_counts = collections.Counter()
        for reference in references:
            merged_ref_counts |= _get_ngrams(reference, max_order)
        overlap = _get_ngrams(translation, max_order) & merged_ref_counts
        for ngram, count in overlap.items():
            clipped_matches[len(ngram) - 1] += count
        for order in range(1, max_order + 1):
            possible = len(translation) - order + 1
            if possible > 0:
                candidate_ngrams[order - 1] += possible

    if smooth:
        # Add-one smoothing (Lin & Och, COLING 2004).
        precisions = [(clipped_matches[i] + 1.) / (candidate_ngrams[i] + 1.)
                      for i in range(max_order)]
    else:
        precisions = [float(clipped_matches[i]) / candidate_ngrams[i]
                      if candidate_ngrams[i] > 0 else 0.0
                      for i in range(max_order)]

    if min(precisions) > 0:
        p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)
        geo_mean = math.exp(p_log_sum)
    else:
        geo_mean = 0

    ratio = float(translation_length) / reference_length
    # Brevity penalty: no penalty for translations longer than references.
    bp = 1. if ratio > 1.0 else math.exp(1 - 1. / ratio)

    bleu = geo_mean * bp

    return (bleu, precisions, bp, ratio, translation_length, reference_length)
113
+
114
+
115
def _bleu(ref_file, trans_file, subword_option=None):
    """Corpus-level smoothed BLEU (order 4) between two token files.

    Args:
      ref_file: path to the reference file, one segment per line.
      trans_file: path to the translation file, one segment per line.
      subword_option: accepted for interface compatibility; unused.

    Returns:
      BLEU score scaled to 0-100, rounded to two decimals.
    """
    max_order = 4
    smooth = True
    reference_text = []
    for reference_filename in [ref_file]:
        with open(reference_filename) as fh:
            reference_text.append(fh.readlines())
    # Whitespace-tokenize each reference, grouped per segment.
    per_segment_references = [
        [reference.strip().split() for reference in references]
        for references in zip(*reference_text)
    ]
    with open(trans_file) as fh:
        translations = [line.strip().split() for line in fh]
    bleu_score, _, _, _, _, _ = compute_bleu(
        per_segment_references, translations, max_order, smooth)
    return round(100 * bleu_score, 2)
Text-code/text-to-code/code/dataset.py ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT License.
3
+ from __future__ import absolute_import, division, print_function
4
+
5
+ import argparse
6
+ import glob
7
+ import logging
8
+ import os
9
+ import pickle
10
+ import random
11
+ import re
12
+ import gc
13
+ import shutil
14
+ import json
15
+
16
+ import numpy as np
17
+ import torch
18
+ from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
19
+ from torch.utils.data.distributed import DistributedSampler
20
+
21
+ try:
22
+ from torch.utils.tensorboard import SummaryWriter
23
+ except:
24
+ from tensorboardX import SummaryWriter
25
+
26
+ from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
27
+ BertConfig, BertForMaskedLM, BertTokenizer,
28
+ GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
29
+ OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
30
+ RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
31
+ DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
32
+
33
+
34
class concodeDataset(Dataset):
    """CONCODE text-to-code dataset.

    Each item is (input_ids, token_labels) where token_labels marks, per
    position: 0 = padding, 1 = natural-language context, 2 = code tokens
    (the positions the LM loss should be computed on).
    """

    def __init__(self, tokenizer, args, logger, file_type='train', block_size=512, mode='train'):
        # In non-distributed runs, behave as rank 0 of a world of size 1.
        if args.local_rank==-1:
            local_rank=0
            world_size=1
        else:
            local_rank=args.local_rank
            world_size=torch.distributed.get_world_size()

        self.block_size = block_size
        self.mode = mode

        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        # Cache file name encodes split, block size, world size and rank so
        # different configurations never collide.
        cached_file = os.path.join(args.output_dir, file_type+"_blocksize_%d"%(block_size)+"_wordsize_%d"%(world_size)+"_rank_%d"%(local_rank))
        if mode != 'test' and os.path.exists(cached_file) and not args.overwrite_cache:
            # Fast path: reuse previously tokenized features.
            if file_type == 'train':
                logger.warning("Loading features from cached file %s", cached_file)
            with open(cached_file, 'rb') as handle:
                data = pickle.load(handle)
                self.inputs = data['inputs']
                self.token_labels = data['token_labels']

        else:
            self.inputs = []
            self.token_labels = []

            datafile = os.path.join(args.data_dir, f"{file_type}.json")
            if file_type == 'train':
                logger.warning("Creating features from dataset file at %s", datafile)
            datas = open(datafile).readlines()

            length = len(datas)
            logger.info("Data size: %d"%(length))
            # NOTE(review): length//10 is 0 for files with fewer than 10
            # lines, which would raise ZeroDivisionError below — confirm the
            # data splits are always larger than that.
            for idx, x in enumerate(datas):
                if idx % (length//10) == 0:
                    percent = idx / (length//10) * 10
                    logger.warning("Rank %d, load %d"%(local_rank, percent))
                # Shard the data across distributed workers by line index.
                if idx % world_size != local_rank:
                    continue
                x = json.loads(x)
                code = tokenizer.encode(x["code"])
                nl = tokenizer.encode(x["nl"])

                input_ids, input_labels = self.pad_and_get_mask(code, nl, tokenizer)
                self.inputs.append(input_ids)
                self.token_labels.append(input_labels)

            if file_type == 'train':
                logger.warning("Rank %d Training %d token, %d samples"%(local_rank, length, len(self.inputs)))
                logger.warning("Saving features into cached file %s", cached_file)
            if mode != 'test':
                with open(cached_file, 'wb') as handle:
                    pickle.dump({'inputs': self.inputs, 'token_labels': self.token_labels}, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def pad_and_get_mask(self, code, nl, tokenizer):
        """Join nl and code into one block_size sequence plus a label mask.

        Returns (inputs, labels); labels are 1 for nl tokens, 2 for code
        tokens (and the <bos> separator), 0 for padding/<eos> position.
        """
        # At test time the code is to be generated, so only keep the prompt.
        if self.mode == 'test':
            code = []
        # Truncate the longer of the two until nl + code + 2 specials fit.
        while (len(code) + len(nl) + 2 > self.block_size):
            if (len(code) > len(nl)):
                code = code[:-1]
            else:
                nl = nl[:-1]
        if self.mode == 'train':
            inputs = nl + [tokenizer.bos_token_id] + code + [tokenizer.eos_token_id]
            labels = [1] * len(nl) + [2] * (len(code)+1) + [0]
        else:
            # Inference prompt: nl plus <bos>; deliberately returned
            # unpadded (generation handles variable lengths).
            inputs = nl + [tokenizer.bos_token_id]
            labels = [1] * len(nl) + [2]
            return inputs, labels
        assert len(inputs) <= self.block_size
        pad_len = self.block_size - len(inputs)
        inputs += [tokenizer.pad_token_id] * pad_len
        labels += [0] * pad_len
        assert len(inputs) == len(labels)
        return inputs, labels

    def __len__(self):
        # Number of tokenized samples held by this shard.
        return len(self.inputs)

    def __getitem__(self, item):
        return torch.tensor(self.inputs[item]), torch.tensor(self.token_labels[item])
Text-code/text-to-code/code/eval.sh ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Evaluate a trained text-to-code (CONCODE) model with run.py.

# Target language of the generated code.
LANG=java
# Directory containing the dataset splits read by run.py.
DATADIR=../dataset
# Where run.py writes evaluation outputs.
OUTPUTDIR=../model
# Checkpoint directory to evaluate (the rolling last checkpoint).
PRETRAINDIR=../model/checkpoint-last
# Log file for this evaluation run.
LOGFILE=text2code_concode_eval.log

# NOTE(review): pins GPU 2 — adjust CUDA_VISIBLE_DEVICES for your machine.
CUDA_VISIBLE_DEVICES=2 python run.py \
        --data_dir=$DATADIR \
        --langs=$LANG \
        --output_dir=$OUTPUTDIR \
        --pretrain_dir=$PRETRAINDIR \
        --log_file=$LOGFILE \
        --model_type=gpt2 \
        --block_size=512 \
        --do_eval \
        --logging_steps=100 \
        --seed=42
Text-code/text-to-code/code/evaluator.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # Licensed under the MIT license.
3
+ import os
4
+ import logging
5
+ import argparse
6
+ from bleu import _bleu
7
+ import json
8
+
9
# Module-level logger; basicConfig makes INFO-level messages visible.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
11
+
12
def main():
    """Score leaderboard predictions against gold CONCODE answers.

    Reads predictions (one per line) and gold answers (JSON lines with a
    "code" field), then logs smoothed BLEU and exact-match accuracy.
    Uses a temporary ground_truth.txt file that is removed afterwards.
    """
    parser = argparse.ArgumentParser(description='Evaluate leaderboard predictions for code completion (line level).')
    parser.add_argument('--answers', '-a', required=True, help="filename of the labels, in json format.")
    parser.add_argument('--predictions', '-p', required=True, help="filename of the leaderboard predictions, in txt format.")
    args = parser.parse_args()

    with open(args.predictions, "r") as pred_fh:
        preds = pred_fh.readlines()
    with open(args.answers, "r") as gold_fh:
        gts = gold_fh.readlines()

    assert len(preds) == len(gts), f"Samples of predictions and answers are not equal, {len(preds)}: {len(gts)}"

    total = len(gts)
    EM = 0.0
    # Materialize the gold code into a plain-text file so _bleu can read it.
    with open("ground_truth.txt", "w") as wf:
        for raw_pred, raw_gold in zip(preds, gts):
            candidate = raw_pred.strip()
            gold = json.loads(raw_gold)["code"]
            wf.write(gold + "\n")
            if candidate.split() == gold.split():
                EM += 1

    bleu_score = round(_bleu("ground_truth.txt", args.predictions), 2)
    logger.info(f"BLEU: {bleu_score}, EM: {round(EM/total*100, 2)}")

    # Best-effort cleanup of the temporary gold file.
    try:
        os.remove("ground_truth.txt")
    except Exception:
        pass


if __name__ == "__main__":
    main()
Text-code/text-to-code/code/run.py ADDED
@@ -0,0 +1,665 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """
17
+ Text to code generation pipeline in CodeXGLUE
18
+ """
19
+
20
+ from __future__ import absolute_import, division, print_function
21
+
22
+ import argparse
23
+ import glob
24
+ import logging
25
+ import os
26
+ import pickle
27
+ import random
28
+ import re
29
+ import shutil
30
+ import json
31
+
32
+ import numpy as np
33
+ import torch
34
+ from torch.utils.data import DataLoader, Dataset, SequentialSampler, RandomSampler,TensorDataset
35
+ from torch.utils.data.distributed import DistributedSampler
36
+ from dataset import concodeDataset
37
+ from beam import Beam
38
+
39
+ try:
40
+ from torch.utils.tensorboard import SummaryWriter
41
+ except:
42
+ from tensorboardX import SummaryWriter
43
+
44
+ from torch.nn import CrossEntropyLoss
45
+
46
+ from bleu import _bleu
47
+ from transformers import (WEIGHTS_NAME, AdamW, get_linear_schedule_with_warmup,
48
+ BertConfig, BertForMaskedLM, BertTokenizer,
49
+ GPT2Config, GPT2LMHeadModel, GPT2Tokenizer,
50
+ OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer,
51
+ RobertaConfig, RobertaForMaskedLM, RobertaTokenizer,
52
+ DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
53
+
54
# Module-level logger shared by all helpers in this script.
logger = logging.getLogger(__name__)

# Maps the --model_type CLI value to its (config, LM model, tokenizer)
# transformers classes.
MODEL_CLASSES = {
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
    'openai-gpt': (OpenAIGPTConfig, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer)
}
63
+
64
+
65
+
66
def load_and_cache_examples(args, tokenizer, evaluate=False):
    """Build the CONCODE dataset for the train split, or dev when
    `evaluate` is True."""
    split = 'dev' if evaluate else 'train'
    return concodeDataset(tokenizer, args, logger, file_type=split,
                          block_size=args.block_size)
70
+
71
+
72
def set_seed(args):
    """Seed every RNG (python, numpy, torch, and CUDA when GPUs are used)
    from args.seed for reproducible runs."""
    seed = args.seed
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
78
+
79
+
80
def update_config(model, tokenizer):
    """Copy the tokenizer's special-token ids (bos/eos/pad) onto the model
    config so generation uses the same vocabulary conventions."""
    for attr in ('bos_token_id', 'eos_token_id', 'pad_token_id'):
        setattr(model.config, attr, getattr(tokenizer, attr))
84
+
85
+
86
def train(args, train_dataset, model, tokenizer, fh, pool):
    """Train the model; returns (global_step, average training loss).

    NOTE(review): `fh` and `pool` are accepted but never used in this body
    — confirm against callers before removing.
    """
    if args.local_rank in [-1, 0]:
        # Rank 0 (or single-process) owns the tensorboard writer.
        args.tensorboard_dir = os.path.join(args.output_dir, 'tensorboard')
        if not os.path.exists(args.tensorboard_dir):
            os.makedirs(args.tensorboard_dir)
        tb_writer = SummaryWriter(args.tensorboard_dir)

    args.batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset)

    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.batch_size, drop_last=True)
    total_examples = len(train_dataset) * (
        torch.distributed.get_world_size() if args.local_rank != -1 else 1)
    batch_size = args.batch_size * args.gradient_accumulation_steps * (
        torch.distributed.get_world_size() if args.local_rank != -1 else 1)
    # NOTE(review): t_total is only assigned when num_train_epochs > 0;
    # otherwise the scheduler construction below raises NameError — confirm
    # callers always pass a positive epoch count.
    if args.num_train_epochs > 0:
        t_total = total_examples // batch_size * args.num_train_epochs
    args.max_steps = t_total
    model.to(args.device)
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()
    # Prepare optimizer and schedule (linear warmup and decay); bias and
    # LayerNorm weights are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
         'weight_decay': args.weight_decay},
        {'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
                                                num_training_steps=t_total)
    # Resume optimizer/scheduler state from checkpoint-last when present.
    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
    scheduler_last = os.path.join(checkpoint_last, 'scheduler.pt')
    optimizer_last = os.path.join(checkpoint_last, 'optimizer.pt')
    if os.path.exists(scheduler_last):
        scheduler.load_state_dict(torch.load(scheduler_last, map_location="cpu"))
    if os.path.exists(optimizer_last):
        optimizer.load_state_dict(torch.load(optimizer_last, map_location="cpu"))
    if args.local_rank == 0:
        torch.distributed.barrier()
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank%args.gpu_per_node],
                                                          output_device=args.local_rank%args.gpu_per_node,
                                                          find_unused_parameters=True)

    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", total_examples )
    logger.info(" Num epoch = %d", t_total*batch_size//total_examples)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", batch_size)
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)

    global_step = args.start_step
    tr_loss, logging_loss,avg_loss,tr_nb = 0.0, 0.0,0.0,0
    model.zero_grad()
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)

    best_bleu = 0.0

    for idx in range(args.start_epoch, int(args.num_train_epochs)):
        for step, (batch, token_labels) in enumerate(train_dataloader):
            inputs = batch.to(args.device)
            # attn_mask is 1 on real tokens (label != 0); loss_mask is 1 on
            # code positions only (label == 2), where the LM loss applies.
            attn_mask = torch.tensor(token_labels.clone().detach() != 0, dtype=torch.uint8, device=args.device)
            loss_mask = torch.tensor(token_labels.clone().detach() == 2, dtype=torch.uint8, device=args.device)
            model.train()
            outputs = model(inputs, attention_mask=attn_mask)
            logits = outputs[0]
            labels = inputs
            # Shift so position t predicts token t+1.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens and keep only code positions.
            loss_fct = CrossEntropyLoss()
            flatten_shift_loss_mask = loss_mask[..., :-1].contiguous().view(-1)
            ids = torch.nonzero(flatten_shift_loss_mask).view(-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[ids], shift_labels.view(-1)[ids])

            if args.n_gpu > 1:
                loss = loss.mean() # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
                torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)

            tr_loss += loss.item()

            if (step + 1) % args.gradient_accumulation_steps == 0:
                optimizer.step()
                optimizer.zero_grad()
                scheduler.step()
                global_step += 1
                output_flag=True  # NOTE(review): never read afterwards
                # Running perplexity over the steps since the last log point.
                avg_loss=round(np.exp((tr_loss - logging_loss) /(global_step- tr_nb)),4)
                if global_step % args.logging_steps == 0:
                    logger.info(" steps: %s ppl: %s", global_step, round(avg_loss,5))
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    tb_writer.add_scalar('lr', scheduler.get_last_lr()[0], global_step)
                    tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss
                    tr_nb=global_step

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    checkpoint_prefix = "checkpoint"
                    # Save model checkpoint
                    if args.evaluate_during_training:  # Only evaluate when single GPU otherwise metrics may not average well
                        results = evaluate(args, model, tokenizer, eval_when_training=True)
                        for key, value in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                            logger.info(" %s = %s", key, round(value,4))
                        output_dir = os.path.join(args.output_dir, '{}-{}-{}'.format(checkpoint_prefix, global_step, round(results['perplexity'],4)))
                    else:
                        output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)

                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)

                    # Always refresh the rolling checkpoint-last directory so
                    # interrupted runs can resume.
                    last_output_dir = os.path.join(args.output_dir, 'checkpoint-last')
                    if not os.path.exists(last_output_dir):
                        os.makedirs(last_output_dir)
                    model_to_save.save_pretrained(last_output_dir)
                    tokenizer.save_pretrained(last_output_dir)
                    idx_file = os.path.join(last_output_dir, 'idx_file.txt')
                    with open(idx_file, 'w', encoding='utf-8') as idxf:
                        idxf.write(str(0) + '\n')

                    torch.save(optimizer.state_dict(), os.path.join(last_output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(last_output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", last_output_dir)

                    step_file = os.path.join(last_output_dir, 'step_file.txt')
                    with open(step_file, 'w', encoding='utf-8') as stepf:
                        stepf.write(str(global_step) + '\n')

            if args.max_steps > 0 and global_step > args.max_steps:
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            break

        # Save a checkpoint at the end of every epoch.
        output_dir = os.path.join(args.output_dir, 'epoch_{}'.format(idx+1))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        model_to_save = model.module if hasattr(model, 'module') else model
        ckpt_output_path = os.path.join(output_dir, 'subject_model.pth')
        logger.info("Saving model checkpoint to %s", ckpt_output_path)
        torch.save(model_to_save.state_dict(), ckpt_output_path)

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step
284
+
285
+
286
def evaluate(args, model, tokenizer, prefix="", eval_when_training=False):
    """Compute code-token perplexity of `model` on the dev split.

    Writes the result to eval_results.txt under the output dir and returns
    {"perplexity": float}.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_output_dir = args.output_dir

    eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)

    if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(eval_output_dir)

    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # multi-gpu evaluate
    if args.n_gpu > 1 and eval_when_training is False:
        model = torch.nn.DataParallel(model)

    # Eval!
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()

    for step, (batch, token_labels) in enumerate(eval_dataloader):

        inputs = batch.to(args.device)
        # Same masking convention as in train(): attn_mask covers real
        # tokens, loss_mask covers code positions (label == 2) only.
        attn_mask = torch.tensor(token_labels.clone().detach() != 0, dtype=torch.uint8, device=args.device)
        loss_mask = torch.tensor(token_labels.clone().detach() == 2, dtype=torch.uint8, device=args.device)
        with torch.no_grad():
            outputs = model(inputs, attention_mask=attn_mask)
            logits = outputs[0]
            labels = inputs
            # Shift so position t predicts token t+1.
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens and keep only code positions.
            loss_fct = CrossEntropyLoss()
            flatten_shift_loss_mask = loss_mask[..., :-1].contiguous().view(-1)
            ids = torch.nonzero(flatten_shift_loss_mask).view(-1)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1))[ids], shift_labels.view(-1)[ids])
            eval_loss += loss.mean().item()
        nb_eval_steps += 1

    # Perplexity = exp of the mean per-batch cross-entropy.
    eval_loss = eval_loss / nb_eval_steps
    perplexity = torch.exp(torch.tensor(eval_loss))

    result = {
        "perplexity": float(perplexity)
    }

    output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        for key in sorted(result.keys()):
            writer.write("%s = %s\n" % (key, str(result[key])))

    return result
355
+
356
def eval_bleu(args, model, tokenizer, file_type='test', num=2000):
    """Generate code for up to `num` examples with beam search and score them.

    Returns (bleu_score, EM-percentage) for the 'dev' split; for 'test' the
    gold code is unavailable here, so (0, 0) is returned after writing the
    predictions to <output_dir>/test.output.
    NOTE(review): uses torch.cuda.LongTensor directly, so this path requires
    a CUDA device — confirm before running CPU-only.
    """
    dataset = concodeDataset(tokenizer, args, logger, file_type=file_type, block_size=args.block_size, mode='test')
    test_sampler = SequentialSampler(dataset)
    test_dataloader = DataLoader(dataset, sampler=test_sampler, batch_size=1)
    model.to(args.device)
    model.zero_grad()
    model.eval()

    preds = []
    max_gen_len = 100  # hard cap on generated tokens per example
    for step, (batch, token_labels) in enumerate(test_dataloader):
        if step >= num:
            break
        inputs = batch.to(args.device)

        with torch.no_grad():
            beam_size = 10
            m = torch.nn.LogSoftmax(dim=-1)
            # Forward the full prompt once; outputs[1] is the key/value cache.
            outputs = model(inputs)[1]
            p = []
            # Token id 0 is used as padding for generations shorter than max_gen_len.
            zero = torch.cuda.LongTensor(1).fill_(0)
            for i in range(inputs.shape[0]):
                # Compatible with transformers version 3.3.0 and 4.13.0:
                # newer versions return per-layer (key, value) tuples, older
                # ones a single stacked tensor per layer.
                past = [torch.cat([x[0].unsqueeze(0),x[1].unsqueeze(0)],dim=0) if type(x)==tuple else x for x in outputs]
                # Replicate example i's cache across the beam dimension.
                past_hidden = [x[:, i:i+1].expand(-1, beam_size, -1, -1, -1) for x in past]
                beam = Beam(beam_size, tokenizer.bos_token_id, tokenizer.eos_token_id)
                input_ids = None
                for _ in range(max_gen_len):
                    if beam.done():
                        break
                    # One token per live hypothesis, fed with the shared cache.
                    input_ids = beam.getCurrentState()
                    transformer_outputs = model(input_ids, past_key_values=past_hidden)
                    out = m(transformer_outputs[0][:, -1, :]).data
                    beam.advance(out)
                    # Reorder the cache to follow the surviving beams.
                    past = [torch.cat([x[0].unsqueeze(0),x[1].unsqueeze(0)],dim=0) if type(x)==tuple else x for x in transformer_outputs[1]]
                    past_hidden = [x.data.index_select(1, beam.getCurrentOrigin()) for x in past]
                hyp = beam.getHyp(beam.getFinal())
                pred =beam.buildTargetTokens(hyp)[:beam_size]

                # Right-pad every hypothesis with token 0 up to max_gen_len.
                pred = [torch.cat([x.view(-1) for x in p]+[zero]*(max_gen_len-len(p))).view(1,-1) for p in pred]
                p.append(torch.cat(pred, 0).unsqueeze(0))
            p = torch.cat(p, 0)
            for pred in p:
                # Keep only the top beam; truncate at the first padding token.
                t = pred[0].cpu().numpy()
                t = list(t)
                if 0 in t:
                    t = t[:t.index(0)]
                text = tokenizer.decode(t, clean_up_tokenization_spaces=False)
                preds.append(text)

        if step % args.logging_steps == 0:
            logger.info(f"{step} are done!")

    # Load gold code from the raw json lines for the same `num` examples.
    golds = []
    datafile = os.path.join(args.data_dir, f"{file_type}.json")
    datas = open(datafile).readlines()
    for x in datas[:num]:
        x = json.loads(x)
        golds.append(x["code"])

    assert len(preds) == len(golds)

    EM = []
    with open(os.path.join(args.output_dir, f"{file_type}.output"), 'w') as f, open(os.path.join(args.output_dir, f"{file_type}.gold"), 'w') as f1:
        for pred, gold in zip(preds, golds):
            f.write(pred+'\n')
            f1.write(gold+'\n')
            EM.append(pred.split() == gold.split())

    if file_type == "test":
        return 0, 0

    bleu_score = round(_bleu(os.path.join(args.output_dir, f"{file_type}.gold"), os.path.join(args.output_dir, f"{file_type}.output")), 2)
    EM = round(np.mean(EM) * 100, 2)
    return bleu_score, EM
445
+
446
+
447
+
448
def main():
    """Entry point: parse CLI args, set up (distributed) devices and logging,
    load or build the model/tokenizer, then run train / eval / inference
    according to the --do_train / --do_eval / --do_infer flags."""
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data path.")
    parser.add_argument("--langs", default=None, type=str, required=True,
                        help="Languages to train, if all, train all languages in data_dir")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--model_type", default="gpt2", type=str,
                        help="The model architecture to be fine-tuned.")
    parser.add_argument("--pretrain_dir", default="", type=str,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument("--config_dir", type=str,
                        help="config name. Required when training from scratch")
    parser.add_argument("--tokenizer_dir", type=str,
                        help="Pre-trained tokenizer dir. Required when training from scratch")
    parser.add_argument("--load_name", type=str, default="pretrained",
                        help="Load pretrained model name")

    parser.add_argument("--mlm", action='store_true',
                        help="Train with masked-language modeling loss instead of language modeling.")
    parser.add_argument("--mlm_probability", type=float, default=0.15,
                        help="Ratio of tokens to mask for masked language modeling loss")

    parser.add_argument("--cache_dir", default="", type=str,
                        help="Optional directory to store the pre-trained models downloaded from s3 (instread of the default one)")
    parser.add_argument("--block_size", default=1024, type=int,
                        help="Optional input sequence length after tokenization."
                             "The training dataset will be truncated in block of this size for training."
                             "Default to the model max input length for single sentence inputs (take into account special tokens).")
    parser.add_argument("--do_train", action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_infer", action='store_true',
                        help="Whether to run inference on test set.")
    parser.add_argument("--evaluate_during_training", action='store_true',
                        help="Run evaluation during training at each logging step.")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--per_gpu_train_batch_size", default=2, type=int,
                        help="Batch size per GPU/CPU for training.")
    parser.add_argument("--per_gpu_eval_batch_size", default=4, type=int,
                        help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight deay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-8, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=1.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
    parser.add_argument("--warmup_steps", default=0, type=int,
                        help="Linear warmup over warmup_steps.")

    parser.add_argument('--logging_steps', type=int, default=10,
                        help="Log every X updates steps.")
    parser.add_argument('--save_steps', type=int, default=50,
                        help="Save checkpoint every X updates steps.")
    parser.add_argument('--save_total_limit', type=int, default=None,
                        help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
    parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number")
    parser.add_argument("--no_cuda", action='store_true',
                        help="Avoid using CUDA when available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="random seed for initialization")

    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=-1,
                        help="For distributed training: local_rank")
    parser.add_argument("--node_index", type=int, default=-1,
                        help="node index if multi-node running")
    parser.add_argument("--gpu_per_node", type=int, default=-1,
                        help="num of gpus per node")
    parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")

    parser.add_argument('--log_file', type=str, default='')
    parser.add_argument('--tensorboard_dir', type=str)

    pool = None
    args = parser.parse_args()

    # BERT-style models only have masked-LM heads, so plain LM training is invalid.
    if args.model_type in ["bert", "roberta", "distilbert"] and not args.mlm:
        raise ValueError("BERT and RoBERTa do not have LM heads but masked LM heads. They must be run using the --mlm "
                         "flag (masked language modeling).")

    if os.path.exists(args.output_dir) and os.listdir(
            args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir))

    # Setup distant debugging if needed
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    logger.warning("local_rank: %d, node_index: %d, gpu_per_node: %d"%(args.local_rank, args.node_index, args.gpu_per_node))
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        # Convert the per-node rank into a global rank across all nodes.
        args.local_rank += args.node_index * args.gpu_per_node
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s, world size: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16,
                   torch.distributed.get_world_size() if args.local_rank != -1 else 1)

    # Also mirror logs to a file via a FileHandler.
    fh = logging.FileHandler(args.log_file)
    logger.addHandler(fh)

    # Set seed
    set_seed(args)

    # Load pretrained model and tokenizer
    if args.local_rank not in [-1, 0]:
        torch.distributed.barrier()  # Barrier to make sure only the first process in distributed training download model & vocab

    # Resume bookkeeping: if a checkpoint-last directory exists, restart from it.
    args.start_epoch = 0
    args.start_step = 0
    checkpoint_last = os.path.join(args.output_dir, 'checkpoint-last')
    if args.do_train and os.path.exists(checkpoint_last) and os.listdir(checkpoint_last):
        args.pretrain_dir = os.path.join(checkpoint_last)
        args.config_name = os.path.join(checkpoint_last, 'config.json')
        idx_file = os.path.join(checkpoint_last, 'idx_file.txt')
        with open(idx_file, encoding='utf-8') as idxf:
            args.start_epoch = int(idxf.readlines()[0].strip()) + 1

        step_file = os.path.join(checkpoint_last, 'step_file.txt')
        if os.path.exists(step_file):
            with open(step_file, encoding='utf-8') as stepf:
                args.start_step = int(stepf.readlines()[0].strip())

        logger.info("reload model from {}, resume from {} epoch".format(checkpoint_last, args.start_epoch))

    # Load pre-trained model (or build one from scratch from config/tokenizer dirs).
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    pretrained = args.pretrain_dir
    if pretrained:
        tokenizer = tokenizer_class.from_pretrained(pretrained, do_lower_case=args.do_lower_case, bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', sep_token='concode_elem_sep')
        logger.info(tokenizer.encode("<s> hello world <pad> </s>"))
        model = model_class.from_pretrained(pretrained)
        # Embeddings must be resized because special tokens were added above.
        model.resize_token_embeddings(len(tokenizer))
        update_config(model, tokenizer)
        logger.info(model.config)
    else:
        tokenizer = tokenizer_class.from_pretrained(args.tokenizer_dir, bos_token='<s>', eos_token='</s>', pad_token='<pad>', unk_token='<|UNKNOWN|>', sep_token='concode_elem_sep')
        args.vocab_size = tokenizer.vocab_size
        config = config_class.from_pretrained(args.config_dir)
        model = model_class(config)
        model.resize_token_embeddings(len(tokenizer))
        update_config(model, tokenizer)

    model_parameters = model.parameters()
    num_params = sum([np.prod(p.size()) for p in model_parameters])
    logger.info(f"Model has a total of {num_params} trainable parameters")

    if args.local_rank == 0:
        torch.distributed.barrier()  # End of barrier to make sure only the first process in distributed training download model & vocab

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)

        global_step, tr_loss = train(args, train_dataset, model, tokenizer, fh, pool)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    if args.do_eval:  # only works on 1 GPU
        dev_bleu, dev_EM = eval_bleu(args, model, tokenizer, file_type='dev', num=2000)
        logger.info(f"dev bleu: {dev_bleu}, dev EM: {dev_EM}")

    if args.do_infer:  # only works on 1 GPU
        test_bleu, test_EM = eval_bleu(args, model, tokenizer, file_type='test', num=2000)
        logger.info(f"test bleu: {test_bleu}, test EM: {test_EM}")
664
# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    main()
Text-code/text-to-code/code/train.sh ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ LANG=java
2
+ DATADIR=../dataset
3
+ OUTPUTDIR=../model
4
+ PRETRAINDIR=microsoft/CodeGPT-small-java-adaptedGPT2 # will download pre-trained CodeGPT model
5
+ LOGFILE=text2code_concode.log
6
+ PER_NODE_GPU=2 # modify YOUR_GPU_NUM
7
+
8
+ CUDA_VISIBLE_DEVICES=2,3 python run.py \
9
+ --data_dir=$DATADIR \
10
+ --langs=$LANG \
11
+ --output_dir=$OUTPUTDIR \
12
+ --pretrain_dir=$PRETRAINDIR \
13
+ --log_file=$LOGFILE \
14
+ --model_type=gpt2 \
15
+ --block_size=512 \
16
+ --do_train \
17
+ --node_index 0 \
18
+ --gpu_per_node $PER_NODE_GPU \
19
+ --learning_rate=5e-5 \
20
+ --weight_decay=0.01 \
21
+ --evaluate_during_training \
22
+ --per_gpu_train_batch_size=6 \
23
+ --per_gpu_eval_batch_size=12 \
24
+ --gradient_accumulation_steps=2 \
25
+ --num_train_epochs=30 \
26
+ --logging_steps=100 \
27
+ --save_steps=5000 \
28
+ --overwrite_output_dir \
29
+ --seed=42
Text-code/text-to-code/data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb2b374c733f0a4d1b7e65aa063b7f1af6a38eab55fb9c26cf5c20e435934ac9
3
+ size 20278219