# CodeBLEU data-flow match: extracts normalized data-flow graphs from candidate
# and reference programs with tree-sitter and scores how many reference
# data-flow edges the candidates reproduce.
def corpus_dataflow_match(references, candidates, lang):
    """Compute the corpus-level data-flow match score (CodeBLEU component).

    Args:
        references: one entry per candidate; each entry is a list of reference
            code strings.
        candidates: list of candidate code strings.
        lang: language key into ``dfg_function`` (e.g. 'python', 'java'),
            also used to load the tree-sitter grammar.

    Returns:
        The fraction of reference data-flow edges matched by the candidates,
        or 0 when no reference data-flow could be extracted at all.
    """
    LANGUAGE = Language((root_dir + '/parser/languages.so'), lang)
    parser = Parser()
    parser.set_language(LANGUAGE)
    # Bundle the tree-sitter parser with the language-specific DFG extractor.
    parser = [parser, dfg_function[lang]]
    match_count = 0
    total_count = 0
    for (references_sample, candidate) in zip(references, candidates):
        try:
            # Bug fix: comments were previously stripped with a hard-coded
            # 'java' setting regardless of the requested language.
            candidate = remove_comments_and_docstrings(candidate, lang)
        except Exception:
            pass  # best-effort: fall back to the raw candidate text
        # The candidate DFG does not depend on the reference, so extract it once.
        cand_dfg = get_data_flow(candidate, parser)
        normalized_cand_dfg_base = normalize_dataflow(cand_dfg)
        for reference in references_sample:
            try:
                reference = remove_comments_and_docstrings(reference, lang)
            except Exception:
                pass  # best-effort: fall back to the raw reference text
            ref_dfg = get_data_flow(reference, parser)
            normalized_ref_dfg = normalize_dataflow(ref_dfg)
            if (len(normalized_ref_dfg) > 0):
                total_count += len(normalized_ref_dfg)
                # Fresh copy per reference so matched edges can be consumed.
                normalized_cand_dfg = list(normalized_cand_dfg_base)
                for dataflow in normalized_ref_dfg:
                    if (dataflow in normalized_cand_dfg):
                        match_count += 1
                        # Consume the edge so duplicates are not double-counted.
                        normalized_cand_dfg.remove(dataflow)
    if (total_count == 0):
        print('WARNING: There is no reference data-flows extracted from the whole corpus, and the data-flow match score degenerates to 0. Please consider ignoring this score.')
        return 0
    score = (match_count / total_count)
    return score
|
def get_data_flow(code, parser):
    """Parse ``code`` and extract its data-flow graph.

    Args:
        code: source code string.
        parser: pair [tree_sitter_parser, dfg_extractor_function] as built by
            corpus_dataflow_match.

    Returns:
        A list of 5-tuples (token, index, relationship, parent_tokens,
        parent_indices), deduplicated by token index; empty on parse failure.
    """
    try:
        tree = parser[0].parse(bytes(code, 'utf8'))
        root_node = tree.root_node
        tokens_index = tree_to_token_index(root_node)
        code = code.split('\n')
        code_tokens = [index_to_code_token(x, code) for x in tokens_index]
        index_to_code = {}
        # NOTE(review): this loop rebinds `code` to each token string,
        # shadowing the list of source lines above; the fallback `code.split()`
        # below only sees the original string when the failure happens before
        # this point — verify that is the intended behavior.
        for (idx, (index, code)) in enumerate(zip(tokens_index, code_tokens)):
            index_to_code[index] = (idx, code)
        try:
            # Run the language-specific DFG extractor; treat any failure as "no flow".
            (DFG, _) = parser[1](root_node, index_to_code, {})
        except:
            DFG = []
        DFG = sorted(DFG, key=(lambda x: x[1]))
        # Keep only edges whose endpoints participate in at least one relation.
        indexs = set()
        for d in DFG:
            if (len(d[(- 1)]) != 0):
                indexs.add(d[1])
                for x in d[(- 1)]:
                    indexs.add(x)
        new_DFG = []
        for d in DFG:
            if (d[1] in indexs):
                new_DFG.append(d)
        codes = code_tokens
        dfg = new_DFG
    except:
        # Parsing failed entirely: fall back to whitespace tokens and no flow.
        codes = code.split()
        dfg = []
    # Merge edges that share the same token index, unioning their parents.
    dic = {}
    for d in dfg:
        if (d[1] not in dic):
            dic[d[1]] = d
        else:
            dic[d[1]] = (d[0], d[1], d[2], list(set((dic[d[1]][3] + d[3]))), list(set((dic[d[1]][4] + d[4]))))
    DFG = []
    for d in dic:
        DFG.append(dic[d])
    dfg = DFG
    return dfg
|
def normalize_dataflow_item(dataflow_item):
    """Rename the variables of one data-flow tuple to canonical ``var_<i>`` names.

    Takes a 5-tuple (name, pos, relationship, parent_names, parent_positions)
    and returns (normalized_name, relationship, normalized_parent_names);
    positions are dropped.

    NOTE(review): numbering follows ``set`` iteration order, which is not
    stable across interpreter runs under hash randomization.
    """
    (var_name, _pos, relationship, parent_names, _parent_positions) = dataflow_item
    distinct = list(set((parent_names + [var_name])))
    renaming = {name: ('var_' + str(i)) for (i, name) in enumerate(distinct)}
    return (renaming[var_name], relationship, [renaming[p] for p in parent_names])
|
def normalize_dataflow(dataflow):
    """Rename all variables across a DFG to canonical ``var_<i>`` names.

    Names are numbered in first-seen order (a relation's parents before its
    target), so two alpha-equivalent graphs normalize identically.

    Args:
        dataflow: list of 5-tuples (name, pos, relationship, parent_names,
            parent_positions).

    Returns:
        List of (normalized_name, relationship, normalized_parent_names)
        triples; positions are dropped.
    """
    renaming = {}

    def _canonical(name):
        # Assign the next var_<i> on first sight, then reuse it.
        if (name not in renaming):
            renaming[name] = ('var_' + str(len(renaming)))
        return renaming[name]

    normalized = []
    for item in dataflow:
        parents = [_canonical(p) for p in item[3]]
        normalized.append((_canonical(item[0]), item[2], parents))
    return normalized
|
def DFG_python(root_node, index_to_code, states):
    """Recursively extract the data-flow graph of a Python tree-sitter subtree.

    Args:
        root_node: current tree-sitter node.
        index_to_code: dict mapping (start_point, end_point) spans to
            (token_index, token_text) pairs for every token in the source.
        states: dict mapping a variable name to the list of token indices
            that may currently define it.

    Returns:
        A pair (DFG, states): DFG is a list of 5-tuples
        (token, index, relationship, parent_tokens, parent_indices) sorted by
        token index; states is the updated definition map.
    """
    assignment = ['assignment', 'augmented_assignment', 'for_in_clause']
    if_statement = ['if_statement']
    for_statement = ['for_statement']
    while_statement = ['while_statement']
    do_first_statement = ['for_in_clause']
    def_statement = ['default_parameter']
    # Work on a copy so the caller's states map is never mutated.
    states = states.copy()
    # Leaf token (string literals count as one token); comments carry no flow.
    if (((len(root_node.children) == 0) or (root_node.type in ['string_literal', 'string', 'character_literal'])) and (root_node.type != 'comment')):
        (idx, code) = index_to_code[(root_node.start_point, root_node.end_point)]
        if (root_node.type == code):
            # Keywords/punctuation (node type equals its text) carry no data flow.
            return ([], states)
        elif (code in states):
            # Known variable: it comes from its latest definition sites.
            return ([(code, idx, 'comesFrom', [code], states[code].copy())], states)
        else:
            if (root_node.type == 'identifier'):
                # First sighting of an identifier defines it here.
                states[code] = [idx]
            return ([(code, idx, 'comesFrom', [], [])], states)
    elif (root_node.type in def_statement):
        # Default parameter: the parameter name comes from its default value.
        name = root_node.child_by_field_name('name')
        value = root_node.child_by_field_name('value')
        DFG = []
        if (value is None):
            indexs = tree_to_variable_index(name, index_to_code)
            for index in indexs:
                (idx, code) = index_to_code[index]
                DFG.append((code, idx, 'comesFrom', [], []))
                states[code] = [idx]
            return (sorted(DFG, key=(lambda x: x[1])), states)
        else:
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            (temp, states) = DFG_python(value, index_to_code, states)
            DFG += temp
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))
                states[code1] = [idx1]
            return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in assignment):
        # Assignment: left-hand tokens are computedFrom right-hand tokens.
        if (root_node.type == 'for_in_clause'):
            right_nodes = [root_node.children[(- 1)]]
            left_nodes = [root_node.child_by_field_name('left')]
        else:
            if (root_node.child_by_field_name('right') is None):
                return ([], states)
            left_nodes = [x for x in root_node.child_by_field_name('left').children if (x.type != ',')]
            right_nodes = [x for x in root_node.child_by_field_name('right').children if (x.type != ',')]
            # Unequal tuple arity (or no children): fall back to whole-node pairing.
            if (len(right_nodes) != len(left_nodes)):
                left_nodes = [root_node.child_by_field_name('left')]
                right_nodes = [root_node.child_by_field_name('right')]
            if (len(left_nodes) == 0):
                left_nodes = [root_node.child_by_field_name('left')]
            if (len(right_nodes) == 0):
                right_nodes = [root_node.child_by_field_name('right')]
        DFG = []
        # Evaluate right-hand sides first so their reads use the pre-assignment states.
        for node in right_nodes:
            (temp, states) = DFG_python(node, index_to_code, states)
            DFG += temp
        for (left_node, right_node) in zip(left_nodes, right_nodes):
            left_tokens_index = tree_to_variable_index(left_node, index_to_code)
            right_tokens_index = tree_to_variable_index(right_node, index_to_code)
            temp = []
            for token1_index in left_tokens_index:
                (idx1, code1) = index_to_code[token1_index]
                temp.append((code1, idx1, 'computedFrom', [index_to_code[x][1] for x in right_tokens_index], [index_to_code[x][0] for x in right_tokens_index]))
                states[code1] = [idx1]
            DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in if_statement):
        # Conditional: analyse each arm from the pre-branch states, then merge.
        DFG = []
        current_states = states.copy()
        others_states = []
        tag = False
        if ('else' in root_node.type):
            tag = True
        for child in root_node.children:
            if ('else' in child.type):
                tag = True
            if (child.type not in ['elif_clause', 'else_clause']):
                (temp, current_states) = DFG_python(child, index_to_code, current_states)
                DFG += temp
            else:
                # elif/else arms start from the original (pre-branch) states.
                (temp, new_states) = DFG_python(child, index_to_code, states)
                DFG += temp
                others_states.append(new_states)
        others_states.append(current_states)
        if (tag is False):
            # No else-arm: the branch may be skipped, so keep the incoming states too.
            others_states.append(states)
        # Union the definition sites from all arms.
        new_states = {}
        for dic in others_states:
            for key in dic:
                if (key not in new_states):
                    new_states[key] = dic[key].copy()
                else:
                    new_states[key] += dic[key]
        for key in new_states:
            new_states[key] = sorted(list(set(new_states[key])))
        return (sorted(DFG, key=(lambda x: x[1])), new_states)
    elif (root_node.type in for_statement):
        # For loop: two passes propagate flows across iterations; duplicate edges merged.
        DFG = []
        for i in range(2):
            right_nodes = [x for x in root_node.child_by_field_name('right').children if (x.type != ',')]
            left_nodes = [x for x in root_node.child_by_field_name('left').children if (x.type != ',')]
            if (len(right_nodes) != len(left_nodes)):
                left_nodes = [root_node.child_by_field_name('left')]
                right_nodes = [root_node.child_by_field_name('right')]
            if (len(left_nodes) == 0):
                left_nodes = [root_node.child_by_field_name('left')]
            if (len(right_nodes) == 0):
                right_nodes = [root_node.child_by_field_name('right')]
            for node in right_nodes:
                (temp, states) = DFG_python(node, index_to_code, states)
                DFG += temp
            for (left_node, right_node) in zip(left_nodes, right_nodes):
                left_tokens_index = tree_to_variable_index(left_node, index_to_code)
                right_tokens_index = tree_to_variable_index(right_node, index_to_code)
                temp = []
                for token1_index in left_tokens_index:
                    (idx1, code1) = index_to_code[token1_index]
                    temp.append((code1, idx1, 'computedFrom', [index_to_code[x][1] for x in right_tokens_index], [index_to_code[x][0] for x in right_tokens_index]))
                    states[code1] = [idx1]
                DFG += temp
            if (root_node.children[(- 1)].type == 'block'):
                (temp, states) = DFG_python(root_node.children[(- 1)], index_to_code, states)
                DFG += temp
        # Merge edges duplicated by the two passes, unioning their parents.
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in while_statement):
        # While loop: same two-pass + merge strategy as for loops.
        DFG = []
        for i in range(2):
            for child in root_node.children:
                (temp, states) = DFG_python(child, index_to_code, states)
                DFG += temp
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    else:
        # Generic node: visit do-first children (for_in_clause) before the rest.
        DFG = []
        for child in root_node.children:
            if (child.type in do_first_statement):
                (temp, states) = DFG_python(child, index_to_code, states)
                DFG += temp
        for child in root_node.children:
            if (child.type not in do_first_statement):
                (temp, states) = DFG_python(child, index_to_code, states)
                DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
|
def DFG_java(root_node, index_to_code, states):
    """Recursively extract the data-flow graph of a Java tree-sitter subtree.

    Same contract as DFG_python: returns (DFG, states) where DFG is a list of
    (token, index, relationship, parent_tokens, parent_indices) 5-tuples
    sorted by token index, and states maps variable names to the token
    indices that may currently define them.
    """
    assignment = ['assignment_expression']
    def_statement = ['variable_declarator']
    increment_statement = ['update_expression']
    if_statement = ['if_statement', 'else']
    for_statement = ['for_statement']
    enhanced_for_statement = ['enhanced_for_statement']
    while_statement = ['while_statement']
    do_first_statement = []
    # Work on a copy so the caller's states map is never mutated.
    states = states.copy()
    # Leaf token (string literals count as one token); comments carry no flow.
    if (((len(root_node.children) == 0) or (root_node.type in ['string_literal', 'string', 'character_literal'])) and (root_node.type != 'comment')):
        (idx, code) = index_to_code[(root_node.start_point, root_node.end_point)]
        if (root_node.type == code):
            return ([], states)
        elif (code in states):
            return ([(code, idx, 'comesFrom', [code], states[code].copy())], states)
        else:
            if (root_node.type == 'identifier'):
                states[code] = [idx]
            return ([(code, idx, 'comesFrom', [], [])], states)
    elif (root_node.type in def_statement):
        # Variable declarator: the declared name comes from its initializer (if any).
        name = root_node.child_by_field_name('name')
        value = root_node.child_by_field_name('value')
        DFG = []
        if (value is None):
            indexs = tree_to_variable_index(name, index_to_code)
            for index in indexs:
                (idx, code) = index_to_code[index]
                DFG.append((code, idx, 'comesFrom', [], []))
                states[code] = [idx]
            return (sorted(DFG, key=(lambda x: x[1])), states)
        else:
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            (temp, states) = DFG_java(value, index_to_code, states)
            DFG += temp
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))
                states[code1] = [idx1]
            return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in assignment):
        # Assignment: left-hand tokens are computedFrom right-hand tokens.
        left_nodes = root_node.child_by_field_name('left')
        right_nodes = root_node.child_by_field_name('right')
        DFG = []
        (temp, states) = DFG_java(right_nodes, index_to_code, states)
        DFG += temp
        name_indexs = tree_to_variable_index(left_nodes, index_to_code)
        value_indexs = tree_to_variable_index(right_nodes, index_to_code)
        for index1 in name_indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in value_indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in increment_statement):
        # i++ / i--: the variable is computedFrom itself.
        DFG = []
        indexs = tree_to_variable_index(root_node, index_to_code)
        for index1 in indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in if_statement):
        # Conditional: the then-part continues current_states; once an
        # else-like child is seen (flag), remaining arms restart from the
        # pre-branch states. All resulting states are merged afterwards.
        DFG = []
        current_states = states.copy()
        others_states = []
        flag = False
        tag = False
        if ('else' in root_node.type):
            tag = True
        for child in root_node.children:
            if ('else' in child.type):
                tag = True
            if ((child.type not in if_statement) and (flag is False)):
                (temp, current_states) = DFG_java(child, index_to_code, current_states)
                DFG += temp
            else:
                flag = True
                (temp, new_states) = DFG_java(child, index_to_code, states)
                DFG += temp
                others_states.append(new_states)
        others_states.append(current_states)
        if (tag is False):
            # No else-arm: the branch may be skipped, so keep the incoming states too.
            others_states.append(states)
        new_states = {}
        for dic in others_states:
            for key in dic:
                if (key not in new_states):
                    new_states[key] = dic[key].copy()
                else:
                    new_states[key] += dic[key]
        for key in new_states:
            new_states[key] = sorted(list(set(new_states[key])))
        return (sorted(DFG, key=(lambda x: x[1])), new_states)
    elif (root_node.type in for_statement):
        # For loop: one pass over all children, then a second pass over the
        # children after the local_variable_declaration (init runs once).
        DFG = []
        for child in root_node.children:
            (temp, states) = DFG_java(child, index_to_code, states)
            DFG += temp
        flag = False
        for child in root_node.children:
            if flag:
                (temp, states) = DFG_java(child, index_to_code, states)
                DFG += temp
            elif (child.type == 'local_variable_declaration'):
                flag = True
        # Merge edges duplicated by the two passes, unioning their parents.
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in enhanced_for_statement):
        # for (T name : value) body — two passes propagate flows across iterations.
        name = root_node.child_by_field_name('name')
        value = root_node.child_by_field_name('value')
        body = root_node.child_by_field_name('body')
        DFG = []
        for i in range(2):
            (temp, states) = DFG_java(value, index_to_code, states)
            DFG += temp
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
                states[code1] = [idx1]
            (temp, states) = DFG_java(body, index_to_code, states)
            DFG += temp
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in while_statement):
        # While loop: two passes + duplicate-edge merge.
        DFG = []
        for i in range(2):
            for child in root_node.children:
                (temp, states) = DFG_java(child, index_to_code, states)
                DFG += temp
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    else:
        # Generic node: visit do-first children (none declared for Java) first.
        DFG = []
        for child in root_node.children:
            if (child.type in do_first_statement):
                (temp, states) = DFG_java(child, index_to_code, states)
                DFG += temp
        for child in root_node.children:
            if (child.type not in do_first_statement):
                (temp, states) = DFG_java(child, index_to_code, states)
                DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
|
def DFG_csharp(root_node, index_to_code, states):
    """Recursively extract the data-flow graph of a C# tree-sitter subtree.

    Same contract as DFG_python: returns (DFG, states) where DFG is a list of
    (token, index, relationship, parent_tokens, parent_indices) 5-tuples
    sorted by token index, and states maps variable names to the token
    indices that may currently define them.
    """
    assignment = ['assignment_expression']
    def_statement = ['variable_declarator']
    increment_statement = ['postfix_unary_expression']
    if_statement = ['if_statement', 'else']
    for_statement = ['for_statement']
    enhanced_for_statement = ['for_each_statement']
    while_statement = ['while_statement']
    do_first_statement = []
    # Work on a copy so the caller's states map is never mutated.
    states = states.copy()
    # Leaf token (string literals count as one token); comments carry no flow.
    if (((len(root_node.children) == 0) or (root_node.type in ['string_literal', 'string', 'character_literal'])) and (root_node.type != 'comment')):
        (idx, code) = index_to_code[(root_node.start_point, root_node.end_point)]
        if (root_node.type == code):
            return ([], states)
        elif (code in states):
            return ([(code, idx, 'comesFrom', [code], states[code].copy())], states)
        else:
            if (root_node.type == 'identifier'):
                states[code] = [idx]
            return ([(code, idx, 'comesFrom', [], [])], states)
    elif (root_node.type in def_statement):
        # Variable declarator: the C# grammar exposes the initializer as the
        # second child rather than a named field.
        if (len(root_node.children) == 2):
            name = root_node.children[0]
            value = root_node.children[1]
        else:
            name = root_node.children[0]
            value = None
        DFG = []
        if (value is None):
            indexs = tree_to_variable_index(name, index_to_code)
            for index in indexs:
                (idx, code) = index_to_code[index]
                DFG.append((code, idx, 'comesFrom', [], []))
                states[code] = [idx]
            return (sorted(DFG, key=(lambda x: x[1])), states)
        else:
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            (temp, states) = DFG_csharp(value, index_to_code, states)
            DFG += temp
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))
                states[code1] = [idx1]
            return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in assignment):
        # Assignment: left-hand tokens are computedFrom right-hand tokens.
        left_nodes = root_node.child_by_field_name('left')
        right_nodes = root_node.child_by_field_name('right')
        DFG = []
        (temp, states) = DFG_csharp(right_nodes, index_to_code, states)
        DFG += temp
        name_indexs = tree_to_variable_index(left_nodes, index_to_code)
        value_indexs = tree_to_variable_index(right_nodes, index_to_code)
        for index1 in name_indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in value_indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in increment_statement):
        # i++ / i--: the variable is computedFrom itself.
        DFG = []
        indexs = tree_to_variable_index(root_node, index_to_code)
        for index1 in indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in if_statement):
        # Conditional: then-part continues current_states; after an else-like
        # child (flag), remaining arms restart from the pre-branch states.
        DFG = []
        current_states = states.copy()
        others_states = []
        flag = False
        tag = False
        if ('else' in root_node.type):
            tag = True
        for child in root_node.children:
            if ('else' in child.type):
                tag = True
            if ((child.type not in if_statement) and (flag is False)):
                (temp, current_states) = DFG_csharp(child, index_to_code, current_states)
                DFG += temp
            else:
                flag = True
                (temp, new_states) = DFG_csharp(child, index_to_code, states)
                DFG += temp
                others_states.append(new_states)
        others_states.append(current_states)
        if (tag is False):
            # No else-arm: the branch may be skipped, so keep the incoming states too.
            others_states.append(states)
        new_states = {}
        for dic in others_states:
            for key in dic:
                if (key not in new_states):
                    new_states[key] = dic[key].copy()
                else:
                    new_states[key] += dic[key]
        for key in new_states:
            new_states[key] = sorted(list(set(new_states[key])))
        return (sorted(DFG, key=(lambda x: x[1])), new_states)
    elif (root_node.type in for_statement):
        # For loop: one pass over all children, then a second pass over the
        # children after the local_variable_declaration (init runs once).
        DFG = []
        for child in root_node.children:
            (temp, states) = DFG_csharp(child, index_to_code, states)
            DFG += temp
        flag = False
        for child in root_node.children:
            if flag:
                (temp, states) = DFG_csharp(child, index_to_code, states)
                DFG += temp
            elif (child.type == 'local_variable_declaration'):
                flag = True
        # Merge edges duplicated by the two passes, unioning their parents.
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in enhanced_for_statement):
        # foreach (var name in value) body — two passes across iterations.
        name = root_node.child_by_field_name('left')
        value = root_node.child_by_field_name('right')
        body = root_node.child_by_field_name('body')
        DFG = []
        for i in range(2):
            (temp, states) = DFG_csharp(value, index_to_code, states)
            DFG += temp
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
                states[code1] = [idx1]
            (temp, states) = DFG_csharp(body, index_to_code, states)
            DFG += temp
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in while_statement):
        # While loop: two passes + duplicate-edge merge.
        DFG = []
        for i in range(2):
            for child in root_node.children:
                (temp, states) = DFG_csharp(child, index_to_code, states)
                DFG += temp
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    else:
        # Generic node: visit do-first children (none declared for C#) first.
        DFG = []
        for child in root_node.children:
            if (child.type in do_first_statement):
                (temp, states) = DFG_csharp(child, index_to_code, states)
                DFG += temp
        for child in root_node.children:
            if (child.type not in do_first_statement):
                (temp, states) = DFG_csharp(child, index_to_code, states)
                DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
|
def DFG_ruby(root_node, index_to_code, states):
    """Recursively extract the data-flow graph of a Ruby tree-sitter subtree.

    Same contract as DFG_python: returns (DFG, states).

    NOTE(review): unlike the other DFG_* functions, `states` is only copied in
    the leaf branch, so non-leaf branches may mutate the caller's map via
    `states[code1] = [idx1]` — confirm this asymmetry is intentional.
    """
    assignment = ['assignment', 'operator_assignment']
    if_statement = ['if', 'elsif', 'else', 'unless', 'when']
    for_statement = ['for']
    while_statement = ['while_modifier', 'until']
    do_first_statement = []
    def_statement = ['keyword_parameter']
    # Leaf token (string literals count as one token); comments carry no flow.
    if (((len(root_node.children) == 0) or (root_node.type in ['string_literal', 'string', 'character_literal'])) and (root_node.type != 'comment')):
        states = states.copy()
        (idx, code) = index_to_code[(root_node.start_point, root_node.end_point)]
        if (root_node.type == code):
            return ([], states)
        elif (code in states):
            return ([(code, idx, 'comesFrom', [code], states[code].copy())], states)
        else:
            if (root_node.type == 'identifier'):
                states[code] = [idx]
            return ([(code, idx, 'comesFrom', [], [])], states)
    elif (root_node.type in def_statement):
        # Keyword parameter: the parameter name comes from its default value.
        name = root_node.child_by_field_name('name')
        value = root_node.child_by_field_name('value')
        DFG = []
        if (value is None):
            indexs = tree_to_variable_index(name, index_to_code)
            for index in indexs:
                (idx, code) = index_to_code[index]
                DFG.append((code, idx, 'comesFrom', [], []))
                states[code] = [idx]
            return (sorted(DFG, key=(lambda x: x[1])), states)
        else:
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            (temp, states) = DFG_ruby(value, index_to_code, states)
            DFG += temp
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))
                states[code1] = [idx1]
            return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in assignment):
        # Assignment: pair left/right children; fall back to whole nodes on
        # arity mismatch; operator assignment (x += y) pairs first/last child.
        left_nodes = [x for x in root_node.child_by_field_name('left').children if (x.type != ',')]
        right_nodes = [x for x in root_node.child_by_field_name('right').children if (x.type != ',')]
        if (len(right_nodes) != len(left_nodes)):
            left_nodes = [root_node.child_by_field_name('left')]
            right_nodes = [root_node.child_by_field_name('right')]
        if (len(left_nodes) == 0):
            left_nodes = [root_node.child_by_field_name('left')]
        if (len(right_nodes) == 0):
            right_nodes = [root_node.child_by_field_name('right')]
        if (root_node.type == 'operator_assignment'):
            left_nodes = [root_node.children[0]]
            right_nodes = [root_node.children[(- 1)]]
        DFG = []
        for node in right_nodes:
            (temp, states) = DFG_ruby(node, index_to_code, states)
            DFG += temp
        for (left_node, right_node) in zip(left_nodes, right_nodes):
            left_tokens_index = tree_to_variable_index(left_node, index_to_code)
            right_tokens_index = tree_to_variable_index(right_node, index_to_code)
            temp = []
            for token1_index in left_tokens_index:
                (idx1, code1) = index_to_code[token1_index]
                temp.append((code1, idx1, 'computedFrom', [index_to_code[x][1] for x in right_tokens_index], [index_to_code[x][0] for x in right_tokens_index]))
                states[code1] = [idx1]
            DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in if_statement):
        # Conditional: analyse each arm from the pre-branch states, then merge.
        DFG = []
        current_states = states.copy()
        others_states = []
        tag = False
        if ('else' in root_node.type):
            tag = True
        for child in root_node.children:
            if ('else' in child.type):
                tag = True
            if (child.type not in if_statement):
                (temp, current_states) = DFG_ruby(child, index_to_code, current_states)
                DFG += temp
            else:
                (temp, new_states) = DFG_ruby(child, index_to_code, states)
                DFG += temp
                others_states.append(new_states)
        others_states.append(current_states)
        if (tag is False):
            # No else-arm: the branch may be skipped, so keep the incoming states too.
            others_states.append(states)
        new_states = {}
        for dic in others_states:
            for key in dic:
                if (key not in new_states):
                    new_states[key] = dic[key].copy()
                else:
                    new_states[key] += dic[key]
        for key in new_states:
            new_states[key] = sorted(list(set(new_states[key])))
        return (sorted(DFG, key=(lambda x: x[1])), new_states)
    elif (root_node.type in for_statement):
        # for pattern in value ... end — two passes propagate flows across iterations.
        DFG = []
        for i in range(2):
            left_nodes = [root_node.child_by_field_name('pattern')]
            right_nodes = [root_node.child_by_field_name('value')]
            assert (len(right_nodes) == len(left_nodes))
            for node in right_nodes:
                (temp, states) = DFG_ruby(node, index_to_code, states)
                DFG += temp
            for (left_node, right_node) in zip(left_nodes, right_nodes):
                left_tokens_index = tree_to_variable_index(left_node, index_to_code)
                right_tokens_index = tree_to_variable_index(right_node, index_to_code)
                temp = []
                for token1_index in left_tokens_index:
                    (idx1, code1) = index_to_code[token1_index]
                    temp.append((code1, idx1, 'computedFrom', [index_to_code[x][1] for x in right_tokens_index], [index_to_code[x][0] for x in right_tokens_index]))
                    states[code1] = [idx1]
                DFG += temp
            (temp, states) = DFG_ruby(root_node.child_by_field_name('body'), index_to_code, states)
            DFG += temp
        # Merge edges duplicated by the two passes, unioning their parents.
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in while_statement):
        # While/until loop: two passes + duplicate-edge merge.
        DFG = []
        for i in range(2):
            for child in root_node.children:
                (temp, states) = DFG_ruby(child, index_to_code, states)
                DFG += temp
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    else:
        # Generic node: visit do-first children (none declared for Ruby) first.
        DFG = []
        for child in root_node.children:
            if (child.type in do_first_statement):
                (temp, states) = DFG_ruby(child, index_to_code, states)
                DFG += temp
        for child in root_node.children:
            if (child.type not in do_first_statement):
                (temp, states) = DFG_ruby(child, index_to_code, states)
                DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
|
def DFG_go(root_node, index_to_code, states):
    """Recursively extract the data-flow graph of a Go tree-sitter subtree.

    Same contract as DFG_python: returns (DFG, states) where DFG is a list of
    (token, index, relationship, parent_tokens, parent_indices) 5-tuples
    sorted by token index, and states maps variable names to the token
    indices that may currently define them.
    """
    assignment = ['assignment_statement']
    def_statement = ['var_spec']
    increment_statement = ['inc_statement']
    if_statement = ['if_statement', 'else']
    for_statement = ['for_statement']
    enhanced_for_statement = []
    while_statement = []
    do_first_statement = []
    # Work on a copy so the caller's states map is never mutated.
    states = states.copy()
    # Leaf token (string literals count as one token); comments carry no flow.
    if (((len(root_node.children) == 0) or (root_node.type in ['string_literal', 'string', 'character_literal'])) and (root_node.type != 'comment')):
        (idx, code) = index_to_code[(root_node.start_point, root_node.end_point)]
        if (root_node.type == code):
            return ([], states)
        elif (code in states):
            return ([(code, idx, 'comesFrom', [code], states[code].copy())], states)
        else:
            if (root_node.type == 'identifier'):
                states[code] = [idx]
            return ([(code, idx, 'comesFrom', [], [])], states)
    elif (root_node.type in def_statement):
        # var declaration: the declared name comes from its initializer (if any).
        name = root_node.child_by_field_name('name')
        value = root_node.child_by_field_name('value')
        DFG = []
        if (value is None):
            indexs = tree_to_variable_index(name, index_to_code)
            for index in indexs:
                (idx, code) = index_to_code[index]
                DFG.append((code, idx, 'comesFrom', [], []))
                states[code] = [idx]
            return (sorted(DFG, key=(lambda x: x[1])), states)
        else:
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            (temp, states) = DFG_go(value, index_to_code, states)
            DFG += temp
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))
                states[code1] = [idx1]
            return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in assignment):
        # Assignment: left-hand tokens are computedFrom right-hand tokens.
        left_nodes = root_node.child_by_field_name('left')
        right_nodes = root_node.child_by_field_name('right')
        DFG = []
        (temp, states) = DFG_go(right_nodes, index_to_code, states)
        DFG += temp
        name_indexs = tree_to_variable_index(left_nodes, index_to_code)
        value_indexs = tree_to_variable_index(right_nodes, index_to_code)
        for index1 in name_indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in value_indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in increment_statement):
        # i++: the variable is computedFrom itself.
        DFG = []
        indexs = tree_to_variable_index(root_node, index_to_code)
        for index1 in indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    elif (root_node.type in if_statement):
        # Conditional: then-part continues current_states; after an else-like
        # child (flag), remaining arms restart from the pre-branch states.
        DFG = []
        current_states = states.copy()
        others_states = []
        flag = False
        tag = False
        if ('else' in root_node.type):
            tag = True
        for child in root_node.children:
            if ('else' in child.type):
                tag = True
            if ((child.type not in if_statement) and (flag is False)):
                (temp, current_states) = DFG_go(child, index_to_code, current_states)
                DFG += temp
            else:
                flag = True
                (temp, new_states) = DFG_go(child, index_to_code, states)
                DFG += temp
                others_states.append(new_states)
        others_states.append(current_states)
        if (tag is False):
            # No else-arm: the branch may be skipped, so keep the incoming states too.
            others_states.append(states)
        new_states = {}
        for dic in others_states:
            for key in dic:
                if (key not in new_states):
                    new_states[key] = dic[key].copy()
                else:
                    new_states[key] += dic[key]
        # Unlike DFG_java, the incoming states are always folded in here as well.
        for key in states:
            if (key not in new_states):
                new_states[key] = states[key]
            else:
                new_states[key] += states[key]
        for key in new_states:
            new_states[key] = sorted(list(set(new_states[key])))
        return (sorted(DFG, key=(lambda x: x[1])), new_states)
    elif (root_node.type in for_statement):
        # For loop: one pass over all children, then a second pass starting
        # after the for_clause; the clause's update expression is re-visited
        # to model flows across iterations.
        DFG = []
        for child in root_node.children:
            (temp, states) = DFG_go(child, index_to_code, states)
            DFG += temp
        flag = False
        for child in root_node.children:
            if flag:
                (temp, states) = DFG_go(child, index_to_code, states)
                DFG += temp
            elif (child.type == 'for_clause'):
                if (child.child_by_field_name('update') is not None):
                    (temp, states) = DFG_go(child.child_by_field_name('update'), index_to_code, states)
                    DFG += temp
                flag = True
        # Merge edges duplicated by the two passes, unioning their parents.
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    else:
        # Generic node: visit do-first children (none declared for Go) first.
        DFG = []
        for child in root_node.children:
            if (child.type in do_first_statement):
                (temp, states) = DFG_go(child, index_to_code, states)
                DFG += temp
        for child in root_node.children:
            if (child.type not in do_first_statement):
                (temp, states) = DFG_go(child, index_to_code, states)
                DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
|
def DFG_php(root_node, index_to_code, states):
    """Extract data-flow edges from a PHP tree-sitter parse tree.

    Recursively walks ``root_node`` and returns ``(DFG, states)``:
    ``DFG`` is a list of edges ``(token, idx, relation, src_tokens,
    src_idxs)`` sorted by ``idx``, where ``relation`` is ``'comesFrom'``
    or ``'computedFrom'``; ``states`` maps a variable name to the token
    indices that may currently define it.

    :param root_node: tree-sitter node to analyse.
    :param index_to_code: maps ``(start_point, end_point)`` spans to
        ``(token_idx, token_text)`` pairs.
    :param states: incoming variable-definition state; a copy is updated
        and returned, the caller's dict is not mutated.
    """
    # Node-type tables that drive the dispatch below.
    assignment = ['assignment_expression', 'augmented_assignment_expression']
    def_statement = ['simple_parameter']
    increment_statement = ['update_expression']
    if_statement = ['if_statement', 'else_clause']
    for_statement = ['for_statement']
    enhanced_for_statement = ['foreach_statement']
    while_statement = ['while_statement']
    do_first_statement = []
    states = states.copy()
    # Leaf token (string literals count as a single token); comments are ignored.
    if (((len(root_node.children) == 0) or (root_node.type in ['string_literal', 'string', 'character_literal'])) and (root_node.type != 'comment')):
        (idx, code) = index_to_code[(root_node.start_point, root_node.end_point)]
        if (root_node.type == code):
            # Keyword/operator token (text equals node type): no data flow.
            return ([], states)
        elif (code in states):
            # Known variable: it comes from its current definition sites.
            return ([(code, idx, 'comesFrom', [code], states[code].copy())], states)
        else:
            # First occurrence; identifiers start defining themselves.
            if (root_node.type == 'identifier'):
                states[code] = [idx]
            return ([(code, idx, 'comesFrom', [], [])], states)
    # Parameter definition: the default value (if any) flows into the name.
    elif (root_node.type in def_statement):
        name = root_node.child_by_field_name('name')
        value = root_node.child_by_field_name('default_value')
        DFG = []
        if (value is None):
            indexs = tree_to_variable_index(name, index_to_code)
            for index in indexs:
                (idx, code) = index_to_code[index]
                DFG.append((code, idx, 'comesFrom', [], []))
                states[code] = [idx]
            return (sorted(DFG, key=(lambda x: x[1])), states)
        else:
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            (temp, states) = DFG_php(value, index_to_code, states)
            DFG += temp
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))
                states[code1] = [idx1]
            return (sorted(DFG, key=(lambda x: x[1])), states)
    # Assignment: every variable on the right flows into every variable on the left.
    elif (root_node.type in assignment):
        left_nodes = root_node.child_by_field_name('left')
        right_nodes = root_node.child_by_field_name('right')
        DFG = []
        (temp, states) = DFG_php(right_nodes, index_to_code, states)
        DFG += temp
        name_indexs = tree_to_variable_index(left_nodes, index_to_code)
        value_indexs = tree_to_variable_index(right_nodes, index_to_code)
        for index1 in name_indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in value_indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # Update expression (e.g. $i++): the variable is computed from itself.
    elif (root_node.type in increment_statement):
        DFG = []
        indexs = tree_to_variable_index(root_node, index_to_code)
        for index1 in indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # if/else: each branch is analysed from a copy of the incoming state and
    # the per-branch states are merged (union of definition sites).
    elif (root_node.type in if_statement):
        DFG = []
        current_states = states.copy()
        others_states = []
        flag = False
        tag = False
        if ('else' in root_node.type):
            tag = True
        for child in root_node.children:
            if ('else' in child.type):
                tag = True
            if ((child.type not in if_statement) and (flag is False)):
                # Condition/header part: evaluated unconditionally.
                (temp, current_states) = DFG_php(child, index_to_code, current_states)
                DFG += temp
            else:
                # Branch body: evaluated from the pre-branch state.
                flag = True
                (temp, new_states) = DFG_php(child, index_to_code, states)
                DFG += temp
                others_states.append(new_states)
        others_states.append(current_states)
        # NOTE(review): unlike the other DFG_* variants, this PHP version does
        # not append the untouched incoming `states` when no else-clause was
        # seen (`tag` is computed but never read) -- confirm against upstream.
        new_states = {}
        for dic in others_states:
            for key in dic:
                if (key not in new_states):
                    new_states[key] = dic[key].copy()
                else:
                    new_states[key] += dic[key]
        for key in states:
            if (key not in new_states):
                new_states[key] = states[key]
            else:
                new_states[key] += states[key]
        for key in new_states:
            new_states[key] = sorted(list(set(new_states[key])))
        return (sorted(DFG, key=(lambda x: x[1])), new_states)
    # for loop: children are processed twice (second pass resumes after the
    # first assignment_expression) so loop-carried definitions are captured;
    # duplicate edges are then merged.
    elif (root_node.type in for_statement):
        DFG = []
        for child in root_node.children:
            (temp, states) = DFG_php(child, index_to_code, states)
            DFG += temp
        flag = False
        for child in root_node.children:
            if flag:
                (temp, states) = DFG_php(child, index_to_code, states)
                DFG += temp
            elif (child.type == 'assignment_expression'):
                flag = True
        # Merge edges that share (token, idx, relation), unioning their sources.
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # foreach: the first variable_name child is taken as the iterated
    # expression, the second as the loop variable; body is analysed twice
    # for loop-carried flow.
    elif (root_node.type in enhanced_for_statement):
        name = None
        value = None
        for child in root_node.children:
            if ((child.type == 'variable_name') and (value is None)):
                value = child
            elif ((child.type == 'variable_name') and (name is None)):
                name = child
                break
        body = root_node.child_by_field_name('body')
        DFG = []
        for i in range(2):
            (temp, states) = DFG_php(value, index_to_code, states)
            DFG += temp
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
                states[code1] = [idx1]
            (temp, states) = DFG_php(body, index_to_code, states)
            DFG += temp
        # Merge duplicate edges produced by the two passes.
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # while loop: two passes over the children propagate loop-carried
    # definitions; duplicate edges are merged afterwards.
    elif (root_node.type in while_statement):
        DFG = []
        for i in range(2):
            for child in root_node.children:
                (temp, states) = DFG_php(child, index_to_code, states)
                DFG += temp
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # Generic node: visit `do_first_statement` children first (empty for PHP),
    # then the remaining children, threading `states` through.
    else:
        DFG = []
        for child in root_node.children:
            if (child.type in do_first_statement):
                (temp, states) = DFG_php(child, index_to_code, states)
                DFG += temp
        for child in root_node.children:
            if (child.type not in do_first_statement):
                (temp, states) = DFG_php(child, index_to_code, states)
                DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
|
def DFG_javascript(root_node, index_to_code, states):
    """Extract data-flow edges from a JavaScript tree-sitter parse tree.

    Recursively walks ``root_node`` and returns ``(DFG, states)``:
    ``DFG`` is a list of edges ``(token, idx, relation, src_tokens,
    src_idxs)`` sorted by ``idx``, where ``relation`` is ``'comesFrom'``
    or ``'computedFrom'``; ``states`` maps a variable name to the token
    indices that may currently define it.

    :param root_node: tree-sitter node to analyse.
    :param index_to_code: maps ``(start_point, end_point)`` spans to
        ``(token_idx, token_text)`` pairs.
    :param states: incoming variable-definition state; a copy is updated
        and returned, the caller's dict is not mutated.
    """
    # Node-type tables that drive the dispatch below.
    assignment = ['assignment_pattern', 'augmented_assignment_expression']
    def_statement = ['variable_declarator']
    increment_statement = ['update_expression']
    if_statement = ['if_statement', 'else']
    for_statement = ['for_statement']
    enhanced_for_statement = []
    while_statement = ['while_statement']
    do_first_statement = []
    states = states.copy()
    # Leaf token (string literals count as a single token); comments are ignored.
    if (((len(root_node.children) == 0) or (root_node.type in ['string_literal', 'string', 'character_literal'])) and (root_node.type != 'comment')):
        (idx, code) = index_to_code[(root_node.start_point, root_node.end_point)]
        if (root_node.type == code):
            # Keyword/operator token (text equals node type): no data flow.
            return ([], states)
        elif (code in states):
            # Known variable: it comes from its current definition sites.
            return ([(code, idx, 'comesFrom', [code], states[code].copy())], states)
        else:
            # First occurrence; identifiers start defining themselves.
            if (root_node.type == 'identifier'):
                states[code] = [idx]
            return ([(code, idx, 'comesFrom', [], [])], states)
    # Variable declarator: the initial value (if any) flows into the name.
    elif (root_node.type in def_statement):
        name = root_node.child_by_field_name('name')
        value = root_node.child_by_field_name('value')
        DFG = []
        if (value is None):
            indexs = tree_to_variable_index(name, index_to_code)
            for index in indexs:
                (idx, code) = index_to_code[index]
                DFG.append((code, idx, 'comesFrom', [], []))
                states[code] = [idx]
            return (sorted(DFG, key=(lambda x: x[1])), states)
        else:
            name_indexs = tree_to_variable_index(name, index_to_code)
            value_indexs = tree_to_variable_index(value, index_to_code)
            (temp, states) = DFG_javascript(value, index_to_code, states)
            DFG += temp
            for index1 in name_indexs:
                (idx1, code1) = index_to_code[index1]
                for index2 in value_indexs:
                    (idx2, code2) = index_to_code[index2]
                    DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))
                states[code1] = [idx1]
            return (sorted(DFG, key=(lambda x: x[1])), states)
    # Assignment: every variable on the right flows into every variable on the left.
    elif (root_node.type in assignment):
        left_nodes = root_node.child_by_field_name('left')
        right_nodes = root_node.child_by_field_name('right')
        DFG = []
        (temp, states) = DFG_javascript(right_nodes, index_to_code, states)
        DFG += temp
        name_indexs = tree_to_variable_index(left_nodes, index_to_code)
        value_indexs = tree_to_variable_index(right_nodes, index_to_code)
        for index1 in name_indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in value_indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # Update expression (e.g. i++): the variable is computed from itself.
    elif (root_node.type in increment_statement):
        DFG = []
        indexs = tree_to_variable_index(root_node, index_to_code)
        for index1 in indexs:
            (idx1, code1) = index_to_code[index1]
            for index2 in indexs:
                (idx2, code2) = index_to_code[index2]
                DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))
            states[code1] = [idx1]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # if/else: each branch is analysed from a copy of the incoming state and
    # the per-branch states are merged (union of definition sites).
    elif (root_node.type in if_statement):
        DFG = []
        current_states = states.copy()
        others_states = []
        flag = False
        tag = False
        if ('else' in root_node.type):
            tag = True
        for child in root_node.children:
            if ('else' in child.type):
                tag = True
            if ((child.type not in if_statement) and (flag is False)):
                # Condition/header part: evaluated unconditionally.
                (temp, current_states) = DFG_javascript(child, index_to_code, current_states)
                DFG += temp
            else:
                # Branch body: evaluated from the pre-branch state.
                flag = True
                (temp, new_states) = DFG_javascript(child, index_to_code, states)
                DFG += temp
                others_states.append(new_states)
        others_states.append(current_states)
        # Without an else clause, the untouched incoming state is also a
        # possible outcome of the statement.
        if (tag is False):
            others_states.append(states)
        new_states = {}
        for dic in others_states:
            for key in dic:
                if (key not in new_states):
                    new_states[key] = dic[key].copy()
                else:
                    new_states[key] += dic[key]
        for key in states:
            if (key not in new_states):
                new_states[key] = states[key]
            else:
                new_states[key] += states[key]
        for key in new_states:
            new_states[key] = sorted(list(set(new_states[key])))
        return (sorted(DFG, key=(lambda x: x[1])), new_states)
    # for loop: children are processed twice (second pass resumes after the
    # first variable_declaration) so loop-carried definitions are captured;
    # duplicate edges are then merged.
    elif (root_node.type in for_statement):
        DFG = []
        for child in root_node.children:
            (temp, states) = DFG_javascript(child, index_to_code, states)
            DFG += temp
        flag = False
        for child in root_node.children:
            if flag:
                (temp, states) = DFG_javascript(child, index_to_code, states)
                DFG += temp
            elif (child.type == 'variable_declaration'):
                flag = True
        # Merge edges that share (token, idx, relation), unioning their sources.
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # while loop: two passes over the children propagate loop-carried
    # definitions; duplicate edges are merged afterwards.
    elif (root_node.type in while_statement):
        DFG = []
        for i in range(2):
            for child in root_node.children:
                (temp, states) = DFG_javascript(child, index_to_code, states)
                DFG += temp
        dic = {}
        for x in DFG:
            if ((x[0], x[1], x[2]) not in dic):
                dic[(x[0], x[1], x[2])] = [x[3], x[4]]
            else:
                dic[(x[0], x[1], x[2])][0] = list(set((dic[(x[0], x[1], x[2])][0] + x[3])))
                dic[(x[0], x[1], x[2])][1] = sorted(list(set((dic[(x[0], x[1], x[2])][1] + x[4]))))
        DFG = [(x[0], x[1], x[2], y[0], y[1]) for (x, y) in sorted(dic.items(), key=(lambda t: t[0][1]))]
        return (sorted(DFG, key=(lambda x: x[1])), states)
    # Generic node: visit `do_first_statement` children first (empty for
    # JavaScript), then the remaining children, threading `states` through.
    else:
        DFG = []
        for child in root_node.children:
            if (child.type in do_first_statement):
                (temp, states) = DFG_javascript(child, index_to_code, states)
                DFG += temp
        for child in root_node.children:
            if (child.type not in do_first_statement):
                (temp, states) = DFG_javascript(child, index_to_code, states)
                DFG += temp
        return (sorted(DFG, key=(lambda x: x[1])), states)
|
def remove_comments_and_docstrings(source, lang):
    """Strip comments (and, for Python, docstring-position strings) from *source*.

    Python input is re-tokenised so comment tokens and strings that sit in
    docstring position are dropped; Ruby source is returned untouched; every
    other language is treated as C-like and cleaned with a regex that removes
    ``//`` and ``/* */`` comments while leaving string literals intact.
    Lines that become blank after removal are discarded.
    """
    if lang in ['python']:
        reader = StringIO(source)
        rebuilt = ''
        previous_type = tokenize.INDENT
        prev_end_row = (- 1)
        prev_end_col = 0
        for token in tokenize.generate_tokens(reader.readline):
            tok_type = token[0]
            tok_text = token[1]
            (row, col) = token[2]
            (end_row, end_col) = token[3]
            # Reset the column tracker at the start of each new physical line.
            if row > prev_end_row:
                prev_end_col = 0
            # Re-insert the horizontal whitespace between consecutive tokens.
            if col > prev_end_col:
                rebuilt += ' ' * (col - prev_end_col)
            if tok_type == tokenize.COMMENT:
                pass
            elif tok_type == tokenize.STRING:
                # Keep a string only when it cannot be a docstring: it must
                # neither follow an INDENT/NEWLINE nor start its line.
                if previous_type not in (tokenize.INDENT, tokenize.NEWLINE) and col > 0:
                    rebuilt += tok_text
            else:
                rebuilt += tok_text
            previous_type = tok_type
            prev_end_col = end_col
            prev_end_row = end_row
        return '\n'.join(line for line in rebuilt.split('\n') if line.strip() != '')
    elif lang in ['ruby']:
        # Ruby is not handled; return the source unchanged.
        return source
    else:
        def replacer(match):
            matched = match.group(0)
            # Comment alternatives start with '/'; string literals pass through.
            return ' ' if matched.startswith('/') else matched
        pattern = re.compile('//.*?$|/\\*.*?\\*/|\\\'(?:\\\\.|[^\\\\\\\'])*\\\'|"(?:\\\\.|[^\\\\"])*"', (re.DOTALL | re.MULTILINE))
        cleaned = re.sub(pattern, replacer, source)
        return '\n'.join(line for line in cleaned.split('\n') if line.strip() != '')
|
def tree_to_token_index(root_node):
    """Collect the ``(start_point, end_point)`` span of every leaf token.

    String-literal nodes are treated as single tokens even though they have
    children; comment nodes (and their subtrees) yield no spans.
    """
    is_token = (len(root_node.children) == 0
                or root_node.type in ['string_literal', 'string', 'character_literal'])
    if is_token and root_node.type != 'comment':
        return [(root_node.start_point, root_node.end_point)]
    spans = []
    for node in root_node.children:
        spans.extend(tree_to_token_index(node))
    return spans
|
def tree_to_variable_index(root_node, index_to_code):
    """Collect the spans of leaf tokens that can carry data flow.

    A leaf whose source text equals its node type (keywords, operators,
    punctuation) is skipped; string literals count as single leaves and
    comments are dropped.
    """
    is_token = (len(root_node.children) == 0
                or root_node.type in ['string_literal', 'string', 'character_literal'])
    if is_token and root_node.type != 'comment':
        span = (root_node.start_point, root_node.end_point)
        (_, token_text) = index_to_code[span]
        return [] if root_node.type == token_text else [span]
    spans = []
    for node in root_node.children:
        spans.extend(tree_to_variable_index(node, index_to_code))
    return spans
|
def index_to_code_token(index, code):
    """Recover a token's text from its ``(start_point, end_point)`` span.

    :param index: pair of ``(row, col)`` points delimiting the token.
    :param code: the source file split into lines.
    :return: the token text, possibly spanning several lines.
    """
    (start_row, start_col) = index[0]
    (end_row, end_col) = index[1]
    if start_row == end_row:
        # Token fits on a single line.
        return code[start_row][start_col:end_col]
    # Token spans lines: tail of the first line, whole middle lines,
    # head of the last line.
    pieces = [code[start_row][start_col:]]
    pieces.extend(code[row] for row in range((start_row + 1), end_row))
    pieces.append(code[end_row][:end_col])
    return ''.join(pieces)
|
def calc_syntax_match(references, candidate, lang):
    # Sentence-level wrapper: score a single candidate against its references
    # by wrapping them into one-element corpora and delegating to the
    # corpus-level implementation.
    return corpus_syntax_match([references], [candidate], lang)
|
def corpus_syntax_match(references, candidates, lang):
    """Compute the corpus-level syntax (AST sub-tree) match score.

    Each candidate is parsed with tree-sitter and the fraction of reference
    sub-trees (by S-expression) that also occur in the candidate tree is
    accumulated over the whole corpus.

    :param references: list (one entry per candidate) of lists of reference
        code strings.
    :param candidates: list of candidate code strings.
    :param lang: tree-sitter language name used to load the grammar.
    :return: match_count / total_count over the corpus, or 0 when no
        reference sub-trees could be extracted.
    """
    JAVA_LANGUAGE = Language((root_dir + '/parser/languages.so'), lang)
    parser = Parser()
    parser.set_language(JAVA_LANGUAGE)

    def get_all_sub_trees(root_node):
        # Iterative DFS collecting (sexp, depth) for every non-leaf node.
        # Hoisted out of the loops: the original defined it per reference pair.
        node_stack = []
        sub_tree_sexp_list = []
        depth = 1
        node_stack.append([root_node, depth])
        while (len(node_stack) != 0):
            (cur_node, cur_depth) = node_stack.pop()
            sub_tree_sexp_list.append([cur_node.sexp(), cur_depth])
            for child_node in cur_node.children:
                if (len(child_node.children) != 0):
                    node_stack.append([child_node, (cur_depth + 1)])
        return sub_tree_sexp_list

    match_count = 0
    total_count = 0
    for i in range(len(candidates)):
        references_sample = references[i]
        candidate = candidates[i]
        for reference in references_sample:
            # Comment removal is best-effort; fall back to the raw text.
            try:
                candidate = remove_comments_and_docstrings(candidate, 'java')
            except Exception:
                pass
            try:
                reference = remove_comments_and_docstrings(reference, 'java')
            except Exception:
                pass
            candidate_tree = parser.parse(bytes(candidate, 'utf8')).root_node
            reference_tree = parser.parse(bytes(reference, 'utf8')).root_node
            cand_sexps = [x[0] for x in get_all_sub_trees(candidate_tree)]
            ref_sexps = get_all_sub_trees(reference_tree)
            for (sub_tree, depth) in ref_sexps:
                if (sub_tree in cand_sexps):
                    match_count += 1
            total_count += len(ref_sexps)
    # Guard against empty input, mirroring corpus_dataflow_match: without any
    # reference sub-trees the score degenerates to 0 instead of raising
    # ZeroDivisionError.
    if (total_count == 0):
        print('WARNING: There is no reference syntax sub-trees extracted from the whole corpus, and the syntax match score degenerates to 0. Please consider ignoring this score.')
        return 0
    score = (match_count / total_count)
    return score
|
def pad_sequence(sequence, n, pad_left=False, pad_right=False, left_pad_symbol=None, right_pad_symbol=None):
    """Return an iterator over *sequence* padded for ngram extraction.

    >>> list(pad_sequence([1,2,3,4,5], 2, pad_left=True, pad_right=True,
    ...                   left_pad_symbol='<s>', right_pad_symbol='</s>'))
    ['<s>', 1, 2, 3, 4, 5, '</s>']

    :param sequence: the source data to be padded (sequence or iter).
    :param n: the degree of the ngrams; n - 1 pad symbols are added per side.
    :param pad_left: whether to pad on the left.
    :param pad_right: whether to pad on the right.
    :param left_pad_symbol: symbol used for left padding (default None).
    :param right_pad_symbol: symbol used for right padding (default None).
    :rtype: iter
    """
    left = ((left_pad_symbol,) * (n - 1)) if pad_left else ()
    right = ((right_pad_symbol,) * (n - 1)) if pad_right else ()
    return chain(left, iter(sequence), right)
|
def ngrams(sequence, n, pad_left=False, pad_right=False, left_pad_symbol=None, right_pad_symbol=None):
    """Yield the ngrams of *sequence* as tuples.

    >>> list(ngrams([1,2,3,4,5], 3))
    [(1, 2, 3), (2, 3, 4), (3, 4, 5)]

    Padding options are forwarded to :func:`pad_sequence`; a sequence
    shorter than *n* (after padding) yields nothing.

    :param sequence: the source data to be converted into ngrams.
    :param n: the degree of the ngrams.
    :param pad_left: whether the ngrams should be left-padded.
    :param pad_right: whether the ngrams should be right-padded.
    :param left_pad_symbol: symbol used for left padding (default None).
    :param right_pad_symbol: symbol used for right padding (default None).
    :rtype: iterator of tuples
    """
    padded = pad_sequence(sequence, n, pad_left, pad_right, left_pad_symbol, right_pad_symbol)
    window = []
    for item in padded:
        window.append(item)
        if len(window) >= n:
            yield tuple(window)
            del window[0]
|
def sentence_bleu(references, hypothesis, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False):
    """Sentence-level BLEU score (Papineni et al., 2002).

    Thin wrapper that packs the single sentence pair into one-element
    corpora and delegates to :func:`corpus_bleu`.

    :param references: reference sentences, list(list(str)).
    :param hypothesis: a hypothesis sentence, list(str).
    :param weights: weights for unigrams, bigrams, trigrams and so on.
    :param smoothing_function: optional SmoothingFunction method; without
        one, a zero ngram overlap at any order yields a score of 0.
    :param auto_reweigh: option to re-normalize the weights uniformly.
    :return: the sentence-level BLEU score.
    :rtype: float
    """
    return corpus_bleu([references], [hypothesis], weights, smoothing_function, auto_reweigh)
|
def corpus_bleu(list_of_references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25), smoothing_function=None, auto_reweigh=False):
    """Corpus-level BLEU score (aka system-level BLEU).

    Instead of averaging sentence-level BLEU scores (macro-average), the
    original BLEU metric (Papineni et al., 2002) sums the numerators and
    denominators of each hypothesis-reference(s) pair before dividing
    (micro-average precision).

    :param list_of_references: a corpus of reference lists, one list of
        references per hypothesis.
    :param hypotheses: a list of hypothesis sentences, list(list(str)).
    :param weights: weights for unigrams, bigrams, trigrams and so on.
    :param smoothing_function: optional SmoothingFunction method; defaults
        to ``SmoothingFunction().method1``.
    :param auto_reweigh: option to re-normalize the weights uniformly.
    :return: the corpus-level BLEU score.
    :rtype: float
    """
    p_numerators = Counter()  # key: ngram order, value: summed numerators
    p_denominators = Counter()  # key: ngram order, value: summed denominators
    (hyp_lengths, ref_lengths) = (0, 0)
    assert (len(list_of_references) == len(hypotheses)), 'The number of hypotheses and their reference(s) should be the same '
    # Accumulate per-order counts across the corpus (micro-average) together
    # with corpus-wide hypothesis/closest-reference lengths.
    for (references, hypothesis) in zip(list_of_references, hypotheses):
        for (i, _) in enumerate(weights, start=1):
            # Fixed local-variable typo: was `p_i_numeraotr`.
            (p_i_numerator, p_i_denominator) = modified_recall(references, hypothesis, i)
            p_numerators[i] += p_i_numerator
            p_denominators[i] += p_i_denominator
        hyp_len = len(hypothesis)
        hyp_lengths += hyp_len
        ref_lengths += closest_ref_length(references, hyp_len)
    bp = brevity_penalty(ref_lengths, hyp_lengths)
    if auto_reweigh:
        # With fewer than 4 hypothesis tokens, re-weigh uniformly over the
        # ngram orders that can actually occur.
        if ((hyp_lengths < 4) and (weights == (0.25, 0.25, 0.25, 0.25))):
            weights = (((1 / hyp_lengths),) * hyp_lengths)
    p_n = [(p_numerators[i], p_denominators[i]) for (i, _) in enumerate(weights, start=1)]
    # No unigram overlap at all: the score is 0 regardless of smoothing.
    if (p_numerators[1] == 0):
        return 0
    if (not smoothing_function):
        smoothing_function = SmoothingFunction().method1
    # NOTE(review): `references`/`hypothesis` passed here are the *last* pair
    # seen in the loop above, matching the upstream implementation.
    p_n = smoothing_function(p_n, references=references, hypothesis=hypothesis, hyp_len=hyp_lengths)
    s = ((w_i * math.log((p_i[0] / p_i[1]))) for (w_i, p_i) in zip(weights, p_n))
    s = (bp * math.exp(math.fsum(s)))
    return s
|
def modified_recall(references, hypothesis, n):
    """Calculate the clipped-count components of modified ngram recall.

    Each element of ``references`` is a pair ``(reference_tokens, weights)``.
    For unigrams whose weight table has exactly one entry per distinct
    reference unigram, counts are weighted per token (missing tokens weigh
    1); otherwise plain counts are used.

    :param references: list of ``(reference_tokens, weights)`` pairs.
    :param hypothesis: a hypothesis translation, list(str).
    :param n: the ngram order.
    :return: ``(numerator, denominator)`` accumulated over all references.
    :rtype: tuple
    """
    hyp_counts = Counter(ngrams(hypothesis, n)) if len(hypothesis) >= n else Counter()
    numerator = 0
    denominator = 0
    for reference_and_weights in references:
        reference = reference_and_weights[0]
        weights = reference_and_weights[1]
        ref_counts = Counter(ngrams(reference, n)) if len(reference) >= n else Counter()
        # Clip each reference ngram count by its count in the hypothesis.
        clipped = {gram: min(count, hyp_counts[gram]) for (gram, count) in ref_counts.items()}
        if (n == 1) and (len(weights) == len(ref_counts)):
            def weighted(counts):
                # Weight each unigram count by its token's weight (default 1).
                return sum(count * (weights[gram[0]] if (gram[0] in weights) else 1)
                           for (gram, count) in counts.items())
            numerator += weighted(clipped)
            denominator += max(1, weighted(ref_counts))
        else:
            numerator += sum(clipped.values())
            denominator += max(1, sum(ref_counts.values()))
    return (numerator, denominator)
|
def closest_ref_length(references, hyp_len):
    """Return the reference length closest to the hypothesis length.

    This is the *r* variable from the brevity-penalty formula in
    Papineni et al. (2002); ties are broken toward the shorter reference.

    :param references: a list of reference translations.
    :param hyp_len: the length of the hypothesis.
    :return: the closest reference length.
    :rtype: int
    """
    return min(
        (len(reference) for reference in references),
        key=(lambda length: (abs((length - hyp_len)), length)),
    )
|
def brevity_penalty(closest_ref_len, hyp_len):
    """Calculate BLEU's brevity penalty.

    Hypotheses longer than (or equal in exponent terms to) the closest
    reference get no penalty; shorter ones are scaled by
    ``exp(1 - r / c)`` where *r* is ``closest_ref_len`` and *c* is
    ``hyp_len``.

    >>> brevity_penalty(12, 17)
    1
    >>> brevity_penalty(28, 12)  # doctest: +ELLIPSIS
    0.2635...

    :param closest_ref_len: closest reference length (or the sum of the
        closest reference lengths over a corpus).
    :param hyp_len: hypothesis length (or the summed corpus length).
    :return: the brevity penalty.
    :rtype: float
    """
    if hyp_len > closest_ref_len:
        return 1
    if hyp_len == 0:
        # Empty hypothesis: avoid division by zero, score collapses to 0.
        return 0
    return math.exp((1 - (closest_ref_len / hyp_len)))
|
class SmoothingFunction():
    """Smoothing techniques for segment-level BLEU scores, from:
    Boxing Chen and Collin Cherry (2014) A Systematic Comparison of
    Smoothing Techniques for Sentence-Level BLEU. In WMT14.
    http://acl2014.org/acl2014/W14-33/pdf/W14-3346.pdf
    """

    def __init__(self, epsilon=0.1, alpha=5, k=5):
        """Initialize smoothing parameters (defaults follow Chen & Cherry, 2014).

        :param epsilon: additive count used by method1 for 0-count precisions
        :type epsilon: float
        :param alpha: interpolation weight between the MLE precision and the
            prior estimate in method6
        :type alpha: int
        :param k: numerator constant used by method4
        :type k: int
        """
        self.epsilon = epsilon
        self.alpha = alpha
        self.k = k

    def method0(self, p_n, *args, **kwargs):
        """No smoothing: warn on any n-gram order with zero overlap and
        substitute the smallest positive float so a later log() is safe."""
        p_n_new = []
        for (i, p_i) in enumerate(p_n):
            # NOTE(review): p_i is indexed here (p_i[0]) while method3/4/6 use
            # p_i.numerator -- the element type of p_n appears to differ by
            # caller ((num, den) pairs vs Fraction-like); confirm which
            # representation each method actually receives.
            if (p_i[0] != 0):
                p_n_new.append(p_i)
            else:
                _msg = str('\nThe hypothesis contains 0 counts of {}-gram overlaps.\nTherefore the BLEU score evaluates to 0, independently of\nhow many N-gram overlaps of lower order it contains.\nConsider using lower n-gram order or use SmoothingFunction()').format((i + 1))
                warnings.warn(_msg)
                # A bare float is appended where non-zero entries are pairs.
                p_n_new.append(sys.float_info.min)
        return p_n_new

    def method1(self, p_n, *args, **kwargs):
        """Smoothing method 1: add *epsilon* to the numerator of any
        precision whose count is 0."""
        return [(((p_i[0] + self.epsilon), p_i[1]) if (p_i[0] == 0) else p_i) for p_i in p_n]

    def method2(self, p_n, *args, **kwargs):
        """Smoothing method 2: add 1 to both numerator and denominator
        (Chin-Yew Lin and Franz Josef Och, 2004, ACL)."""
        return [((p_i[0] + 1), (p_i[1] + 1)) for p_i in p_n]

    def method3(self, p_n, *args, **kwargs):
        """Smoothing method 3: NIST geometric sequence smoothing.
        Each zero-count precision becomes 1 / (2**k * denominator), where k
        counts how many zero-count orders have been seen so far (k starts at
        1 for the first zero and grows by 1 for each subsequent zero)."""
        incvnt = 1
        for (i, p_i) in enumerate(p_n):
            if (p_i.numerator == 0):
                # In-place replacement: the Fraction becomes a plain float.
                p_n[i] = (1 / ((2 ** incvnt) * p_i.denominator))
                incvnt += 1
        return p_n

    def method4(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
        """Smoothing method 4: like method 3, but smoothed counts scale with
        k / ln(hyp_len), giving proportionally smaller counts to shorter
        translations."""
        # Truthiness fallback: hyp_len=0 (or None) falls back to len(hypothesis).
        hyp_len = (hyp_len if hyp_len else len(hypothesis))
        for (i, p_i) in enumerate(p_n):
            if ((p_i.numerator == 0) and (hyp_len != 0)):
                # NOTE(review): hyp_len == 1 makes math.log(hyp_len) zero and
                # would raise ZeroDivisionError here -- confirm callers never
                # pass single-token hypotheses with a zero-count precision.
                incvnt = (i + ((1 * self.k) / math.log(hyp_len)))
                p_n[i] = (incvnt / p_i.denominator)
        return p_n

    def method5(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
        """Smoothing method 5: replace each matched count with the average of
        the n-1, n and n+1 gram matched counts; the n+1 entry for the last
        order is a 5-gram precision computed on the fly."""
        hyp_len = (hyp_len if hyp_len else len(hypothesis))
        m = {}
        # Extend with the 5-gram precision so p_n_plus1[i + 1] exists for the
        # final order.
        p_n_plus1 = (p_n + [modified_precision(references, hypothesis, 5)])
        # Seed the "n-1" term for the first order.
        m[(- 1)] = (p_n[0] + 1)
        for (i, p_i) in enumerate(p_n):
            p_n[i] = (((m[(i - 1)] + p_i) + p_n_plus1[(i + 1)]) / 3)
            m[i] = p_n[i]
        return p_n

    def method6(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
        """Smoothing method 6: interpolate the MLE precision with a prior
        pi0 = p_{n-1}**2 / p_{n-2}, assuming the ratio between successive
        orders is constant (Gao and He, 2013, NAACL). Orders 1 and 2 are
        left untouched."""
        hyp_len = (hyp_len if hyp_len else len(hypothesis))
        assert p_n[2], 'This smoothing method requires non-zero precision for bigrams.'
        for (i, p_i) in enumerate(p_n):
            if (i in [0, 1]):
                # Unigram/bigram precisions are used as-is.
                continue
            else:
                pi0 = (0 if (p_n[(i - 2)] == 0) else ((p_n[(i - 1)] ** 2) / p_n[(i - 2)]))
                m = p_i.numerator
                # l = number of (i+1)-grams in the hypothesis.
                l = sum((1 for _ in ngrams(hypothesis, (i + 1))))
                p_n[i] = ((m + (self.alpha * pi0)) / (l + self.alpha))
        return p_n

    def method7(self, p_n, references, hypothesis, hyp_len=None, *args, **kwargs):
        """Smoothing method 7: apply method 4, then method 5."""
        hyp_len = (hyp_len if hyp_len else len(hypothesis))
        p_n = self.method4(p_n, references, hypothesis, hyp_len)
        p_n = self.method5(p_n, references, hypothesis, hyp_len)
        return p_n
|
def _get_ngrams(segment, max_order):
'Extracts all n-grams upto a given maximum order from an input segment.\n\n Args:\n segment: text segment from which n-grams will be extracted.\n max_order: maximum length in tokens of the n-grams returned by this\n methods.\n\n Returns:\n The Counter containing all n-grams upto max_order in segment\n with a count of how many times each n-gram occurred.\n '
ngram_counts = collections.Counter()
for order in range(1, (max_order + 1)):
for i in range(0, ((len(segment) - order) + 1)):
ngram = tuple(segment[i:(i + order)])
ngram_counts[ngram] += 1
return ngram_counts
|
def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False):
'Computes BLEU score of translated segments against one or more references.\n\n Args:\n reference_corpus: list of lists of references for each translation. Each\n reference should be tokenized into a list of tokens.\n translation_corpus: list of translations to score. Each translation\n should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\n\n Returns:\n 3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram\n precisions and brevity penalty.\n '
matches_by_order = ([0] * max_order)
possible_matches_by_order = ([0] * max_order)
reference_length = 0
translation_length = 0
for (references, translation) in zip(reference_corpus, translation_corpus):
reference_length += min((len(r) for r in references))
translation_length += len(translation)
merged_ref_ngram_counts = collections.Counter()
for reference in references:
merged_ref_ngram_counts |= _get_ngrams(reference, max_order)
translation_ngram_counts = _get_ngrams(translation, max_order)
overlap = (translation_ngram_counts & merged_ref_ngram_counts)
for ngram in overlap:
matches_by_order[(len(ngram) - 1)] += overlap[ngram]
for order in range(1, (max_order + 1)):
possible_matches = ((len(translation) - order) + 1)
if (possible_matches > 0):
possible_matches_by_order[(order - 1)] += possible_matches
precisions = ([0] * max_order)
for i in range(0, max_order):
if smooth:
precisions[i] = ((matches_by_order[i] + 1.0) / (possible_matches_by_order[i] + 1.0))
elif (possible_matches_by_order[i] > 0):
precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i])
else:
precisions[i] = 0.0
if (min(precisions) > 0):
p_log_sum = sum((((1.0 / max_order) * math.log(p)) for p in precisions))
geo_mean = math.exp(p_log_sum)
else:
geo_mean = 0
ratio = (float(translation_length) / reference_length)
if (ratio > 1.0):
bp = 1.0
else:
bp = math.exp((1 - (1.0 / ratio)))
bleu = (geo_mean * bp)
return (bleu, precisions, bp, ratio, translation_length, reference_length)
|
def _bleu(ref_file, trans_file, subword_option=None):
    """Corpus-level smoothed BLEU (order 4) between a reference file and a
    translation file, one whitespace-tokenized segment per line.

    subword_option is accepted for interface compatibility but unused.
    Returns the score scaled to 0-100 and rounded to two decimals.
    """
    max_order = 4
    smooth = True
    # One reference per segment (single reference file).
    with open(ref_file) as fh:
        per_segment_references = [[line.strip().split()] for line in fh]
    with open(trans_file) as fh:
        translations = [line.strip().split() for line in fh]
    bleu_score = compute_bleu(per_segment_references, translations, max_order, smooth)[0]
    return round(100 * bleu_score, 2)
|
def do_test(target, exp_1, exp_2, test_type, metrics, lang):
    """Compare two experiments' predictions against a gold file with a
    statistical test, printing summary statistics and showing plots.

    :param target: path to the gold (reference) file, one example per line
    :param exp_1: path to the "SemCode" predictions file (line-aligned with target)
    :param exp_2: path to the "CodeT5" predictions file (line-aligned with target)
    :param test_type: 't-test' selects scipy's ttest_ind; anything else uses wilcoxon
    :param metrics: list of metric names; if it contains 'all', every column
        produced by evaluate_per_example is tested
    :param lang: language tag forwarded to evaluate_per_example
    """
    (semcode_results, codet5_results) = ([], [])
    with open(target) as goldf:
        with open(exp_1) as semcodef:
            with open(exp_2) as codet5f:
                golds = goldf.readlines()
                # Assumes the three files are line-aligned -- zip silently
                # truncates to the shortest; TODO confirm lengths match.
                for (gold, semcode, codet5) in tqdm(zip(golds, semcodef.readlines(), codet5f.readlines()), total=len(golds)):
                    # evaluate_per_example is a project helper; presumably it
                    # returns a dict of metric name -> score per example.
                    semcode_results.append(evaluate_per_example(reference=gold.strip(), hypothesis=semcode.strip(), lang=lang))
                    codet5_results.append(evaluate_per_example(reference=gold.strip(), hypothesis=codet5.strip(), lang=lang))
    semcode_df = pd.DataFrame(semcode_results)
    codet5_df = pd.DataFrame(codet5_results)
    if ('all' in metrics):
        # Expand to every metric column produced by the evaluator.
        metrics = semcode_df.columns.tolist()
    test = (ttest_ind if (test_type == 't-test') else wilcoxon)
    for m in metrics:
        (stat, p_value) = test(semcode_df[m].tolist(), codet5_df[m].tolist())
        semcode_r = semcode_df[m].tolist()
        codet5_r = codet5_df[m].tolist()
        # Long-form frame for the split violin plot below.
        result_data = pd.DataFrame(([{'Experiment': 'SemCode', m: v} for v in semcode_df[m].tolist()] + [{'Experiment': 'CodeT5', m: v} for v in codet5_df[m].tolist()]))
        print(('SemCode : \n Mean %.4f\n 1st q %.4f \n Median %.4f\n 3rd q %.4f' % (np.mean(semcode_r).item(), np.percentile(semcode_r, 25).item(), np.percentile(semcode_r, 50).item(), np.percentile(semcode_r, 75).item())))
        print(('CodeT5 : \n Mean %.4f\n 1st q %.4f \n Median %.4f\n 3rd q %.4f' % (np.mean(codet5_r).item(), np.percentile(codet5_r, 25).item(), np.percentile(codet5_r, 50).item(), np.percentile(codet5_r, 75).item())))
        plt.figure()
        ax = sns.violinplot(data=result_data, x=m, y='Experiment', hue='Experiment', split=True, inner='quartiles')
        ax.set(title=m)
        plt.legend()
        # Wide-form frame for the per-example scatter/joint plot.
        r_data = pd.DataFrame({'SemCode': semcode_df[m].tolist(), 'CodeT5': codet5_df[m].tolist()})
        plt.figure()
        sns.jointplot(data=r_data, x='SemCode', y='CodeT5')
        plt.show()
        print(f'''
Metric : {m}
Statistics : {stat}
P-Value : {p_value}''')
    pass
|
def normalize(s):
    'Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl.'
    # Normalization can be disabled entirely via the module-level nonorm flag.
    if nonorm:
        return s.split()
    # Accept a pre-tokenized sequence as well as a raw string.
    if type(s) is not str:
        s = ' '.join(s)
    # First substitution pass (language-independent cleanup).
    for pattern, replace in normalize1:
        s = re.sub(pattern, replace, s)
    s = xml.sax.saxutils.unescape(s, {'"': '"'})
    # Pad with spaces so edge tokens match the boundary-sensitive patterns.
    s = ' %s ' % s
    if not preserve_case:
        s = s.lower()
    # Second substitution pass (tokenization around punctuation).
    for pattern, replace in normalize2:
        s = re.sub(pattern, replace, s)
    return s.split()
|
def count_ngrams(words, n=4):
    """Return a dict mapping each n-gram tuple (orders 1..n) of *words* to
    the number of times it occurs."""
    counts = {}
    for order in range(1, n + 1):
        for start in range(len(words) - order + 1):
            gram = tuple(words[start:start + order])
            counts[gram] = counts.get(gram, 0) + 1
    return counts
|
def cook_refs(refs, n=4):
    """Takes a list of reference sentences for a single segment and returns
    an object encapsulating everything BLEU needs to know about them:
    (list of reference lengths, per-ngram maximum count across references)."""
    normalized = [normalize(ref) for ref in refs]
    maxcounts = {}
    for tokens in normalized:
        for gram, cnt in count_ngrams(tokens, n).items():
            # Keep the per-ngram maximum over all references (clipping bound).
            if cnt > maxcounts.get(gram, 0):
                maxcounts[gram] = cnt
    return ([len(tokens) for tokens in normalized], maxcounts)
|
def cook_test(test, item, n=4):
    """Takes a test sentence and the output of cook_refs, and returns the
    statistics BLEU needs: test length, effective reference length, possible
    n-gram guesses per order, and clipped correct counts per order."""
    (reflens, refmaxcounts) = item
    test = normalize(test)
    testlen = len(test)
    result = {'testlen': testlen}
    # The effective reference length policy comes from the module-level
    # eff_ref_len switch.
    if eff_ref_len == 'shortest':
        result['reflen'] = min(reflens)
    elif eff_ref_len == 'average':
        result['reflen'] = float(sum(reflens)) / len(reflens)
    elif eff_ref_len == 'closest':
        min_diff = None
        for reflen in reflens:
            diff = abs(reflen - testlen)
            if min_diff is None or diff < min_diff:
                min_diff = diff
                result['reflen'] = reflen
    # Number of candidate n-grams per order (floored at zero).
    result['guess'] = [max(testlen - k + 1, 0) for k in range(1, n + 1)]
    result['correct'] = [0] * n
    # Clip each matched count by the references' maximum count.
    for gram, count in count_ngrams(test, n).items():
        result['correct'][len(gram) - 1] += min(refmaxcounts.get(gram, 0), count)
    return result
|
def score_cooked(allcomps, n=4, ground=0, smooth=1):
    """Aggregate cooked sentence statistics and return a list of exponentiated
    scores: [overall BLEU (with brevity penalty), p_1, ..., p_n]."""
    totals = {'testlen': 0, 'reflen': 0, 'guess': [0] * n, 'correct': [0] * n}
    for comps in allcomps:
        totals['testlen'] += comps['testlen']
        totals['reflen'] += comps['reflen']
        for k in range(n):
            totals['guess'][k] += comps['guess'][k]
            totals['correct'][k] += comps['correct'][k]
    tiny = sys.float_info.min
    logbleu = 0.0
    all_bleus = []
    for k in range(n):
        correct = totals['correct'][k]
        guess = totals['guess'][k]
        # Add-one smoothing for every order above unigrams (when enabled).
        addsmooth = 1 if (smooth == 1 and k > 0) else 0
        logbleu += math.log(correct + addsmooth + tiny) - math.log(guess + addsmooth + tiny)
        if guess == 0:
            # Sentinel: effectively -inf, so exp() below yields 0.
            all_bleus.append(-10000000)
        else:
            all_bleus.append(math.log(correct + tiny) - math.log(guess))
    logbleu /= float(n)
    all_bleus.insert(0, logbleu)
    # Brevity penalty in log space (never positive).
    brevPenalty = min(0, 1 - float(totals['reflen'] + 1) / (totals['testlen'] + 1))
    scores = []
    for pos, logscore in enumerate(all_bleus):
        if pos == 0:
            logscore += brevPenalty
        scores.append(math.exp(logscore))
    return scores
|
def bleu(refs, candidate, ground=0, smooth=1):
    """Convenience wrapper: cook the references and the candidate, then score
    the single segment with score_cooked."""
    cooked_refs = cook_refs(refs)
    cooked_candidate = cook_test(candidate, cooked_refs)
    return score_cooked([cooked_candidate], ground=ground, smooth=smooth)
|
def splitPuncts(line):
    """Return *line* re-tokenized so that every run of word characters and
    every single punctuation character becomes its own space-separated token."""
    pieces = re.findall('[\\w]+|[^\\s\\w]', line)
    return ' '.join(pieces)
|
def computeMaps(predictions, goldfile):
    """Build id -> [tokenized text] maps for predictions and gold answers.

    :param predictions: iterable of "id<TAB>prediction" rows (a row with no
        tab yields an empty prediction for that id)
    :param goldfile: path to a file of "id<TAB>gold" rows; only ids that also
        appear in *predictions* are kept
    :return: (goldMap, predictionMap), both keyed by row id; values are lists
        of lowercased, punctuation-split strings
    """
    predictionMap = {}
    goldMap = {}
    for row in predictions:
        cols = row.strip().split('\t')
        if len(cols) == 1:
            # No tab: treat the whole row as the id with an empty prediction.
            (rid, pred) = (cols[0], '')
        else:
            (rid, pred) = (cols[0], cols[1])
        predictionMap[rid] = [splitPuncts(pred.strip().lower())]
    # Fix: context manager so the gold file is always closed (the original
    # leaked the handle), and maxsplit=1 so gold text containing tabs no
    # longer raises ValueError on unpacking.
    with open(goldfile, 'r') as gf:
        for row in gf:
            (rid, pred) = row.split('\t', 1)
            if rid in predictionMap:
                if rid not in goldMap:
                    goldMap[rid] = []
                goldMap[rid].append(splitPuncts(pred.strip().lower()))
    sys.stderr.write(('Total: ' + str(len(goldMap))) + '\n')
    return (goldMap, predictionMap)
|
def bleuFromMaps(m1, m2):
    """Average the smoothed BLEU score components over the ids present in
    both maps, scaled to 0-100.

    :param m1: id -> list of gold strings
    :param m2: id -> list of prediction strings (only m2[key][0] is scored)
    :return: [overall BLEU, p_1, p_2, p_3, p_4], each averaged and * 100
    """
    score = [0] * 5
    num = 0.0
    for key in m1:
        if key in m2:
            bl = bleu(m1[key], m2[key][0])
            score = [(score[i] + bl[i]) for i in range(0, len(bl))]
            num += 1
    # Fix: the original divided by zero when the maps shared no ids; report
    # all-zero scores instead of crashing.
    if num == 0:
        return [0.0] * len(score)
    return [((s * 100.0) / num) for s in score]
|
def add_args(parser):
    """Register all command-line options on *parser*, parse them immediately,
    and return the parsed namespace.

    Note: despite the name, this function calls parser.parse_args() itself,
    so it should only be invoked once per process. It also derives args.lang
    from task/sub_task for a subset of tasks (see the bottom).
    """
    # --- task / model selection --------------------------------------------
    parser.add_argument('--task', type=str, required=True, choices=['summarize', 'concode', 'translate', 'refine', 'defect', 'clone', 'multi_task', 'refine-concrete', 'refine-commit', 'translate-500', 'eval_pretrain'])
    parser.add_argument('--sub_task', type=str, default='none')
    parser.add_argument('--lang', type=str, default='')
    parser.add_argument('--eval_task', type=str, default='')
    parser.add_argument('--model_type', default='codet5', type=str, choices=['roberta', 'bart', 'codet5'])
    parser.add_argument('--add_lang_ids', action='store_true')
    # --- data / schedule ---------------------------------------------------
    parser.add_argument('--data_num', default=(- 1), type=int)
    parser.add_argument('--start_epoch', default=0, type=int)
    parser.add_argument('--num_train_epochs', default=100, type=int)
    parser.add_argument('--patience', default=5, type=int)
    # --- paths -------------------------------------------------------------
    parser.add_argument('--cache_path', type=str, required=True)
    parser.add_argument('--summary_dir', type=str, required=True)
    parser.add_argument('--data_dir', type=str, required=True)
    parser.add_argument('--res_dir', type=str, required=True)
    parser.add_argument('--res_fn', type=str, default='')
    parser.add_argument('--add_task_prefix', action='store_true', help='Whether to add task prefix for t5 and codet5')
    parser.add_argument('--save_last_checkpoints', action='store_true')
    parser.add_argument('--always_save_model', action='store_true')
    parser.add_argument('--do_eval_bleu', action='store_true', help='Whether to evaluate bleu on dev set.')
    parser.add_argument('--zero_shot', action='store_true')
    # --- model / tokenizer locations ---------------------------------------
    parser.add_argument('--model_name_or_path', default='roberta-base', type=str, help='Path to pre-trained model: e.g. roberta-base')
    parser.add_argument('--output_dir', default=None, type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--load_model_path', default=None, type=str, help='Path to trained model: Should contain the .bin files')
    parser.add_argument('--train_filename', default=None, type=str, help='The train filename. Should contain the .jsonl files for this task.')
    parser.add_argument('--dev_filename', default=None, type=str, help='The dev filename. Should contain the .jsonl files for this task.')
    parser.add_argument('--test_filename', default=None, type=str, help='The test filename. Should contain the .jsonl files for this task.')
    parser.add_argument('--config_name', default='', type=str, help='Pretrained config name or path if not the same as model_name')
    parser.add_argument('--tokenizer_name', default='roberta-base', type=str, help='Pretrained tokenizer name or path if not the same as model_name')
    # --- sequence lengths / run modes --------------------------------------
    parser.add_argument('--max_source_length', default=64, type=int, help='The maximum total source sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
    parser.add_argument('--max_target_length', default=32, type=int, help='The maximum total target sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.')
    parser.add_argument('--do_train', action='store_true', help='Whether to run eval on the train set.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--do_test', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
    # --- optimization ------------------------------------------------------
    parser.add_argument('--train_batch_size', default=8, type=int, help='Batch size per GPU/CPU for training.')
    parser.add_argument('--eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=8, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--beam_size', default=10, type=int, help='beam size for beam search')
    parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight deay if we apply some.')
    parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
    # --- step counters / misc ----------------------------------------------
    parser.add_argument('--save_steps', default=(- 1), type=int)
    parser.add_argument('--log_steps', default=(- 1), type=int)
    parser.add_argument('--max_steps', default=(- 1), type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
    parser.add_argument('--eval_steps', default=(- 1), type=int, help='')
    parser.add_argument('--train_steps', default=(- 1), type=int, help='')
    parser.add_argument('--warmup_steps', default=100, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='For distributed training: local_rank')
    parser.add_argument('--seed', type=int, default=1234, help='random seed for initialization')
    args = parser.parse_args()
    # Derive args.lang from the task where unambiguous.
    if (args.task in ['summarize']):
        args.lang = args.sub_task
    elif (args.task in ['refine', 'concode', 'clone']):
        args.lang = 'java'
    elif (args.task == 'defect'):
        args.lang = 'c'
    elif (args.task == 'translate'):
        args.lang = ('c_sharp' if (args.sub_task == 'java-cs') else 'java')
    # NOTE(review): tasks such as 'translate-500', 'refine-concrete',
    # 'refine-commit' and 'eval_pretrain' keep whatever --lang was passed;
    # confirm that is intended.
    return args
|
def set_dist(args):
    """Configure device and (optionally) distributed-training state, storing
    the results on *args* (args.device, args.n_gpu, args.cpu_cont)."""
    if args.local_rank == (- 1) or args.no_cuda:
        # Single-process mode: use CUDA when available unless disabled.
        use_cuda = torch.cuda.is_available() and not args.no_cuda
        device = torch.device('cuda' if use_cuda else 'cpu')
        args.n_gpu = torch.cuda.device_count()
    else:
        # Distributed mode: one GPU per process, NCCL backend.
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    cpu_cont = multiprocessing.cpu_count()
    logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, cpu count: %d', args.local_rank, device, args.n_gpu, bool(args.local_rank != (- 1)), cpu_cont)
    args.device = device
    args.cpu_cont = cpu_cont
|
def set_seed(args):
    """Seed all random number generators (python, numpy, torch) from args.seed
    for reproducibility; also seeds CUDA when GPUs are in use."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
|
@dataclass
class ModelArguments():
    """Arguments pertaining to which model/config/tokenizer we are going to
    fine-tune from. Each field's metadata['help'] is surfaced by
    HfArgumentParser as the CLI help text."""
    # Required positional field: HF hub id or local path of the base model.
    model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    # The remaining fields default to the model's own config/tokenizer.
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'})
|
@dataclass
class DataTrainingArguments():
    """Arguments pertaining to what data we are going to input our model for
    training and eval. Consumed by HfArgumentParser; metadata['help'] becomes
    the CLI help text."""
    source_lang: str = field(default=None, metadata={'help': 'Source language id for translation.'})
    target_lang: str = field(default=None, metadata={'help': 'Target language id for translation.'})
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a jsonlines).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the metrics (sacreblue) on a jsonlines file.'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to evaluate the metrics (sacreblue) on a jsonlines file.'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    max_source_length: Optional[int] = field(default=1024, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    max_target_length: Optional[int] = field(default=128, metadata={'help': 'The maximum total sequence length for target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    val_max_target_length: Optional[int] = field(default=None, metadata={'help': 'The maximum total sequence length for validation target text after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` param of ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
    pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
    num_beams: Optional[int] = field(default=None, metadata={'help': 'Number of beams to use for evaluation. This argument will be passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'})
    ignore_pad_token_for_loss: bool = field(default=True, metadata={'help': 'Whether to ignore the tokens corresponding to padded labels in the loss computation or not.'})
    source_prefix: Optional[str] = field(default=None, metadata={'help': 'A prefix to add before every source text (useful for T5 models).'})
    forced_bos_token: Optional[str] = field(default=None, metadata={'help': 'The token to force as the first generated token after the :obj:`decoder_start_token_id`.Useful for multilingual models like :doc:`mBART <../model_doc/mbart>` where the first generated token needs to be the target language token.(Usually it is the target language token)'})

    def __post_init__(self):
        # Validation runs right after dataclass construction.
        # First: at least one data source must be given; otherwise (i.e.
        # when some data source IS given) both languages are mandatory.
        if ((self.dataset_name is None) and (self.train_file is None) and (self.validation_file is None)):
            raise ValueError('Need either a dataset name or a training/validation file.')
        elif ((self.source_lang is None) or (self.target_lang is None)):
            raise ValueError('Need to specify the source language and the target language.')
        valid_extensions = ['json', 'jsonl']
        if (self.train_file is not None):
            extension = self.train_file.split('.')[(- 1)]
            assert (extension in valid_extensions), '`train_file` should be a jsonlines file.'
        if (self.validation_file is not None):
            extension = self.validation_file.split('.')[(- 1)]
            assert (extension in valid_extensions), '`validation_file` should be a jsonlines file.'
        # NOTE(review): test_file's extension is not validated here -- confirm
        # whether that is intentional.
        if (self.val_max_target_length is None):
            # Fall back to the training target length for validation.
            self.val_max_target_length = self.max_target_length
|
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
logger.info(f'Training/evaluation parameters {training_args}')
if ((data_args.source_prefix is None) and (model_args.model_name_or_path in ['t5-small', 't5-base', 't5-large', 't5-3b', 't5-11b'])):
logger.warning("You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with `--source_prefix 'translate English to German: ' `")
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
set_seed(training_args.seed)
if (data_args.dataset_name is not None):
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
data_files = {}
if (data_args.train_file is not None):
data_files['train'] = data_args.train_file
extension = data_args.train_file.split('.')[(- 1)]
if (data_args.validation_file is not None):
data_files['validation'] = data_args.validation_file
extension = data_args.validation_file.split('.')[(- 1)]
if (data_args.test_file is not None):
data_files['test'] = data_args.test_file
extension = data_args.test_file.split('.')[(- 1)]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
model.resize_token_embeddings(len(tokenizer))
if ((model.config.decoder_start_token_id is None) and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast))):
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang)
if (model.config.decoder_start_token_id is None):
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
prefix = (data_args.source_prefix if (data_args.source_prefix is not None) else '')
if training_args.do_train:
column_names = raw_datasets['train'].column_names
elif training_args.do_eval:
column_names = raw_datasets['validation'].column_names
elif training_args.do_predict:
column_names = raw_datasets['test'].column_names
else:
logger.info('There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.')
return
if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
assert ((data_args.target_lang is not None) and (data_args.source_lang is not None)), f'{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --source_lang and --target_lang arguments.'
tokenizer.src_lang = data_args.source_lang
tokenizer.tgt_lang = data_args.target_lang
forced_bos_token_id = (tokenizer.lang_code_to_id[data_args.forced_bos_token] if (data_args.forced_bos_token is not None) else None)
model.config.forced_bos_token_id = forced_bos_token_id
source_lang = data_args.source_lang.split('_')[0]
target_lang = data_args.target_lang.split('_')[0]
max_target_length = data_args.max_target_length
padding = ('max_length' if data_args.pad_to_max_length else False)
if ((training_args.label_smoothing_factor > 0) and (not hasattr(model, 'prepare_decoder_input_ids_from_labels'))):
logger.warning(f'label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory')
def preprocess_function(examples):
inputs = [ex[source_lang] for ex in examples['translation']]
targets = [ex[target_lang] for ex in examples['translation']]
inputs = [(prefix + inp) for inp in inputs]
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
if ((padding == 'max_length') and data_args.ignore_pad_token_for_loss):
labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']]
model_inputs['labels'] = labels['input_ids']
return model_inputs
if training_args.do_train:
if ('train' not in raw_datasets):
raise ValueError('--do_train requires a train dataset')
train_dataset = raw_datasets['train']
if (data_args.max_train_samples is not None):
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc='train dataset map pre-processing'):
train_dataset = train_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on train dataset')
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
if ('validation' not in raw_datasets):
raise ValueError('--do_eval requires a validation dataset')
eval_dataset = raw_datasets['validation']
if (data_args.max_eval_samples is not None):
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc='validation dataset map pre-processing'):
eval_dataset = eval_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on validation dataset')
if training_args.do_predict:
max_target_length = data_args.val_max_target_length
if ('test' not in raw_datasets):
raise ValueError('--do_predict requires a test dataset')
predict_dataset = raw_datasets['test']
if (data_args.max_predict_samples is not None):
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
with training_args.main_process_first(desc='prediction dataset map pre-processing'):
predict_dataset = predict_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on prediction dataset')
label_pad_token_id = ((- 100) if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id)
if data_args.pad_to_max_length:
data_collator = default_data_collator
else:
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=(8 if training_args.fp16 else None))
metric = load_metric('sacrebleu')
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return (preds, labels)
def compute_metrics(eval_preds):
(preds, labels) = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
if data_args.ignore_pad_token_for_loss:
labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
(decoded_preds, decoded_labels) = postprocess_text(decoded_preds, decoded_labels)
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
result = {'bleu': result['score']}
prediction_lens = [np.count_nonzero((pred != tokenizer.pad_token_id)) for pred in preds]
result['gen_len'] = np.mean(prediction_lens)
result = {k: round(v, 4) for (k, v) in result.items()}
return result
trainer = Seq2SeqTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=(compute_metrics if training_args.predict_with_generate else None))
if training_args.do_train:
checkpoint = None
if (training_args.resume_from_checkpoint is not None):
checkpoint = training_args.resume_from_checkpoint
elif (last_checkpoint is not None):
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
metrics = train_result.metrics
max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
metrics['train_samples'] = min(max_train_samples, len(train_dataset))
trainer.log_metrics('train', metrics)
trainer.save_metrics('train', metrics)
trainer.save_state()
results = {}
max_length = (training_args.generation_max_length if (training_args.generation_max_length is not None) else data_args.val_max_target_length)
num_beams = (data_args.num_beams if (data_args.num_beams is not None) else training_args.generation_num_beams)
if training_args.do_eval:
logger.info('*** Evaluate ***')
metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix='eval')
max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics('eval', metrics)
trainer.save_metrics('eval', metrics)
if training_args.do_predict:
logger.info('*** Predict ***')
predict_results = trainer.predict(predict_dataset, metric_key_prefix='predict', max_length=max_length, num_beams=num_beams)
metrics = predict_results.metrics
max_predict_samples = (data_args.max_predict_samples if (data_args.max_predict_samples is not None) else len(predict_dataset))
metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics('predict', metrics)
trainer.save_metrics('predict', metrics)
if trainer.is_world_process_zero():
if training_args.predict_with_generate:
predictions = tokenizer.batch_decode(predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
predictions = [pred.strip() for pred in predictions]
output_prediction_file = os.path.join(training_args.output_dir, 'generated_predictions.txt')
with open(output_prediction_file, 'w', encoding='utf-8') as writer:
writer.write('\n'.join(predictions))
kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'translation'}
if (data_args.dataset_name is not None):
kwargs['dataset_tags'] = data_args.dataset_name
if (data_args.dataset_config_name is not None):
kwargs['dataset_args'] = data_args.dataset_config_name
kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
else:
kwargs['dataset'] = data_args.dataset_name
languages = [l for l in [data_args.source_lang, data_args.target_lang] if (l is not None)]
if (len(languages) > 0):
kwargs['language'] = languages
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
return results
|
def _mp_fn(index):
    # Per-process entry point for a multiprocess spawner; *index* is the
    # worker ordinal and is unused here — presumably the spawn API requires
    # this signature (TODO confirm against the launcher in use).
    main()
|
def eval_ppl_epoch(args, eval_data, eval_examples, model, tokenizer):
    """Compute perplexity of *model* over *eval_data*.

    Runs one teacher-forced pass over the dataset and returns
    ``exp(mean batch loss)`` rounded to 5 decimal places.
    """
    dataloader = DataLoader(eval_data, sampler=SequentialSampler(eval_data),
                            batch_size=args.eval_batch_size, num_workers=4,
                            pin_memory=True)
    logger.info((' ' + '***** Running ppl evaluation *****'))
    logger.info(' Num examples = %d', len(eval_examples))
    logger.info(' Batch size = %d', args.eval_batch_size)
    model.eval()
    total_loss = 0
    n_batches = 0
    for batch in tqdm(dataloader, total=len(dataloader), desc='Eval ppl'):
        source_ids, target_ids = (t.to(args.device) for t in batch)
        # Padding positions are masked out on both encoder and decoder sides.
        source_mask = source_ids.ne(tokenizer.pad_token_id)
        target_mask = target_ids.ne(tokenizer.pad_token_id)
        with torch.no_grad():
            outputs = model(input_ids=source_ids, attention_mask=source_mask,
                            labels=target_ids, decoder_attention_mask=target_mask)
        total_loss += outputs.loss.item()
        n_batches += 1
    mean_loss = total_loss / n_batches
    return round(np.exp(mean_loss), 5)
|
def eval_bleu_per_example(args, eval_data, eval_examples, model, tokenizer, split_tag, criteria):
    """Generate predictions and compute per-example CodeBLEU-style metrics.

    Writes ``test_<criteria>.{output,gold,src}`` under ``args.res_dir`` and
    returns one metadata dict per example, augmented in place with that
    example's metric values.  Not applicable to summarization.
    """
    assert (args.task != 'summarize')
    logger.info(' ***** Running bleu evaluation on {} data*****'.format(split_tag))
    logger.info(' Num examples = %d', len(eval_examples))
    logger.info(' Batch size = %d', args.eval_batch_size)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True)
    model.eval()
    pred_ids = []
    for batch in tqdm(eval_dataloader, total=len(eval_dataloader), desc='Eval bleu for {} set'.format(split_tag)):
        source_ids = batch[0].to(args.device)
        source_mask = source_ids.ne(tokenizer.pad_token_id)
        with torch.no_grad():
            # Beam search; only the top sequence per input is returned.
            preds = model.generate(source_ids, attention_mask=source_mask, use_cache=True, num_beams=args.beam_size, max_length=args.max_target_length)
            top_preds = list(preds.cpu().numpy())
            pred_ids.extend(top_preds)
    # NOTE: `id` shadows the builtin here; kept as-is (doc-only change).
    pred_nls = [tokenizer.decode(id, skip_special_tokens=True, clean_up_tokenization_spaces=False) for id in pred_ids]
    output_fn = os.path.join(args.res_dir, 'test_{}.output'.format(criteria))
    gold_fn = os.path.join(args.res_dir, 'test_{}.gold'.format(criteria))
    src_fn = os.path.join(args.res_dir, 'test_{}.src'.format(criteria))
    # `predictions` is unused below; `dev_accs` collects exact-match flags.
    (dev_accs, predictions) = ([], [])
    results = []
    with open(output_fn, 'w') as f, open(gold_fn, 'w') as f1, open(src_fn, 'w') as f2:
        for (pred_nl, gold) in zip(pred_nls, eval_examples):
            assert isinstance(gold, Example)
            dev_accs.append((pred_nl.strip() == gold.target.strip()))
            # This is the example's own meta_data dict: the metric keys
            # written below mutate it in place.
            metadata = gold.meta_data
            f.write((pred_nl.strip() + '\n'))
            f1.write((gold.target.strip() + '\n'))
            f2.write((gold.source.strip() + '\n'))
            example_result = calc_code_bleu.evaluate_per_example(reference=gold.target.strip(), hypothesis=pred_nl.strip(), lang=metadata['lang'])
            for key in example_result:
                metadata[key] = example_result[key]
            results.append(metadata)
    return results
|
def eval_bleu_epoch(args, eval_data, eval_examples, model, tokenizer, split_tag, criteria):
    """Generate predictions for one split and score BLEU/EM (and CodeBLEU).

    Writes ``<split_tag>_<criteria>.{output,gold,src}`` under
    ``args.res_dir`` and returns a dict with keys ``em``, ``bleu`` and,
    for non-summarization tasks, ``codebleu``.
    """
    logger.info(' ***** Running bleu evaluation on {} data*****'.format(split_tag))
    logger.info(' Num examples = %d', len(eval_examples))
    logger.info(' Batch size = %d', args.eval_batch_size)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size, num_workers=4, pin_memory=True)
    model.eval()
    pred_ids = []
    for batch in tqdm(eval_dataloader, total=len(eval_dataloader), desc='Eval bleu for {} set'.format(split_tag)):
        source_ids = batch[0].to(args.device)
        source_mask = source_ids.ne(tokenizer.pad_token_id)
        with torch.no_grad():
            # Beam search; only the top sequence per input is kept.
            preds = model.generate(source_ids, attention_mask=source_mask, use_cache=True, num_beams=args.beam_size, max_length=args.max_target_length)
            top_preds = list(preds.cpu().numpy())
            pred_ids.extend(top_preds)
    # NOTE: `id` shadows the builtin here; kept as-is (doc-only change).
    pred_nls = [tokenizer.decode(id, skip_special_tokens=True, clean_up_tokenization_spaces=False) for id in pred_ids]
    output_fn = os.path.join(args.res_dir, f'{split_tag}_{criteria}.output')
    gold_fn = os.path.join(args.res_dir, f'{split_tag}_{criteria}.gold')
    src_fn = os.path.join(args.res_dir, f'{split_tag}_{criteria}.src')
    (dev_accs, predictions) = ([], [])
    with open(output_fn, 'w') as f, open(gold_fn, 'w') as f1, open(src_fn, 'w') as f2:
        for (pred_nl, gold) in zip(pred_nls, eval_examples):
            dev_accs.append((pred_nl.strip() == gold.target.strip()))
            if (args.task in ['summarize']):
                # Summarization files carry the example index as a prefix.
                predictions.append(((str(gold.idx) + '\t') + pred_nl))
                f.write((((str(gold.idx) + '\t') + pred_nl.strip()) + '\n'))
                f1.write((((str(gold.idx) + '\t') + gold.target.strip()) + '\n'))
                f2.write((((str(gold.idx) + '\t') + gold.source.strip()) + '\n'))
            else:
                f.write((pred_nl.strip() + '\n'))
                f1.write((gold.target.strip() + '\n'))
                f2.write((gold.source.strip() + '\n'))
    codebleu = None
    bleu = round(_bleu(gold_fn, output_fn), 2)
    if (args.task not in ['summarize']):
        if ('refine' in args.task):
            # Refinement is always on Java code.
            args.lang = 'java'
        codebleu = calc_code_bleu.get_codebleu(gold_fn, output_fn, args.lang)
    result = {'em': (np.mean(dev_accs) * 100), 'bleu': bleu}
    if (codebleu is not None):
        result['codebleu'] = (codebleu * 100)
    logger.info('***** Eval results *****')
    for key in sorted(result.keys()):
        logger.info(' %s = %s', key, str(round(result[key], 4)))
    return result
|
def add_language_to_args(args):
    """Derive ``args.lang`` from ``args.task`` / ``args.sub_task``.

    ``concode`` and every ``refine`` variant are Java tasks; ``translate``
    tasks target C# only for the ``java-cs`` direction.  For any other
    task ``args.lang`` is left untouched.
    """
    task = args.task
    if task == 'concode' or 'refine' in task:
        args.lang = 'java'
    elif 'translate' in task:
        args.lang = 'c_sharp' if args.sub_task == 'java-cs' else 'java'
|
def main():
    """CLI entry point: build a seq2seq code model, optionally train it,
    then run final BLEU/EM evaluation on the requested splits.

    Training performs per-epoch ppl validation (saved under
    ``checkpoint-best-ppl``) and optional BLEU/EM validation (saved under
    ``checkpoint-best-bleu``) with early stopping controlled by
    ``args.patience``.
    """
    parser = argparse.ArgumentParser()
    args = add_args(parser)
    add_language_to_args(args)
    logger.info(args)
    t0 = time.time()
    if args.zero_shot:
        # Zero-shot mode: evaluate the loaded model without any training.
        args.output_dir = (args.output_dir + '-zero-shot')
        if (not os.path.exists(args.output_dir)):
            os.makedirs(args.output_dir, exist_ok=True)
        args.num_train_epochs = 0
    set_dist(args)
    set_seed(args)
    (config, model, tokenizer) = build_or_load_gen_model(args)
    model.to(args.device)
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    pool = multiprocessing.Pool(args.cpu_cont)
    (args.train_filename, args.dev_filename, args.test_filename) = get_filenames(args.data_dir, args.task, args.sub_task)
    fa = open(os.path.join(args.output_dir, 'summary.log'), 'a+')
    # Convert requested batch sizes to per-step sizes under grad accumulation.
    args.eval_batch_size = int((args.eval_batch_size / args.gradient_accumulation_steps))
    args.train_batch_size = int((args.train_batch_size / args.gradient_accumulation_steps))
    if args.do_train:
        if ((args.local_rank in [(- 1), 0]) and (args.data_num == (- 1))):
            # TensorBoard logging only on the main process of full-data runs.
            summary_fn = '{}/{}'.format(args.summary_dir, '/'.join(args.output_dir.split('/')[1:]))
            tb_writer = SummaryWriter(summary_fn)
        (train_examples, train_data) = load_and_cache_gen_data(args, args.train_filename, pool, tokenizer, 'train')
        train_sampler = (RandomSampler(train_data) if (args.local_rank == (- 1)) else DistributedSampler(train_data))
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=4, pin_memory=True)
        # Standard no-decay list: biases and LayerNorm weights.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        # NOTE(review): scheduler.step() fires once per optimizer step (every
        # gradient_accumulation_steps batches), while this counts one step per
        # batch — confirm the intended warmup/decay horizon.
        num_train_optimization_steps = (args.num_train_epochs * len(train_dataloader))
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=num_train_optimization_steps)
        train_example_num = len(train_data)
        logger.info('***** Running training *****')
        logger.info(' Num examples = %d', train_example_num)
        logger.info(' Batch size = %d', args.train_batch_size)
        logger.info(' Batch num = %d', math.ceil((train_example_num / args.train_batch_size)))
        logger.info(' Num epoch = %d', args.num_train_epochs)
        dev_dataset = {}
        (global_step, best_bleu_em, best_ppl) = (0, (- 1), 1000000.0)
        # When BLEU validation is disabled, its patience counter is set high
        # so it never triggers early stopping.
        (not_loss_dec_cnt, not_bleu_em_inc_cnt) = (0, (0 if args.do_eval_bleu else 1000000.0))
        for cur_epoch in range(args.start_epoch, int(args.num_train_epochs)):
            bar = tqdm(train_dataloader, total=len(train_dataloader), desc='Training')
            (nb_tr_examples, nb_tr_steps, tr_loss) = (0, 0, 0)
            model.train()
            for (step, batch) in enumerate(bar):
                batch = tuple((t.to(args.device) for t in batch))
                (source_ids, target_ids) = batch
                source_mask = source_ids.ne(tokenizer.pad_token_id)
                target_mask = target_ids.ne(tokenizer.pad_token_id)
                outputs = model(input_ids=source_ids, attention_mask=source_mask, labels=target_ids, decoder_attention_mask=target_mask)
                loss = outputs.loss
                if (args.n_gpu > 1):
                    loss = loss.mean()
                if (args.gradient_accumulation_steps > 1):
                    loss = (loss / args.gradient_accumulation_steps)
                tr_loss += loss.item()
                nb_tr_examples += source_ids.size(0)
                nb_tr_steps += 1
                loss.backward()
                if ((nb_tr_steps % args.gradient_accumulation_steps) == 0):
                    optimizer.step()
                    optimizer.zero_grad()
                    scheduler.step()
                    global_step += 1
                    train_loss = round(((tr_loss * args.gradient_accumulation_steps) / (nb_tr_steps + 1)), 4)
                    bar.set_description('[{}] Train loss {}'.format(cur_epoch, round(train_loss, 3)))
            if args.do_eval:
                # Per-epoch perplexity validation; dev tensors are cached in
                # dev_dataset so they are only built once.
                if ('dev_loss' in dev_dataset):
                    (eval_examples, eval_data) = dev_dataset['dev_loss']
                else:
                    (eval_examples, eval_data) = load_and_cache_gen_data(args, args.dev_filename, pool, tokenizer, 'dev')
                    dev_dataset['dev_loss'] = (eval_examples, eval_data)
                eval_ppl = eval_ppl_epoch(args, eval_data, eval_examples, model, tokenizer)
                result = {'epoch': cur_epoch, 'global_step': global_step, 'eval_ppl': eval_ppl}
                for key in sorted(result.keys()):
                    logger.info(' %s = %s', key, str(result[key]))
                logger.info((' ' + ('*' * 20)))
                if (args.data_num == (- 1)):
                    tb_writer.add_scalar('dev_ppl', eval_ppl, cur_epoch)
                if args.save_last_checkpoints:
                    last_output_dir = os.path.join(args.output_dir, f'checkpoint-epoch-{cur_epoch}')
                    if (not os.path.exists(last_output_dir)):
                        os.makedirs(last_output_dir)
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    output_model_file = os.path.join(last_output_dir, 'pytorch_model.bin')
                    torch.save(model_to_save.state_dict(), output_model_file)
                    logger.info('Save the last model into %s', output_model_file)
                if (eval_ppl < best_ppl):
                    not_loss_dec_cnt = 0
                    logger.info(' Best ppl:%s', eval_ppl)
                    logger.info((' ' + ('*' * 20)))
                    fa.write(('[%d] Best ppl changed into %.4f\n' % (cur_epoch, eval_ppl)))
                    best_ppl = eval_ppl
                    output_dir = os.path.join(args.output_dir, 'checkpoint-best-ppl')
                    if (not os.path.exists(output_dir)):
                        os.makedirs(output_dir)
                    if args.always_save_model:
                        model_to_save = (model.module if hasattr(model, 'module') else model)
                        output_model_file = os.path.join(output_dir, 'pytorch_model.bin')
                        torch.save(model_to_save.state_dict(), output_model_file)
                        logger.info('Save the best ppl model into %s', output_model_file)
                else:
                    not_loss_dec_cnt += 1
                    logger.info('Ppl does not decrease for %d epochs', not_loss_dec_cnt)
                logger.info('***** CUDA.empty_cache() *****')
                torch.cuda.empty_cache()
                if args.do_eval_bleu:
                    # BLEU/EM validation on a small sampled dev subset.
                    (eval_examples, eval_data) = load_and_cache_gen_data(args, args.dev_filename, pool, tokenizer, 'dev', only_src=True, is_sample=True)
                    result = eval_bleu_epoch(args, eval_data, eval_examples, model, tokenizer, 'dev', ('e%d' % cur_epoch))
                    (dev_bleu, dev_em) = (result['bleu'], result['em'])
                    dev_bleu_em = ((0.3 * dev_bleu) + (0.7 * dev_em))
                    if (args.data_num == (- 1)):
                        tb_writer.add_scalar('dev_bleu_em', dev_bleu_em, cur_epoch)
                    if (dev_bleu_em > best_bleu_em):
                        not_bleu_em_inc_cnt = 0
                        logger.info(' [%d] Best bleu+em: %.2f (bleu: %.2f, em: %.2f)', cur_epoch, dev_bleu_em, dev_bleu, dev_em)
                        logger.info((' ' + ('*' * 20)))
                        best_bleu_em = dev_bleu_em
                        fa.write(('[%d] Best bleu+em changed into %.2f (bleu: %.2f, em: %.2f)\n' % (cur_epoch, best_bleu_em, dev_bleu, dev_em)))
                        output_dir = os.path.join(args.output_dir, 'checkpoint-best-bleu')
                        if (not os.path.exists(output_dir)):
                            os.makedirs(output_dir)
                        if ((args.data_num == (- 1)) or args.always_save_model):
                            model_to_save = (model.module if hasattr(model, 'module') else model)
                            output_model_file = os.path.join(output_dir, 'pytorch_model.bin')
                            torch.save(model_to_save.state_dict(), output_model_file)
                            logger.info('[%d] Save the best bleu model into %s', cur_epoch, output_model_file)
                    else:
                        not_bleu_em_inc_cnt += 1
                        logger.info('[%d] Bleu does not increase for %d epochs', cur_epoch, not_bleu_em_inc_cnt)
                        fa.write(('[%d] Best bleu+em (%.2f) does not drop changed for %d epochs, cur bleu+em: %.2f (bleu: %.2f, em: %.2f)\n' % (cur_epoch, best_bleu_em, not_bleu_em_inc_cnt, dev_bleu_em, dev_bleu, dev_em)))
                        if all([(x > args.patience) for x in [not_bleu_em_inc_cnt]]):
                            stop_early_str = ('[%d] Early stop as not_bleu_em_inc_cnt=%d, and not_loss_dec_cnt=%d\n' % (cur_epoch, not_bleu_em_inc_cnt, not_loss_dec_cnt))
                            logger.info(stop_early_str)
                            fa.write(stop_early_str)
                            break
            logger.info('***** CUDA.empty_cache() *****')
            torch.cuda.empty_cache()
        if ((args.local_rank in [(- 1), 0]) and (args.data_num == (- 1))):
            tb_writer.close()
        logger.info('Finish training and take %s', get_elapse_time(t0))
    if (args.do_eval or args.do_test):
        # FIX: this previously read `args.do_eval or args.do_eval`, so
        # --do_test on its own never reached the final evaluation below.
        if (args.load_model_path is None):
            print('Either train the model. Or supply the path of the trained model!')
            exit()
        data_part = []
        if args.do_eval:
            data_part.append('dev')
        if args.do_test:
            data_part.append('test')
        for part in data_part:
            logger.info(('=' * 100))
            logger.info((' ' + f'***** Testing on {part} dataset *****'))
            logger.info(' Batch size = %d', args.eval_batch_size)
            # FIX: the 'dev' split previously loaded args.test_filename.
            part_filename = (args.dev_filename if (part == 'dev') else args.test_filename)
            (eval_examples, eval_data) = load_and_cache_gen_data(args, part_filename, pool, tokenizer, part, only_src=True, is_sample=False)
            # FIX: tag outputs with the split name ('dev'/'test') so the two
            # splits no longer overwrite each other's result files.
            result = eval_bleu_epoch(args, eval_data, eval_examples, model, tokenizer, part, f'{args.task}')
            (test_bleu, test_em) = (result['bleu'], result['em'])
            test_codebleu = (result['codebleu'] if ('codebleu' in result) else 0)
            result_str = ('bleu-4: %.2f, em: %.4f, codebleu: %.4f\n' % (test_bleu, test_em, test_codebleu))
            logger.info(result_str)
            fa.write(result_str)
            logger.info(('=' * 100))
    logger.info('Finish and take {}'.format(get_elapse_time(t0)))
    fa.write('Finish and take {}'.format(get_elapse_time(t0)))
    fa.close()
|
def add_lang_by_task(target_str, task, sub_task):
    """Prepend a target-language tag appropriate for *task*.

    Summarization targets are English, refine/concode targets are Java,
    and translation targets depend on the direction (*sub_task*).
    Defect targets and unknown tasks are returned unchanged (they may not
    even be strings).
    """
    prefix = None
    if task == 'summarize':
        prefix = '<en> '
    elif task in ('refine', 'concode'):
        prefix = '<java> '
    elif task == 'translate':
        prefix = '<c_sharp> ' if sub_task == 'java-cs' else '<java> '
    return target_str if prefix is None else (prefix + target_str)
|
def convert_examples_to_features(item):
    """Tokenize one generation example into :class:`InputFeatures`.

    *item* is the packed tuple ``(example, example_index, tokenizer, args,
    stage)`` so this function can be dispatched through ``pool.map``.
    """
    (example, example_index, tokenizer, args, stage) = item
    if ((args.model_type in ['t5', 'codet5']) and args.add_task_prefix):
        # T5-style models condition on a "task [sub_task]: ..." prefix.
        if (args.sub_task != 'none'):
            source_str = '{} {}: {}'.format(args.task, args.sub_task, example.source)
        else:
            source_str = '{}: {}'.format(args.task, example.source)
    else:
        source_str = example.source
    # A literal '</s>' in the text would collide with the EOS token below.
    source_str = source_str.replace('</s>', '<unk>')
    source_ids = tokenizer.encode(source_str, max_length=args.max_source_length, padding='max_length', truncation=True)
    # Exactly one EOS must survive truncation/padding.
    assert (source_ids.count(tokenizer.eos_token_id) == 1)
    if (stage == 'test'):
        # No targets are encoded at test time.
        target_ids = []
    else:
        target_str = example.target
        if args.add_lang_ids:
            target_str = add_lang_by_task(example.target, args.task, args.sub_task)
        if (args.task in ['defect', 'clone']):
            # Binary labels are verbalized so the seq2seq decoder can emit them.
            if (target_str == 0):
                target_str = 'false'
            elif (target_str == 1):
                target_str = 'true'
            else:
                raise NameError
        target_str = target_str.replace('</s>', '<unk>')
        target_ids = tokenizer.encode(target_str, max_length=args.max_target_length, padding='max_length', truncation=True)
        assert (target_ids.count(tokenizer.eos_token_id) == 1)
    return InputFeatures(example_index, source_ids, target_ids, url=example.url)
|
def convert_clone_examples_to_features(item):
    """Tokenize one clone pair into a single concatenated id sequence.

    *item* is ``(example, example_index, tokenizer, args)`` packed for
    ``pool.map``; both code snippets are encoded to the source-length
    budget and concatenated.
    """
    (example, example_index, tokenizer, args) = item
    use_prefix = (args.model_type in ['t5', 'codet5']) and args.add_task_prefix
    if use_prefix:
        # T5-style models condition on a task prefix.
        source_str = '{}: {}'.format(args.task, example.source)
        target_str = '{}: {}'.format(args.task, example.target)
    else:
        source_str, target_str = example.source, example.target

    def _encode(text):
        return tokenizer.encode(text, max_length=args.max_source_length,
                                padding='max_length', truncation=True)

    source_ids = _encode(source_str) + _encode(target_str)
    return CloneInputFeatures(example_index, source_ids, example.label, example.url1, example.url2)
|
def convert_defect_examples_to_features(item):
    """Tokenize one defect-detection example; its label is kept unchanged.

    *item* is ``(example, example_index, tokenizer, args)`` packed for
    ``pool.map``.
    """
    (example, example_index, tokenizer, args) = item
    use_prefix = (args.model_type in ['t5', 'codet5']) and args.add_task_prefix
    source_str = '{}: {}'.format(args.task, example.source) if use_prefix else example.source
    token_ids = tokenizer.encode(source_str, max_length=args.max_source_length,
                                 padding='max_length', truncation=True)
    return DefectInputFeatures(example_index, token_ids, example.target)
|
class CloneInputFeatures(object):
    """Tokenized features for a single clone-detection pair."""

    def __init__(self, example_id, source_ids, label, url1, url2):
        # source_ids holds both snippets' token ids concatenated.
        self.example_id, self.source_ids, self.label = example_id, source_ids, label
        self.url1, self.url2 = url1, url2
|
class DefectInputFeatures(object):
    """Tokenized features for a single defect-detection example."""

    def __init__(self, example_id, source_ids, label):
        self.example_id, self.source_ids, self.label = example_id, source_ids, label
|
class InputFeatures(object):
    """Tokenized source/target id pair for a single generation example."""

    def __init__(self, example_id, source_ids, target_ids, url=None):
        self.example_id, self.source_ids, self.target_ids = example_id, source_ids, target_ids
        self.url = url
|
class Example(object):
    """One raw training/test example before tokenization."""

    def __init__(self, idx, source, target, url=None, task='', sub_task='', meta_data=None):
        self.idx, self.source, self.target = idx, source, target
        self.url = url
        self.task, self.sub_task = task, sub_task
        self.meta_data = meta_data
|
class CloneExample(object):
    """One raw clone-detection pair (two code snippets plus a 0/1 label)."""

    def __init__(self, code1, code2, label, url1, url2):
        # The two snippets are stored under source/target for uniformity
        # with the generation-style Example class.
        self.source, self.target = code1, code2
        self.label = label
        self.url1, self.url2 = url1, url2
|
def read_translate_examples(filename, data_num):
    """Read parallel translation examples.

    *filename* is ``'<src_file>,<tgt_file>'``; lines are paired positionally
    and at most *data_num* examples are returned (all of them when -1).
    """
    parts = filename.split(',')
    assert (len(parts) == 2)
    (src_filename, trg_filename) = parts
    examples = []
    with open(src_filename) as src_f, open(trg_filename) as trg_f:
        for idx, (src_line, trg_line) in enumerate(zip(src_f, trg_f)):
            examples.append(Example(idx=idx, source=src_line.strip(), target=trg_line.strip()))
            if (idx + 1) == data_num:
                break
    return examples
|
def read_refine_examples(filename, data_num):
    """Read buggy/fixed code pairs for the refinement task.

    *filename* is ``'<buggy_file>,<fixed_file>'``; returns at most
    *data_num* examples (all of them when -1).
    """
    parts = filename.split(',')
    assert (len(parts) == 2)
    (src_filename, trg_filename) = parts
    examples = []
    with open(src_filename) as src_f, open(trg_filename) as trg_f:
        for idx, (buggy, fixed) in enumerate(zip(src_f, trg_f)):
            examples.append(Example(idx=idx, source=buggy.strip(), target=fixed.strip()))
            if (idx + 1) == data_num:
                break
    return examples
|
def read_concode_examples(filename, data_num):
    """Read Concode (natural language -> Java) examples from a jsonl file.

    Each line holds ``nl`` and ``code`` fields; at most *data_num* examples
    are returned (all of them when -1).
    """
    examples = []
    with open(filename) as f:
        for idx, line in enumerate(f):
            record = json.loads(line)
            examples.append(Example(idx=idx, source=record['nl'].strip(), target=record['code'].strip()))
            if (idx + 1) == data_num:
                break
    return examples
|
def read_summarize_examples(filename, data_num):
    """Read code-summarization examples from a jsonl file.

    Joins the pre-tokenized ``code_tokens`` / ``docstring_tokens`` lists
    back into single whitespace-normalized strings; returns at most
    *data_num* examples (all of them when -1).
    """
    examples = []
    with open(filename, encoding='utf-8') as f:
        for idx, raw in enumerate(f):
            js = json.loads(raw.strip())
            if 'idx' not in js:
                js['idx'] = idx
            # Newlines inside code become spaces; docstring newlines are dropped.
            code = ' '.join(' '.join(js['code_tokens']).replace('\n', ' ').strip().split())
            nl = ' '.join(' '.join(js['docstring_tokens']).replace('\n', '').strip().split())
            examples.append(Example(idx=idx, source=code, target=nl))
            if (idx + 1) == data_num:
                break
    return examples
|
def read_defect_examples(filename, data_num):
    """Read defect-detection examples (code plus a 0/1 target) from jsonl.

    Returns at most *data_num* examples (all of them when -1); the example
    index comes from the record's own ``idx`` field.
    """
    examples = []
    with open(filename, encoding='utf-8') as f:
        for idx, raw in enumerate(f):
            js = json.loads(raw.strip())
            # Collapse all whitespace runs in the function body.
            code = ' '.join(js['func'].split())
            examples.append(Example(idx=js['idx'], source=code, target=js['target']))
            if (idx + 1) == data_num:
                break
    return examples
|
def read_clone_examples(filename, data_num):
    """Read clone-detection pairs.

    *filename* holds tab-separated ``url1 url2 label`` lines; the code for
    each url is looked up in the sibling ``data.jsonl`` file.  Pairs with a
    missing url are skipped and do not count towards *data_num* (all pairs
    are returned when it is -1).
    """
    url_to_code = {}
    data_jsonl = '/'.join(filename.split('/')[:(- 1)]) + '/data.jsonl'
    with open(data_jsonl) as f:
        for raw in f:
            js = json.loads(raw.strip())
            url_to_code[js['idx']] = ' '.join(js['func'].split())
    data = []
    kept = 0
    with open(filename) as f:
        for raw in f:
            (url1, url2, label) = raw.strip().split('\t')
            if (url1 not in url_to_code) or (url2 not in url_to_code):
                continue
            data.append(CloneExample(url_to_code[url1], url_to_code[url2],
                                     0 if label == '0' else 1, url1, url2))
            kept += 1
            if kept == data_num:
                break
    return data
|
def read_pretrain_eval_data(pretrain_data_dir):
    """Load per-language validation examples for pretraining evaluation.

    Scans *pretrain_data_dir* for ``<lang>_valid.jsonl`` files and returns
    ``{lang: [Example, ...]}``.  Each jsonl line must provide ``source``,
    ``target`` and ``transformer`` fields; the language and transformer
    name are recorded in each example's ``meta_data``.
    """
    all_valid_files = [f for f in os.listdir(pretrain_data_dir) if f.endswith('_valid.jsonl')]
    languages = [f[:(- 12)] for f in all_valid_files]  # strip '_valid.jsonl'
    print(f'Found Languages : {languages}')
    examples_dict = {}
    for lang in languages:
        examples = []
        # FIX: the file handle was previously opened without ever being
        # closed; a context manager guarantees cleanup.
        with open(os.path.join(pretrain_data_dir, (lang + '_valid.jsonl'))) as fp:
            for (li, line) in enumerate(fp):
                d = json.loads(line.strip())
                examples.append(Example(idx=li, source=d['source'], target=d['target'], meta_data={'transformer': d['transformer'], 'lang': lang}))
        examples_dict[lang] = examples
    return examples_dict
|
def load_pretrain_eval_data(args, pretrain_datadir, pool, tokenizer):
    """Tokenize the per-language pretraining validation sets.

    Returns ``(all_lang_examples, (complete_examples, complete_data))``
    where each ``all_lang_examples[lang]`` entry is replaced in place by a
    ``(examples, TensorDataset)`` pair.
    """
    all_lang_examples = read_pretrain_eval_data(pretrain_datadir)
    (complete_examples, complete_data) = ([], [])
    for lang in all_lang_examples.keys():
        examples = all_lang_examples[lang]
        tuple_examples = [(example, idx, tokenizer, args, 'valid') for (idx, example) in enumerate(examples)]
        features = pool.map(convert_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
        all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
        all_target_ids = torch.tensor([f.target_ids for f in features], dtype=torch.long)
        data = TensorDataset(all_source_ids, all_target_ids)
        all_lang_examples[lang] = (examples, data)
        complete_examples.extend(examples)
        # NOTE(review): extending a plain list with a TensorDataset yields a
        # list of per-item (source, target) tensor tuples rather than a
        # combined TensorDataset — confirm callers expect that shape.
        complete_data.extend(data)
    return (all_lang_examples, (complete_examples, complete_data))
|
def load_and_cache_gen_data(args, filename, pool, tokenizer, split_tag, only_src=False, is_sample=False):
    """Load generation examples from *filename* and tokenize them.

    Returns ``(examples, TensorDataset)``.  Full (non-sampled) datasets are
    cached as tensors under ``args.cache_path``; ``is_sample`` draws up to
    50 random examples (used for quick BLEU validation) and bypasses the
    cache in both directions.  For the 'test' split or ``only_src`` the
    dataset contains source ids only.
    """
    data_tag = ('_all' if (args.data_num == (- 1)) else ('_%d' % args.data_num))
    cache_fn = '{}/{}.pt'.format(args.cache_path, ((split_tag + ('_src' if only_src else '')) + data_tag))
    examples = read_examples(filename, args.data_num, args.task)
    if is_sample:
        examples = random.sample(examples, min(50, len(examples)))
    if (split_tag == 'train'):
        # Full token-level stats only for the training split (more expensive).
        calc_stats(examples, tokenizer, is_tokenize=True)
    else:
        calc_stats(examples)
    if (os.path.exists(cache_fn) and (not is_sample)):
        logger.info('Load cache data from %s', cache_fn)
        data = torch.load(cache_fn)
    else:
        if is_sample:
            logger.info('Sample 50 data for computing bleu from %s', filename)
        else:
            logger.info('Create cache data into %s', cache_fn)
        tuple_examples = [(example, idx, tokenizer, args, split_tag) for (idx, example) in enumerate(examples)]
        features = pool.map(convert_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
        all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
        if ((split_tag == 'test') or only_src):
            # Target ids are empty / unused for source-only datasets.
            data = TensorDataset(all_source_ids)
        else:
            all_target_ids = torch.tensor([f.target_ids for f in features], dtype=torch.long)
            data = TensorDataset(all_source_ids, all_target_ids)
        if ((args.local_rank in [(- 1), 0]) and (not is_sample)):
            # Only the main process writes the cache; samples are never cached.
            torch.save(data, cache_fn)
    return (examples, data)
|
def load_and_cache_clone_data(args, filename, pool, tokenizer, split_tag, is_sample=False):
    """Load (and cache) tokenized clone-detection features for *split_tag*.

    Returns ``(examples, TensorDataset(source_ids, labels))``.  Full-data
    runs (``args.data_num == -1``) are cached on disk; sampled runs never
    read from or write to the cache.
    """
    # FIX: the cache name previously dropped split_tag whenever
    # args.data_num != -1, so train/dev/test all shared one cache file.
    data_tag = ('_all' if (args.data_num == (- 1)) else ('_%d' % args.data_num))
    cache_fn = '{}/{}.pt'.format(args.cache_path, (split_tag + data_tag))
    examples = read_examples(filename, args.data_num, args.task)
    if is_sample:
        # 10% random subset for quick evaluation.
        examples = random.sample(examples, int((len(examples) * 0.1)))
        calc_stats(examples, tokenizer, is_tokenize=True)
    if (os.path.exists(cache_fn) and (not is_sample)):
        # FIX: a sampled run previously loaded the full cached dataset here.
        logger.info('Load cache data from %s', cache_fn)
        data = torch.load(cache_fn)
    else:
        if is_sample:
            logger.info('Sample 10 percent of data from %s', filename)
        elif (args.data_num == (- 1)):
            logger.info('Create cache data into %s', cache_fn)
        tuple_examples = [(example, idx, tokenizer, args) for (idx, example) in enumerate(examples)]
        features = pool.map(convert_clone_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
        all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
        all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        data = TensorDataset(all_source_ids, all_labels)
        if ((args.local_rank in [(- 1), 0]) and (args.data_num == (- 1)) and (not is_sample)):
            # FIX: sampled data was previously written into the shared cache.
            torch.save(data, cache_fn)
    return (examples, data)
|
def load_and_cache_defect_data(args, filename, pool, tokenizer, split_tag, is_sample=False):
    """Return (examples, TensorDataset) for the defect-detection task, caching
    the tensorized dataset under args.cache_path/<split_tag>."""
    cache_fn = os.path.join(args.cache_path, split_tag)
    examples = read_examples(filename, args.data_num, args.task)
    if is_sample:
        # Quick-eval mode: keep a random 10% of the examples.
        subset_size = int(len(examples) * 0.1)
        examples = random.sample(examples, subset_size)
        calc_stats(examples, tokenizer, is_tokenize=True)
    if os.path.exists(cache_fn):
        logger.info('Load cache data from %s', cache_fn)
        return (examples, torch.load(cache_fn))
    if is_sample:
        logger.info('Sample 10 percent of data from %s', filename)
    elif args.data_num == (- 1):
        logger.info('Create cache data into %s', cache_fn)
    work_items = [(ex, i, tokenizer, args) for (i, ex) in enumerate(examples)]
    features = pool.map(convert_defect_examples_to_features, tqdm(work_items, total=len(work_items)))
    source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
    labels = torch.tensor([f.label for f in features], dtype=torch.long)
    data = TensorDataset(source_ids, labels)
    # Cache only on rank 0 (or single process) and only for the full dataset.
    if (args.local_rank in [(- 1), 0]) and (args.data_num == (- 1)):
        torch.save(data, cache_fn)
    return (examples, data)
|
def load_and_cache_multi_gen_data(args, pool, tokenizer, split_tag, only_src=False, is_sample=False):
    """Build or load a cached {task[_subtask]: (examples, TensorDataset)} dict
    spanning all generation tasks (summarize/translate/refine/concode/defect).

    NOTE(review): args.task, args.sub_task and the max length fields are
    mutated in place and are left at the last task's values after this
    returns — callers should not rely on args afterwards.
    """
    cache_fn = os.path.join(args.cache_path, split_tag)
    if (os.path.exists(cache_fn) and (not is_sample)):
        logger.info('Load cache data from %s', cache_fn)
        examples_data_dict = torch.load(cache_fn)
    else:
        examples_data_dict = {}
        task_list = ['summarize', 'translate', 'refine', 'concode', 'defect', 'refine-concrete', 'refine-commit', 'translate-500']
        for task in task_list:
            # Each task has its own sub-tasks (languages / directions / sizes).
            if (task == 'summarize'):
                sub_tasks = ['ruby', 'javascript', 'go', 'python', 'java', 'php']
            elif (task == 'translate'):
                sub_tasks = ['java-cs', 'cs-java']
            elif ('refine' in task):
                sub_tasks = ['small', 'medium']
            else:
                sub_tasks = ['none']
            args.task = task
            for sub_task in sub_tasks:
                args.sub_task = sub_task
                # Per-task sequence-length budgets (mirrors the CodeT5 setup).
                if (task == 'summarize'):
                    args.max_source_length = 256
                    args.max_target_length = 128
                elif ('translate' in task):
                    args.max_source_length = 320
                    args.max_target_length = 256
                elif ('refine' in task):
                    if (sub_task == 'small'):
                        args.max_source_length = 130
                        args.max_target_length = 120
                    else:
                        args.max_source_length = 240
                        args.max_target_length = 240
                elif (task == 'concode'):
                    args.max_source_length = 320
                    args.max_target_length = 150
                elif (task == 'defect'):
                    args.max_source_length = 512
                    # Target is just a class label rendered as text, so 3 tokens suffice.
                    args.max_target_length = 3
                filename = get_filenames(args.data_dir, args.task, args.sub_task, split_tag)
                examples = read_examples(filename, args.data_num, args.task)
                if is_sample:
                    # Cap each task at 5k examples for quick runs.
                    examples = random.sample(examples, min(5000, len(examples)))
                if (split_tag == 'train'):
                    calc_stats(examples, tokenizer, is_tokenize=True)
                else:
                    calc_stats(examples)
                tuple_examples = [(example, idx, tokenizer, args, split_tag) for (idx, example) in enumerate(examples)]
                if (args.data_num == (- 1)):
                    features = pool.map(convert_examples_to_features, tqdm(tuple_examples, total=len(tuple_examples)))
                else:
                    # Small debug subsets are converted serially.
                    features = [convert_examples_to_features(x) for x in tuple_examples]
                all_source_ids = torch.tensor([f.source_ids for f in features], dtype=torch.long)
                if only_src:
                    data = TensorDataset(all_source_ids)
                else:
                    all_target_ids = torch.tensor([f.target_ids for f in features], dtype=torch.long)
                    data = TensorDataset(all_source_ids, all_target_ids)
                examples_data_dict[('{}_{}'.format(task, sub_task) if (sub_task != 'none') else task)] = (examples, data)
        # Cache the whole dict only on rank 0 and only for full (non-sample) runs.
        if ((args.local_rank in [(- 1), 0]) and (not is_sample)):
            torch.save(examples_data_dict, cache_fn)
            logger.info('Save data into %s', cache_fn)
    return examples_data_dict
|
def get_filenames(data_root, task, sub_task, split=''):
    """Resolve dataset file path(s) for a task/sub-task.

    Returns a single path for split in {'train', 'dev', 'test'}, otherwise the
    (train, dev, test) tuple. Paired-file tasks (refine/translate) return a
    comma-separated 'src,tgt' string.

    Raises:
        ValueError: if the task is not recognized (previously this surfaced
            as an opaque UnboundLocalError on train_fn).
    """
    if (task == 'concode'):
        data_dir = '{}/{}'.format(data_root, task)
        train_fn = '{}/train.json'.format(data_dir)
        dev_fn = '{}/dev.json'.format(data_dir)
        test_fn = '{}/test.json'.format(data_dir)
    elif (task == 'summarize'):
        data_dir = '{}/{}/{}'.format(data_root, task, sub_task)
        train_fn = '{}/train.jsonl'.format(data_dir)
        dev_fn = '{}/valid.jsonl'.format(data_dir)
        test_fn = '{}/test.jsonl'.format(data_dir)
    elif ('refine' in task):
        # Also matches 'refine-concrete' and 'refine-commit'.
        data_dir = '{}/{}/{}'.format(data_root, task, sub_task)
        train_fn = '{}/train.buggy-fixed.buggy,{}/train.buggy-fixed.fixed'.format(data_dir, data_dir)
        dev_fn = '{}/valid.buggy-fixed.buggy,{}/valid.buggy-fixed.fixed'.format(data_dir, data_dir)
        test_fn = '{}/test.buggy-fixed.buggy,{}/test.buggy-fixed.fixed'.format(data_dir, data_dir)
    elif ('translate' in task):
        # Also matches 'translate-500'.
        data_dir = '{}/{}'.format(data_root, task)
        if (sub_task == 'cs-java'):
            train_fn = '{}/train.java-cs.txt.cs,{}/train.java-cs.txt.java'.format(data_dir, data_dir)
            dev_fn = '{}/valid.java-cs.txt.cs,{}/valid.java-cs.txt.java'.format(data_dir, data_dir)
            test_fn = '{}/test.java-cs.txt.cs,{}/test.java-cs.txt.java'.format(data_dir, data_dir)
        else:
            train_fn = '{}/train.java-cs.txt.java,{}/train.java-cs.txt.cs'.format(data_dir, data_dir)
            dev_fn = '{}/valid.java-cs.txt.java,{}/valid.java-cs.txt.cs'.format(data_dir, data_dir)
            test_fn = '{}/test.java-cs.txt.java,{}/test.java-cs.txt.cs'.format(data_dir, data_dir)
    elif (task == 'clone'):
        data_dir = '{}/{}'.format(data_root, task)
        train_fn = '{}/train.txt'.format(data_dir)
        dev_fn = '{}/valid.txt'.format(data_dir)
        test_fn = '{}/test.txt'.format(data_dir)
    elif (task == 'defect'):
        data_dir = '{}/{}'.format(data_root, task)
        train_fn = '{}/train.jsonl'.format(data_dir)
        dev_fn = '{}/valid.jsonl'.format(data_dir)
        test_fn = '{}/test.jsonl'.format(data_dir)
    else:
        raise ValueError('Unknown task: {}'.format(task))
    if (split == 'train'):
        return train_fn
    elif (split == 'dev'):
        return dev_fn
    elif (split == 'test'):
        return test_fn
    else:
        return (train_fn, dev_fn, test_fn)
|
def read_examples(filename, data_num, task):
    """Dispatch to the example-reader function registered for `task`."""
    readers = {
        'summarize': read_summarize_examples,
        'refine': read_refine_examples,
        'refine-concrete': read_refine_examples,
        'refine-commit': read_refine_examples,
        'translate': read_translate_examples,
        'translate-500': read_translate_examples,
        'concode': read_concode_examples,
        'clone': read_clone_examples,
        'defect': read_defect_examples,
    }
    reader = readers[task]
    return reader(filename, data_num)
|
def calc_stats(examples, tokenizer=None, is_tokenize=False):
    """Log average and max source/target lengths over `examples`.

    When is_tokenize is True, also log the same statistics measured in
    tokenizer tokens (requires `tokenizer`).
    """
    avg_src_len = []
    avg_trg_len = []
    avg_src_len_tokenize = []
    avg_trg_len_tokenize = []
    for ex in examples:
        # Whitespace-split lengths are always collected; tokenizer lengths only on demand.
        avg_src_len.append(len(ex.source.split()))
        avg_trg_len.append(len(str(ex.target).split()))
        if is_tokenize:
            avg_src_len_tokenize.append(len(tokenizer.tokenize(ex.source)))
            avg_trg_len_tokenize.append(len(tokenizer.tokenize(str(ex.target))))
    # The original duplicated this identical log call in both branches; emit it once.
    logger.info('Read %d examples, avg src len: %d, avg trg len: %d, max src len: %d, max trg len: %d', len(examples), np.mean(avg_src_len), np.mean(avg_trg_len), max(avg_src_len), max(avg_trg_len))
    if is_tokenize:
        logger.info('[TOKENIZE] avg src len: %d, avg trg len: %d, max src len: %d, max trg len: %d', np.mean(avg_src_len_tokenize), np.mean(avg_trg_len_tokenize), max(avg_src_len_tokenize), max(avg_trg_len_tokenize))
|
def get_elapse_time(t0):
    """Format the wall-clock time elapsed since `t0` as 'XhYm' (>= 1 hour) or 'Ym'."""
    elapse_time = (time.time() - t0)
    hour, rem = divmod(int(elapse_time), 3600)
    minute = rem // 60
    # BUG FIX: the original tested `> 3600`, so exactly one hour fell into the
    # minutes-only branch and was reported as '0m'. Use >= instead.
    if elapse_time >= 3600:
        return '{}h{}m'.format(hour, minute)
    return '{}m'.format(minute)
|
class DelayedKeyboardInterrupt():
    """Context manager that defers SIGINT (Ctrl-C) until the with-block exits,
    protecting a critical section from being interrupted mid-way."""
    def __enter__(self):
        self.signal_received = False
        # Install our recorder as the SIGINT handler, remembering the old one.
        self.old_handler = signal.signal(signal.SIGINT, self.handler)
    def handler(self, sig, frame):
        # Record the signal instead of raising KeyboardInterrupt immediately.
        self.signal_received = (sig, frame)
        logger.debug('SIGINT received. Delaying KeyboardInterrupt.')
    def __exit__(self, type, value, traceback):
        # Restore the previous handler, then replay a deferred SIGINT (if any).
        signal.signal(signal.SIGINT, self.old_handler)
        if self.signal_received:
            self.old_handler(*self.signal_received)
|
def set_seeds(seed):
    """Seed every RNG used by the pipeline: torch CPU, torch CUDA, numpy and
    the stdlib `random` module."""
    for seed_fn in (torch.manual_seed, torch.random.manual_seed,
                    np.random.seed, random.seed, torch.cuda.manual_seed):
        seed_fn(seed)
|
def create_transformers_from_conf_file(processing_conf):
    """Map each known transformation class to its config entry, looked up by
    the class name in `processing_conf`."""
    known_classes = (BlockSwap, ConfusionRemover, DeadCodeInserter,
                     ForWhileTransformer, OperandSwap, VarRenamer)
    transformers = {}
    for cls in known_classes:
        transformers[cls] = processing_conf[cls.__name__]
    return transformers
|
class ExampleProcessor():
    """Multiprocessing-friendly tokenizer/transformer for pretraining examples.

    `initialize` is intended as a pool initializer: it populates module-level
    globals so each worker builds its tokenizer/transformer exactly once.
    """
    def __init__(self, language, parser_path, transformation_config, bidirection_transformation=False, max_function_length=400):
        self.language = language
        self.parser_path = parser_path
        self.transformation_config = transformation_config
        self.max_function_length = max_function_length
        # Either a bool, or the string 'adaptive' (flip only symmetric transforms).
        self.bidirection_transformation = bidirection_transformation
    def initialize(self):
        global example_tokenizer
        global example_transformer
        transformers = create_transformers_from_conf_file(self.transformation_config)
        if (self.language == 'nl'):
            # Natural language: plain NLTK word tokenization.
            example_tokenizer = nltk.word_tokenize
        else:
            # Code: tokenize by running the identity transformation through the parser.
            example_tokenizer = NoTransformation(self.parser_path, self.language)
        example_transformer = SemanticPreservingTransformation(parser_path=self.parser_path, language=self.language, transform_functions=transformers)
    def process_example(self, code):
        """Return a {'source', 'target', 'transformer'} dict, or -1 when the
        example is too long, untransformable, or processing fails."""
        global example_tokenizer
        global example_transformer
        try:
            if (self.language == 'nl'):
                original_code = ' '.join(example_tokenizer(code))
            else:
                (original_code, _) = example_tokenizer.transform_code(code)
            if (len(original_code.split()) > self.max_function_length):
                return (- 1)  # too long to be a useful training example
            (transformed_code, used_transformer) = example_transformer.transform_code(code)
            if used_transformer:
                if (used_transformer == 'ConfusionRemover'):
                    # ConfusionRemover cleans code, so the cleaned version becomes the source.
                    original_code, transformed_code = transformed_code, original_code
                if (isinstance(self.bidirection_transformation, str) and (self.bidirection_transformation == 'adaptive')):
                    # Adaptive mode: only symmetric transformations may be flipped.
                    bidirection = (used_transformer in ['BlockSwap', 'ForWhileTransformer', 'OperandSwap'])
                else:
                    assert isinstance(self.bidirection_transformation, bool)
                    bidirection = self.bidirection_transformation
                if (bidirection and (np.random.uniform() < 0.5) and (used_transformer != 'SyntacticNoisingTransformation')):
                    return {'source': original_code, 'target': transformed_code, 'transformer': used_transformer}
                else:
                    return {'source': transformed_code, 'target': original_code, 'transformer': used_transformer}
            else:
                return (- 1)
        except KeyboardInterrupt:
            print('Stopping parsing for ', code)
            return (- 1)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed SystemExit
            # and GeneratorExit; best-effort skip now only covers real errors.
            return (- 1)
|
def process_functions(pool, example_processor, functions, train_file_path=None, valid_file_path=None, valid_percentage=0.002):
    """Run `example_processor.process_example` over `functions` in parallel and
    write successful results as JSONL, routing roughly `valid_percentage` of
    them to the validation file and the rest to the training file.
    """
    used_transformers = {}
    success = 0
    tf = (open(train_file_path, 'wt') if (train_file_path is not None) else None)
    # BUG FIX: the validation handle was gated on train_file_path, so passing
    # only valid_file_path silently dropped all validation output.
    vf = (open(valid_file_path, 'wt') if (valid_file_path is not None) else None)
    with tqdm(total=len(functions)) as pbar:
        processed_example_iterator = pool.imap(func=example_processor.process_example, iterable=functions, chunksize=1000)
        count = 0
        while True:
            pbar.update()
            count += 1
            try:
                out = next(processed_example_iterator)
                if (isinstance(out, int) and (out == (- 1))):
                    continue  # processing failed or the example was skipped
                if (out['transformer'] not in used_transformers):
                    used_transformers[out['transformer']] = 0
                used_transformers[out['transformer']] += 1
                if (np.random.uniform() < valid_percentage):
                    if (vf is not None):
                        vf.write((json.dumps(out) + '\n'))
                        vf.flush()
                elif (tf is not None):
                    tf.write((json.dumps(out) + '\n'))
                    tf.flush()
                success += 1
            except multiprocessing.TimeoutError:
                print(f'{count} encountered timeout')
            except StopIteration:
                print(f'{count} stop iteration')
                break
    if (tf is not None):
        tf.close()
    if (vf is not None):
        vf.close()
    print(f'''
Total : {len(functions)},
Success : {success},
Failure : {(len(functions) - success)}
Stats : {json.dumps(used_transformers, indent=4)}
''')
|
def set_seeds(seed):
    """Seed all random number generators (torch CPU/CUDA, numpy, stdlib random)
    for reproducible runs."""
    torch.manual_seed(seed)
    torch.random.manual_seed(seed)
    torch.cuda.manual_seed(seed)  # queued lazily; safe without a GPU
    np.random.seed(seed)
    random.seed(seed)
|
def find_langs_in_data_dir(data_dir):
    """Collect the distinct language prefixes of '<lang>_<split>.jsonl' files
    found directly inside `data_dir`."""
    langs = set()
    for fname in os.listdir(data_dir):
        if not fname.endswith('.jsonl'):
            continue
        stem = fname[:(- 6)]  # drop the '.jsonl' suffix
        # Everything before the final underscore segment is the language name
        # (which may itself contain underscores, e.g. 'c_sharp').
        langs.add('_'.join(stem.split('_')[:(- 1)]))
    return list(langs)
|
def num_parameters(model):
    """Return the total number of scalar parameters in `model`."""
    return sum(np.prod(p.size()) for p in model.parameters())
|
def main():
    """Evaluate a pretrained generation model: pooled ('micro') evaluation by
    default, or per-language ('macro') evaluation when --macro_eval is set."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--macro_eval', action='store_true')
    args = add_args(parser)
    logger.info(args)
    set_dist(args)
    set_seed(args)
    (config, model, tokenizer) = build_or_load_gen_model(args)
    model.to(args.device)
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    # Worker pool used for parallel feature conversion during data loading.
    pool = multiprocessing.Pool(args.cpu_cont)
    logger.info((' ' + '***** Evaluating Pretrain Performance *****'))
    logger.info(' Batch size = %d', args.eval_batch_size)
    (data_per_lang, (complete_examples, complete_data)) = load_pretrain_eval_data(args=args, pretrain_datadir=args.data_dir, pool=pool, tokenizer=tokenizer)
    if (not args.macro_eval):
        # Micro evaluation: one pass over the pooled dataset, per-example BLEU to CSV.
        eval_ppl = eval_ppl_epoch(args, complete_data, complete_examples, model, tokenizer)
        logger.info(f'{eval_ppl}')
        eval_result = eval_bleu_per_example(args=args, eval_data=complete_data, eval_examples=complete_examples, model=model, tokenizer=tokenizer, split_tag='test', criteria='Pretrain-Evaluation')
        eval_result_df = pd.DataFrame(data=eval_result)
        output_fn = os.path.join(args.res_dir, 'pretraining_test_res.csv')
        eval_result_df.to_csv(output_fn)
        logger.info(f'Detailed Output written to : {os.path.realpath(output_fn)}')
    else:
        # Macro evaluation: score each language separately, appending one
        # summary line per language to summary.log (and optionally res_fn).
        fa = open(os.path.join(args.output_dir, 'summary.log'), 'a+')
        for lang in data_per_lang.keys():
            args.lang = lang
            (eval_examples, eval_data) = data_per_lang[lang]
            eval_ppl = eval_ppl_epoch(args, eval_data, eval_examples, model, tokenizer)
            logger.info(f'{lang} : {eval_ppl}')
            result = eval_bleu_epoch(args, eval_data, eval_examples, model, tokenizer, 'test', f'{args.task}')
            (test_bleu, test_em) = (result['bleu'], result['em'])
            test_codebleu = result['codebleu']
            result_str = ('Lang : %s PPL: %.5f bleu-4: %.2f, em: %.4f, codebleu: %.4f\n' % (lang, eval_ppl, test_bleu, test_em, test_codebleu))
            logger.info(result_str)
            print(result_str)
            fa.write(result_str)
            if args.res_fn:
                with open(args.res_fn, 'a+') as f:
                    f.write(result_str)
        fa.close()
|
class SemCodeTrainer(Trainer):
    """HuggingFace Trainer subclass that saves and restores the full RNG state
    (python, numpy, torch CPU, CUDA, XLA) with each checkpoint, so interrupted
    training resumes bit-reproducibly."""
    def _load_rng_state(self, checkpoint):
        """Restore RNG states saved by save_checkpoint(); logs and skips when
        the rng file is missing (e.g. resuming with a different world size)."""
        if (checkpoint is None):
            return
        local_rank = (xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank)
        if (local_rank != (- 1)):
            # Distributed run: each process has its own rng_state_<rank>.pth.
            rng_file = os.path.join(checkpoint, f'rng_state_{local_rank}.pth')
            # NOTE(review): rng_file is already a joined path, so the inner
            # os.path.join(checkpoint, rng_file) collapses to rng_file — confirm.
            if (not os.path.isfile(os.path.join(checkpoint, rng_file))):
                logger.info(f"Didn't find an RNG file for process {local_rank}, if you are resuming a training that wasn't launched in a distributed fashion, reproducibility is not guaranteed.")
                return
        else:
            rng_file = os.path.join(checkpoint, 'rng_state.pth')
            if (not os.path.isfile(os.path.join(checkpoint, rng_file))):
                logger.info("Didn't find an RNG file, if you are resuming a training that was launched in a distributed fashion, reproducibility is not guaranteed.")
                return
        checkpoint_rng_state = torch.load(rng_file)
        random.setstate(checkpoint_rng_state['python'])
        np.random.set_state(checkpoint_rng_state['numpy'])
        torch.random.set_rng_state(checkpoint_rng_state['cpu'])
        if torch.cuda.is_available():
            try:
                # Distributed: one CUDA state per process; single-process: all devices.
                if (self.args.local_rank != (- 1)):
                    torch.cuda.random.set_rng_state(checkpoint_rng_state['cuda'])
                else:
                    torch.cuda.random.set_rng_state_all(checkpoint_rng_state['cuda'])
            except Exception as ex:
                logger.info(f'''Error encountered while loading the states, you may have used different numbers of GPUs
Error Message {ex}
''')
        if is_torch_tpu_available():
            xm.set_rng_state(checkpoint_rng_state['xla'])
    def save_checkpoint(self):
        """Write model, optimizer/scheduler state, trainer state and RNG states
        under '<output_dir>/<PREFIX>-<global_step>', then rotate old checkpoints."""
        checkpoint_folder = f'{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}'
        run_dir = self.args.output_dir
        self.store_flos()
        output_dir = os.path.join(run_dir, checkpoint_folder)
        self.save_model(output_dir)
        if self.deepspeed:
            # DeepSpeed owns the optimizer/scheduler state in its own format.
            self.deepspeed.save_checkpoint(output_dir)
        if (self.sharded_ddp == ShardedDDPOption.SIMPLE):
            self.optimizer.consolidate_state_dict()
        if is_torch_tpu_available():
            xm.rendezvous('saving_optimizer_states')
            xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
            reissue_pt_warnings(caught_warnings)
        elif is_sagemaker_mp_enabled():
            # SageMaker model parallel: consolidate on dp_rank 0, save on main process.
            if (smp.dp_rank() == 0):
                opt_state_dict = self.optimizer.state_dict()
                if self.args.should_save:
                    torch.save(opt_state_dict, os.path.join(output_dir, OPTIMIZER_NAME))
                    with warnings.catch_warnings(record=True) as caught_warnings:
                        torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
                    reissue_pt_warnings(caught_warnings)
                    if self.use_amp:
                        torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
        elif (not self.deepspeed):
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
            reissue_pt_warnings(caught_warnings)
            if self.use_amp:
                torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
        self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
        # Snapshot every RNG so resuming reproduces the exact data order/noise.
        rng_states = {'python': random.getstate(), 'numpy': np.random.get_state(), 'cpu': torch.random.get_rng_state()}
        if torch.cuda.is_available():
            if (self.args.local_rank == (- 1)):
                rng_states['cuda'] = torch.cuda.random.get_rng_state_all()
            else:
                rng_states['cuda'] = torch.cuda.random.get_rng_state()
        if is_torch_tpu_available():
            rng_states['xla'] = xm.get_rng_state()
        os.makedirs(output_dir, exist_ok=True)
        local_rank = (xm.get_local_ordinal() if is_torch_tpu_available() else self.args.local_rank)
        if (local_rank == (- 1)):
            torch.save(rng_states, os.path.join(output_dir, 'rng_state.pth'))
        else:
            torch.save(rng_states, os.path.join(output_dir, f'rng_state_{local_rank}.pth'))
        self._rotate_checkpoints(use_mtime=True, output_dir=run_dir)
|
class DemoTransformationTest(unittest.TestCase):
    """Smoke tests for DemoTransformation's tree-sitter parsing and tokenization."""
    def setUp(self) -> None:
        # Rebuild the combined tree-sitter shared library from every grammar
        # checked out under sitter-libs before each test.
        sitter_lib_path = 'sitter-libs'
        libs = [os.path.join(sitter_lib_path, d) for d in os.listdir(sitter_lib_path)]
        tree_sitter.Language.build_library('parser/languages.so', libs)
    def test_parsing(self):
        # Parsing a minimal Java class should yield a tree-sitter root node.
        code = '\n class A {\n public void foo(){\n int i=0;\n }\n }\n '
        transformer = DemoTransformation(parser='parser/languages.so', language='java')
        root = transformer.parse_code(code)
        self.assertTrue(isinstance(root, tree_sitter.Node))
    def test_tokens(self):
        # Token extraction should linearize the class into its lexical tokens.
        code = '\n class A {\n public void foo(){\n int i=0;\n }\n }\n '
        expected_tokens = 'class A { public void foo ( ) { int i = 0 ; } }'.split()
        transformer = DemoTransformation(parser='parser/languages.so', language='java')
        root = transformer.parse_code(code)
        (tokens, _) = transformer.get_tokens_with_node_type(code.encode(), root)
        self.assertListEqual(tokens, expected_tokens)
|
def seconds_to_tokens(sec, sr, prior, chunk_size):
    """Convert a duration in seconds to a token count, rounded up to the next
    whole chunk, and assert it fits in the prior's context window."""
    # BUG FIX: the `sr` parameter was ignored in favor of the global `hps.sr`,
    # silently coupling this helper to module state.
    tokens = ((sec * sr) // prior.raw_to_tokens)
    tokens = (((tokens // chunk_size) + 1) * chunk_size)
    assert (tokens <= prior.n_ctx), 'Choose a shorter generation length to stay within the top prior context'
    return tokens
|
def display_chromagraph(audio_file_path, ctr=1):
    """Render the chromagram of an audio file and save it as '<ctr>.png'."""
    waveform, sample_rate = librosa.load(audio_file_path)
    plt.figure(figsize=(8, 4))
    chroma = librosa.feature.chroma_cqt(y=waveform, sr=sample_rate)
    librosa.display.specshow(chroma, y_axis='chroma')
    plt.colorbar()
    plt.title('Chromagram')
    plt.savefig(f'{ctr}.png')
|
def seconds_to_tokens(sec, sr, prior, chunk_size):
    """Convert seconds of audio to a chunk-aligned token count within the
    prior's context limit."""
    # BUG FIX: use the `sr` argument — previously the global `hps.sr` was
    # read and the parameter was silently ignored.
    tokens = ((sec * sr) // prior.raw_to_tokens)
    tokens = (((tokens // chunk_size) + 1) * chunk_size)
    assert (tokens <= prior.n_ctx), 'Choose a shorter generation length to stay within the top prior context'
    return tokens
|
def display_chromagraph(audio_file_path, ctr=1):
    """Compute and plot a constant-Q chromagram, writing the figure to '<ctr>.png'."""
    signal, rate = librosa.load(audio_file_path)
    plt.figure(figsize=(8, 4))
    chromagram = librosa.feature.chroma_cqt(y=signal, sr=rate)
    librosa.display.specshow(chromagram, y_axis='chroma')
    plt.colorbar()
    plt.title('Chromagram')
    plt.savefig(f'{ctr}.png')
|
def seconds_to_tokens(sec, sr, prior, chunk_size):
    """Translate a duration (seconds) at sample rate `sr` into a token count,
    rounded up to a full chunk and bounded by the prior's context size."""
    # BUG FIX: honor the `sr` parameter instead of the global `hps.sr`.
    tokens = ((sec * sr) // prior.raw_to_tokens)
    tokens = (((tokens // chunk_size) + 1) * chunk_size)
    assert (tokens <= prior.n_ctx), 'Choose a shorter generation length to stay within the top prior context'
    return tokens
|
def display_chromagraph(audio_file_path, ctr=1):
    """Save the chromagram plot of `audio_file_path` to '<ctr>.png'."""
    audio, sr_loaded = librosa.load(audio_file_path)
    plt.figure(figsize=(8, 4))
    features = librosa.feature.chroma_cqt(y=audio, sr=sr_loaded)
    librosa.display.specshow(features, y_axis='chroma')
    plt.colorbar()
    plt.title('Chromagram')
    plt.savefig(f'{ctr}.png')
|
def getNotes(path_to_midi):
    """Parse MIDI files under `path_to_midi` into a flat list of pitch/chord
    strings, pickle the list to 'data/notes', and return it.

    Only the first 3 files are processed (kept small deliberately).
    """
    data = glob((path_to_midi + '/*.mid'))
    data = data[:3]
    notes = []
    for midifile in data:
        midi = converter.parse(midifile)
        parse_note = []
        # BUG FIX: `parts` was unbound when partitionByInstrument raised,
        # crashing at `if parts:` with a NameError.
        parts = None
        try:
            parts = instrument.partitionByInstrument(midi)
        except Exception:
            pass
        if parts:
            parse_note = parts.parts[0].recurse()
        else:
            parse_note = midi.flat.notes
        for a_note in parse_note:
            if isinstance(a_note, note.Note):
                # BUG FIX: was str(note.pitch) — that referenced the music21
                # `note` module, not the current note object.
                notes.append(str(a_note.pitch))
            elif isinstance(a_note, chord.Chord):
                notes.append('.'.join((str(n) for n in a_note.normalOrder)))
    with open('data/notes', 'wb') as f:
        pickle.dump(notes, f)
    return notes
|
def noteToSequence(notes, nvocab):
    """Build (input, output) training windows of length 100 from a note list.

    Inputs are integer-encoded note windows normalized by `nvocab`, reshaped
    to (npatterns, 100, 1); outputs are one-hot encoded next notes.
    """
    seq_len = 100
    pitches = sorted(set(notes))
    int_note = {pitch: number for (number, pitch) in enumerate(pitches)}
    inp = []
    out = []
    for i in range(0, (len(notes) - seq_len), 1):
        seq_in = notes[i:(i + seq_len)]
        seq_out = notes[(i + seq_len)]
        # Renamed loop variable: the original shadowed the builtin `ascii`.
        inp.append([int_note[item] for item in seq_in])
        out.append(int_note[seq_out])
    npatterns = len(inp)
    inp = np.reshape(inp, (npatterns, seq_len, 1))
    inp = (inp / float(nvocab))
    out = np_utils.to_categorical(out)
    return (inp, out)
|
class MIDI(object):
    """Thin pygame-based MIDI playback helper."""
    def __init__(self, volume=0.8, freq=44100, bitsize=(- 16), nof_channels=2, buffer=1024):
        # Store mixer configuration for later initialization in play_midi().
        self.volume = volume
        self.freq = freq
        self.bitsize = bitsize
        self.nof_channels = nof_channels
        self.buf = buffer
    def play_music(self, music_file):
        """Start playback of `music_file` and block until it finishes."""
        ticker = pygame.time.Clock()
        pygame.mixer.music.load(music_file)
        pygame.mixer.music.play()
        # Poll at 30 Hz while audio is still playing.
        while pygame.mixer.music.get_busy():
            ticker.tick(30)
    def play_midi(self, midi_file):
        """Initialize the mixer and play `midi_file`; Ctrl-C fades out and exits."""
        pygame.mixer.init(self.freq, self.bitsize, self.nof_channels, self.buf)
        pygame.mixer.music.set_volume(self.volume)
        try:
            self.play_music(midi_file)
        except KeyboardInterrupt:
            pygame.mixer.music.fadeout(1000)
            pygame.mixer.music.stop()
            raise SystemExit
|
def to_image(img_tensor, seg_tensor=None):
    """Convert a [-1, 1] CHW image tensor to a PIL image; if a segmentation
    mask is given, matte the background to white."""
    # Map [-1, 1] -> [0, 255] and move channels last (HWC).
    arr = (img_tensor.clamp((- 1), 1).cpu().numpy() + 1) / 2
    arr = arr.transpose(1, 2, 0) * 255
    if seg_tensor is not None:
        mask = seg_tensor.cpu().numpy().transpose(1, 2, 0)
        arr = (arr * mask) + (255.0 * (1 - mask))
    return Image.fromarray(arr.astype('uint8'))
|
class VOCSegmentation(Dataset):
    """ATR human-parsing segmentation dataset.

    Yields {'image': PIL RGB image, 'label': PIL mask}. With flip=True an item
    may be mirrored, in which case the pre-flipped mask from
    SegmentationClassAug_rev is used instead of flipping the mask in memory.
    NOTE(review): several classes in this file share the name VOCSegmentation;
    only the last definition survives at import time — confirm intended usage.
    """
    def __init__(self, base_dir=Path.db_root_dir('atr'), split='train', transform=None, flip=False):
        '\n        :param base_dir: path to ATR dataset directory\n        :param split: train/val\n        :param transform: transform to apply\n        '
        # NOTE(review): super(VOCSegmentation).__init__() omits `self`, so the
        # parent initializer is not actually invoked — harmless for a torch
        # Dataset but likely unintended; confirm.
        super(VOCSegmentation).__init__()
        self._flip_flag = flip
        self._base_dir = base_dir
        self._image_dir = os.path.join(self._base_dir, 'JPEGImages')
        self._cat_dir = os.path.join(self._base_dir, 'SegmentationClassAug')
        self._flip_dir = os.path.join(self._base_dir, 'SegmentationClassAug_rev')
        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split
        self.transform = transform
        _splits_dir = os.path.join(self._base_dir, 'list')
        self.im_ids = []
        self.images = []
        self.categories = []
        self.flip_categories = []
        # Build parallel lists of image / mask / flipped-mask paths per split.
        for splt in self.split:
            with open(os.path.join(os.path.join(_splits_dir, (splt + '_id.txt'))), 'r') as f:
                lines = f.read().splitlines()
            for (ii, line) in enumerate(lines):
                _image = os.path.join(self._image_dir, (line + '.jpg'))
                _cat = os.path.join(self._cat_dir, (line + '.png'))
                _flip = os.path.join(self._flip_dir, (line + '.png'))
                assert os.path.isfile(_image)
                assert os.path.isfile(_cat)
                assert os.path.isfile(_flip)
                self.im_ids.append(line)
                self.images.append(_image)
                self.categories.append(_cat)
                self.flip_categories.append(_flip)
        assert (len(self.images) == len(self.categories))
        assert (len(self.flip_categories) == len(self.categories))
        print('Number of images in {}: {:d}'.format(split, len(self.images)))
    def __len__(self):
        return len(self.images)
    def __getitem__(self, index):
        (_img, _target) = self._make_img_gt_point_pair(index)
        sample = {'image': _img, 'label': _target}
        if (self.transform is not None):
            sample = self.transform(sample)
        return sample
    def _make_img_gt_point_pair(self, index):
        # Randomly mirror the image; the matching mask comes pre-flipped on disk.
        _img = Image.open(self.images[index]).convert('RGB')
        if self._flip_flag:
            if (random.random() < 0.5):
                _target = Image.open(self.flip_categories[index])
                _img = _img.transpose(Image.FLIP_LEFT_RIGHT)
            else:
                _target = Image.open(self.categories[index])
        else:
            _target = Image.open(self.categories[index])
        return (_img, _target)
    def __str__(self):
        return (('ATR(split=' + str(self.split)) + ')')
|
class VOCSegmentation(Dataset):
    """CIHP human-parsing segmentation dataset.

    Yields {'image': PIL RGB image, 'label': PIL mask}. With flip=True an item
    may be mirrored, using the pre-flipped mask from Category_rev_ids.
    NOTE(review): shares its name with the other VOCSegmentation classes in
    this file; only the last definition is importable — confirm intended usage.
    """
    def __init__(self, base_dir=Path.db_root_dir('cihp'), split='train', transform=None, flip=False):
        '\n        :param base_dir: path to CIHP dataset directory\n        :param split: train/val/test\n        :param transform: transform to apply\n        '
        # NOTE(review): super(VOCSegmentation).__init__() omits `self`; the
        # parent initializer is not actually run — confirm this is intended.
        super(VOCSegmentation).__init__()
        self._flip_flag = flip
        self._base_dir = base_dir
        self._image_dir = os.path.join(self._base_dir, 'Images')
        self._cat_dir = os.path.join(self._base_dir, 'Category_ids')
        self._flip_dir = os.path.join(self._base_dir, 'Category_rev_ids')
        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split
        self.transform = transform
        _splits_dir = os.path.join(self._base_dir, 'lists')
        self.im_ids = []
        self.images = []
        self.categories = []
        self.flip_categories = []
        # Build parallel lists of image / mask / flipped-mask paths per split.
        for splt in self.split:
            with open(os.path.join(os.path.join(_splits_dir, (splt + '_id.txt'))), 'r') as f:
                lines = f.read().splitlines()
            for (ii, line) in enumerate(lines):
                _image = os.path.join(self._image_dir, (line + '.jpg'))
                _cat = os.path.join(self._cat_dir, (line + '.png'))
                _flip = os.path.join(self._flip_dir, (line + '.png'))
                assert os.path.isfile(_image)
                assert os.path.isfile(_cat)
                assert os.path.isfile(_flip)
                self.im_ids.append(line)
                self.images.append(_image)
                self.categories.append(_cat)
                self.flip_categories.append(_flip)
        assert (len(self.images) == len(self.categories))
        assert (len(self.flip_categories) == len(self.categories))
        print('Number of images in {}: {:d}'.format(split, len(self.images)))
    def __len__(self):
        return len(self.images)
    def __getitem__(self, index):
        (_img, _target) = self._make_img_gt_point_pair(index)
        sample = {'image': _img, 'label': _target}
        if (self.transform is not None):
            sample = self.transform(sample)
        return sample
    def _make_img_gt_point_pair(self, index):
        # Randomly mirror the image; the matching mask comes pre-flipped on disk.
        _img = Image.open(self.images[index]).convert('RGB')
        if self._flip_flag:
            if (random.random() < 0.5):
                _target = Image.open(self.flip_categories[index])
                _img = _img.transpose(Image.FLIP_LEFT_RIGHT)
            else:
                _target = Image.open(self.categories[index])
        else:
            _target = Image.open(self.categories[index])
        return (_img, _target)
    def __str__(self):
        return (('CIHP(split=' + str(self.split)) + ')')
|
class VOCSegmentation(Dataset):
    """Combined CIHP + PASCAL-Person-Part + ATR segmentation dataset.

    Items are {'image', 'label', 'pascal'} where 'pascal' is the source
    dataset id: 0=CIHP, 1=PASCAL, 2=ATR. PASCAL has no pre-flipped masks, so
    its masks are flipped in memory; CIHP/ATR use pre-flipped files on disk.
    """
    def __init__(self, cihp_dir=Path.db_root_dir('cihp'), split='train', transform=None, flip=False, pascal_dir=PP.db_root_dir('pascal'), atr_dir=PA.db_root_dir('atr')):
        '\n        :param cihp_dir: path to CIHP dataset directory\n        :param pascal_dir: path to PASCAL dataset directory\n        :param atr_dir: path to ATR dataset directory\n        :param split: train/val\n        :param transform: transform to apply\n        '
        # NOTE(review): super(VOCSegmentation).__init__() omits `self`; the
        # parent initializer is not actually run — confirm this is intended.
        super(VOCSegmentation).__init__()
        self._flip_flag = flip
        # CIHP directory layout.
        self._base_dir = cihp_dir
        self._image_dir = os.path.join(self._base_dir, 'Images')
        self._cat_dir = os.path.join(self._base_dir, 'Category_ids')
        self._flip_dir = os.path.join(self._base_dir, 'Category_rev_ids')
        # PASCAL directory layout (no pre-flipped masks available).
        self._base_dir_pascal = pascal_dir
        self._image_dir_pascal = os.path.join(self._base_dir_pascal, 'JPEGImages')
        self._cat_dir_pascal = os.path.join(self._base_dir_pascal, 'SegmentationPart')
        # ATR directory layout.
        self._base_dir_atr = atr_dir
        self._image_dir_atr = os.path.join(self._base_dir_atr, 'JPEGImages')
        self._cat_dir_atr = os.path.join(self._base_dir_atr, 'SegmentationClassAug')
        self._flip_dir_atr = os.path.join(self._base_dir_atr, 'SegmentationClassAug_rev')
        if isinstance(split, str):
            self.split = [split]
        else:
            split.sort()
            self.split = split
        self.transform = transform
        _splits_dir = os.path.join(self._base_dir, 'lists')
        _splits_dir_pascal = os.path.join(self._base_dir_pascal, 'list')
        _splits_dir_atr = os.path.join(self._base_dir_atr, 'list')
        self.im_ids = []
        self.images = []
        self.categories = []
        self.flip_categories = []
        # Per-item dataset id: 0=CIHP, 1=PASCAL, 2=ATR.
        self.datasets_lbl = []
        self.num_cihp = 0
        self.num_pascal = 0
        self.num_atr = 0
        # CIHP entries.
        for splt in self.split:
            with open(os.path.join(os.path.join(_splits_dir, (splt + '_id.txt'))), 'r') as f:
                lines = f.read().splitlines()
            self.num_cihp += len(lines)
            for (ii, line) in enumerate(lines):
                _image = os.path.join(self._image_dir, (line + '.jpg'))
                _cat = os.path.join(self._cat_dir, (line + '.png'))
                _flip = os.path.join(self._flip_dir, (line + '.png'))
                assert os.path.isfile(_image)
                assert os.path.isfile(_cat)
                assert os.path.isfile(_flip)
                self.im_ids.append(line)
                self.images.append(_image)
                self.categories.append(_cat)
                self.flip_categories.append(_flip)
                self.datasets_lbl.append(0)
        # PASCAL entries ('test' falls back to the 'val' list; no flip masks).
        for splt in self.split:
            if (splt == 'test'):
                splt = 'val'
            with open(os.path.join(os.path.join(_splits_dir_pascal, (splt + '_id.txt'))), 'r') as f:
                lines = f.read().splitlines()
            self.num_pascal += len(lines)
            for (ii, line) in enumerate(lines):
                _image = os.path.join(self._image_dir_pascal, (line + '.jpg'))
                _cat = os.path.join(self._cat_dir_pascal, (line + '.png'))
                assert os.path.isfile(_image)
                assert os.path.isfile(_cat)
                self.im_ids.append(line)
                self.images.append(_image)
                self.categories.append(_cat)
                self.flip_categories.append([])
                self.datasets_lbl.append(1)
        # ATR entries.
        for splt in self.split:
            with open(os.path.join(os.path.join(_splits_dir_atr, (splt + '_id.txt'))), 'r') as f:
                lines = f.read().splitlines()
            self.num_atr += len(lines)
            for (ii, line) in enumerate(lines):
                _image = os.path.join(self._image_dir_atr, (line + '.jpg'))
                _cat = os.path.join(self._cat_dir_atr, (line + '.png'))
                _flip = os.path.join(self._flip_dir_atr, (line + '.png'))
                assert os.path.isfile(_image)
                assert os.path.isfile(_cat)
                assert os.path.isfile(_flip)
                self.im_ids.append(line)
                self.images.append(_image)
                self.categories.append(_cat)
                self.flip_categories.append(_flip)
                self.datasets_lbl.append(2)
        assert (len(self.images) == len(self.categories))
        print('Number of images in {}: {:d}'.format(split, len(self.images)))
    def __len__(self):
        return len(self.images)
    def get_class_num(self):
        # Counts per source dataset, in (cihp, pascal, atr) order.
        return (self.num_cihp, self.num_pascal, self.num_atr)
    def __getitem__(self, index):
        (_img, _target, _lbl) = self._make_img_gt_point_pair(index)
        sample = {'image': _img, 'label': _target}
        if (self.transform is not None):
            sample = self.transform(sample)
        sample['pascal'] = _lbl
        return sample
    def _make_img_gt_point_pair(self, index):
        _img = Image.open(self.images[index]).convert('RGB')
        type_lbl = self.datasets_lbl[index]
        if self._flip_flag:
            if (random.random() < 0.5):
                _img = _img.transpose(Image.FLIP_LEFT_RIGHT)
                # CIHP/ATR have pre-flipped masks; PASCAL masks are flipped here.
                if ((type_lbl == 0) or (type_lbl == 2)):
                    _target = Image.open(self.flip_categories[index])
                else:
                    _target = Image.open(self.categories[index])
                    _target = _target.transpose(Image.FLIP_LEFT_RIGHT)
            else:
                _target = Image.open(self.categories[index])
        else:
            _target = Image.open(self.categories[index])
        return (_img, _target, type_lbl)
    def __str__(self):
        return (('datasets(split=' + str(self.split)) + ')')
|
class RandomCrop(object):
    """Randomly crop the image/label pair in a sample to a fixed size.

    Inputs smaller than the target are resized instead (bilinear for the
    image, nearest-neighbor for the mask).
    """
    def __init__(self, size, padding=0):
        if isinstance(size, numbers.Number):
            side = int(size)
            self.size = (side, side)
        else:
            self.size = size
        self.padding = padding
    def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
            mask = ImageOps.expand(mask, border=self.padding, fill=0)
        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        if (w, h) == (tw, th):
            # Already the target size: nothing to do.
            return {'image': img, 'label': mask}
        if w < tw or h < th:
            # Too small to crop: resize up instead.
            return {
                'image': img.resize((tw, th), Image.BILINEAR),
                'label': mask.resize((tw, th), Image.NEAREST),
            }
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        box = (x1, y1, x1 + tw, y1 + th)
        return {'image': img.crop(box), 'label': mask.crop(box)}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.