code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
|---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
out.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\n')
<|reserved_special_token_0|>
for file in os.listdir(docpath + 'isoSegmenter100'):
if file.endswith('.csv') and 'E' in file:
predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +
file, 'r', encoding='UTF-8'))
seqid = file.replace('.csv', '')
with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',
encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = []
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data
['domain_length'])):
true_boundaries.append(i)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i + 1]
tolerance = cutoff * (true_boundaries[i + 1] -
true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print('START MATCH: ' + str(true_boundaries[i]) +
', ' + pred_domain['Start'])
print('END MATCH: ' + str(true_boundaries[i + 1]) +
', ' + pred_domain['End'])
print('DIFFERENCES: ' + str(startdiff) + ', ' + str
(enddiff) + ', TOLERANCE = ' + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_eq += tp_seq
fp_eq += fp_seq
fn_eq += fn_seq
sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)
ppv = round(tp_seq / (tp_seq + fp_seq), 5)
jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)
out.write(seqid + ',E,' + str(truth_data['domains']) + ',' + str(
tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(
sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\n')
summary.write('EQUAL-LENGTH STATISTICS\n')
summary.write('TP equal domain: ' + str(tp_eq) + '\n')
summary.write('FP equal domain: ' + str(fp_eq) + '\n')
summary.write('FN equal domain: ' + str(fn_eq) + '\n')
summary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\n')
summary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +
'\n')
summary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),
5)) + '\n\n')
<|reserved_special_token_0|>
for file in os.listdir(docpath + 'isoSegmenter100'):
if file.endswith('.csv') and 'V' in file:
predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +
file, 'r', encoding='UTF-8'))
seqid = file.replace('.csv', '')
with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',
encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = [1]
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(1, int(truth_data['domains']) + 1):
b_next = true_boundaries[i - 1] + int(truth_data['length_' +
str(i)])
true_boundaries.append(b_next)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i + 1]
tolerance = cutoff * (true_boundaries[i + 1] -
true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print('START MATCH: ' + str(true_boundaries[i]) +
', ' + pred_domain['Start'])
print('END MATCH: ' + str(true_boundaries[i + 1]) +
', ' + pred_domain['End'])
print('DIFFERENCES: ' + str(startdiff) + ', ' + str
(enddiff) + ', TOLERANCE = ' + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_var += tp_seq
fp_var += fp_seq
fn_var += fn_seq
sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)
ppv = round(tp_seq / (tp_seq + fp_seq), 5)
jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)
out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(
tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(
sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\n')
summary.write('VARIABLE-LENGTH STATISTICS\n')
summary.write('TP equal domain: ' + str(tp_var) + '\n')
summary.write('FP equal domain: ' + str(fp_var) + '\n')
summary.write('FN equal domain: ' + str(fn_var) + '\n')
summary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +
'\n')
summary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)
) + '\n')
summary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +
fn_var), 5)) + '\n\n')
summary.write('OVERALL STATISTICS\n')
summary.write('TP: ' + str(tp_var + tp_eq) + '\n')
summary.write('FP: ' + str(fp_var + fp_eq) + '\n')
summary.write('FN: ' + str(fn_var + fn_eq) + '\n')
summary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +
fn_var + tp_eq + fn_eq), 5)) + '\n')
summary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +
fp_var + tp_eq + fp_eq), 5)) + '\n')
summary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +
fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\n')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cutoff = float(input('Tolerance (decimal)? '))
docpath = 'C:/Users/RackS/Documents/'
out = open('isosegmenter_scoring_error' + str(cutoff * 100) + '.csv', 'w',
encoding='UTF-8')
summary = open('isosegmenter_score_summary_error' + str(cutoff * 100) +
'.txt', 'w', encoding='UTF-8')
out.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\n')
tp_eq = 0
fp_eq = 0
fn_eq = 0
for file in os.listdir(docpath + 'isoSegmenter100'):
if file.endswith('.csv') and 'E' in file:
predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +
file, 'r', encoding='UTF-8'))
seqid = file.replace('.csv', '')
with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',
encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = []
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data
['domain_length'])):
true_boundaries.append(i)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i + 1]
tolerance = cutoff * (true_boundaries[i + 1] -
true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print('START MATCH: ' + str(true_boundaries[i]) +
', ' + pred_domain['Start'])
print('END MATCH: ' + str(true_boundaries[i + 1]) +
', ' + pred_domain['End'])
print('DIFFERENCES: ' + str(startdiff) + ', ' + str
(enddiff) + ', TOLERANCE = ' + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_eq += tp_seq
fp_eq += fp_seq
fn_eq += fn_seq
sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)
ppv = round(tp_seq / (tp_seq + fp_seq), 5)
jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)
out.write(seqid + ',E,' + str(truth_data['domains']) + ',' + str(
tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(
sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\n')
summary.write('EQUAL-LENGTH STATISTICS\n')
summary.write('TP equal domain: ' + str(tp_eq) + '\n')
summary.write('FP equal domain: ' + str(fp_eq) + '\n')
summary.write('FN equal domain: ' + str(fn_eq) + '\n')
summary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\n')
summary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +
'\n')
summary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),
5)) + '\n\n')
tp_var = 0
fp_var = 0
fn_var = 0
for file in os.listdir(docpath + 'isoSegmenter100'):
if file.endswith('.csv') and 'V' in file:
predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +
file, 'r', encoding='UTF-8'))
seqid = file.replace('.csv', '')
with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',
encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = [1]
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(1, int(truth_data['domains']) + 1):
b_next = true_boundaries[i - 1] + int(truth_data['length_' +
str(i)])
true_boundaries.append(b_next)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i + 1]
tolerance = cutoff * (true_boundaries[i + 1] -
true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print('START MATCH: ' + str(true_boundaries[i]) +
', ' + pred_domain['Start'])
print('END MATCH: ' + str(true_boundaries[i + 1]) +
', ' + pred_domain['End'])
print('DIFFERENCES: ' + str(startdiff) + ', ' + str
(enddiff) + ', TOLERANCE = ' + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_var += tp_seq
fp_var += fp_seq
fn_var += fn_seq
sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)
ppv = round(tp_seq / (tp_seq + fp_seq), 5)
jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)
out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(
tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(
sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\n')
summary.write('VARIABLE-LENGTH STATISTICS\n')
summary.write('TP equal domain: ' + str(tp_var) + '\n')
summary.write('FP equal domain: ' + str(fp_var) + '\n')
summary.write('FN equal domain: ' + str(fn_var) + '\n')
summary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +
'\n')
summary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)
) + '\n')
summary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +
fn_var), 5)) + '\n\n')
summary.write('OVERALL STATISTICS\n')
summary.write('TP: ' + str(tp_var + tp_eq) + '\n')
summary.write('FP: ' + str(fp_var + fp_eq) + '\n')
summary.write('FN: ' + str(fn_var + fn_eq) + '\n')
summary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +
fn_var + tp_eq + fn_eq), 5)) + '\n')
summary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +
fp_var + tp_eq + fp_eq), 5)) + '\n')
summary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +
fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\n')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
"""Score isoSegmenter domain predictions against simulated ground truth.

For every prediction CSV in ``<docpath>isoSegmenter100`` the matching
ground-truth JSON in ``<docpath>ground_truth100`` is loaded, the true
domain boundaries are reconstructed, and each predicted domain counts as
a true positive when both its start and end lie within ``cutoff`` (a
fraction of the true domain length) of a true boundary pair.
Per-sequence scores are written to a CSV, aggregate statistics to a text
summary; matches are echoed to stdout.

Fixes over the previous revision: file handles are closed
deterministically (``with`` blocks — the prediction CSVs were previously
leaked and ``out``/``summary`` were never flushed/closed), the duplicated
equal-length / variable-length scoring loops are factored into shared
helpers, and execution is guarded by ``__main__`` so importing the module
has no side effects.  Output content is unchanged.
"""
import os
import json
import csv


def _equal_boundaries(truth):
    """Boundaries for an equal-length sequence: every ``domain_length``
    bases from 0 up to and including ``tot_length``."""
    return list(range(0, int(truth['tot_length']) + 1,
                      int(truth['domain_length'])))


def _variable_boundaries(truth):
    """Boundaries for a variable-length sequence: cumulative sums of the
    per-domain ``length_<i>`` fields, starting at base 1."""
    bounds = [1]
    for i in range(1, int(truth['domains']) + 1):
        bounds.append(bounds[-1] + int(truth['length_' + str(i)]))
    return bounds


def _score_sequence(seqid, pred_rows, boundaries, cutoff, n_domains):
    """Count (tp, fp, fn) for one sequence.

    A predicted row matches a true domain when both its 'Start' and 'End'
    are within ``cutoff * true_domain_length`` of the corresponding true
    boundary pair.  Each match is printed for inspection, mirroring the
    original behaviour.  Unmatched predictions are false positives; true
    domains left unmatched are false negatives.
    """
    tp = 0
    fp = 0
    for row in pred_rows:
        matched = False
        for lo, hi in zip(boundaries, boundaries[1:]):
            startdiff = int(row['Start']) - lo
            enddiff = int(row['End']) - hi
            tolerance = cutoff * (hi - lo)
            if abs(startdiff) <= tolerance and abs(enddiff) <= tolerance:
                tp += 1
                matched = True
                print(seqid)
                print('START MATCH: ' + str(lo) + ', ' + row['Start'])
                print('END MATCH: ' + str(hi) + ', ' + row['End'])
                print('DIFFERENCES: ' + str(startdiff) + ', ' +
                      str(enddiff) + ', TOLERANCE = ' + str(tolerance))
                print()
                break
        if not matched:
            fp += 1
    return tp, fp, n_domains - tp


def _score_group(out, docpath, type_code, boundary_fn, cutoff):
    """Score every prediction CSV whose name contains ``type_code``.

    Writes one CSV row per sequence to ``out`` and returns the summed
    (tp, fp, fn) over the whole group.
    """
    tp_tot = 0
    fp_tot = 0
    fn_tot = 0
    for fname in os.listdir(docpath + 'isoSegmenter100'):
        if not (fname.endswith('.csv') and type_code in fname):
            continue
        seqid = fname.replace('.csv', '')
        with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',
                  encoding='UTF-8') as json_file:
            truth = json.load(json_file)
        # The prediction file is read fully inside the with-block, so the
        # handle is always closed (previously it was leaked).
        with open(docpath + 'isoSegmenter100/' + fname, 'r',
                  encoding='UTF-8') as pred_file:
            tp, fp, fn = _score_sequence(seqid, csv.DictReader(pred_file),
                                         boundary_fn(truth), cutoff,
                                         int(truth['domains']))
        tp_tot += tp
        fp_tot += fp
        fn_tot += fn
        sensitivity = round(tp / (tp + fn), 5)
        ppv = round(tp / (tp + fp), 5)
        jaccard = round(tp / (tp + fp + fn), 5)
        out.write(seqid + ',' + type_code + ',' + str(truth['domains']) +
                  ',' + str(tp) + ',' + str(fp) + ',' + str(fn) + ',' +
                  str(sensitivity) + ',' + str(ppv) + ',' +
                  str(jaccard) + '\n')
    return tp_tot, fp_tot, fn_tot


def _write_stats(summary, header, count_label, tp, fp, fn, end='\n\n'):
    """Write one summary section: header, raw counts, derived metrics."""
    summary.write(header + '\n')
    summary.write('TP' + count_label + ': ' + str(tp) + '\n')
    summary.write('FP' + count_label + ': ' + str(fp) + '\n')
    summary.write('FN' + count_label + ': ' + str(fn) + '\n')
    summary.write('Sensitivity: ' + str(round(tp / (tp + fn), 5)) + '\n')
    summary.write('Precision(PPV): ' + str(round(tp / (tp + fp), 5)) + '\n')
    summary.write('Jaccard Index: ' +
                  str(round(tp / (tp + fp + fn), 5)) + end)


def main():
    """Prompt for the tolerance, score both groups, write CSV + summary."""
    cutoff = float(input('Tolerance (decimal)? '))
    docpath = 'C:/Users/RackS/Documents/'
    with open('isosegmenter_scoring_error' + str(cutoff * 100) + '.csv',
              'w', encoding='UTF-8') as out, \
            open('isosegmenter_score_summary_error' + str(cutoff * 100) +
                 '.txt', 'w', encoding='UTF-8') as summary:
        out.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\n')
        tp_eq, fp_eq, fn_eq = _score_group(out, docpath, 'E',
                                           _equal_boundaries, cutoff)
        # NOTE(review): the ' equal domain' label is reproduced verbatim for
        # the variable-length section too, so the summary format stays
        # byte-identical to the previous revision (it looks like a
        # copy-paste label — confirm before changing).
        _write_stats(summary, 'EQUAL-LENGTH STATISTICS', ' equal domain',
                     tp_eq, fp_eq, fn_eq)
        tp_var, fp_var, fn_var = _score_group(out, docpath, 'V',
                                              _variable_boundaries, cutoff)
        _write_stats(summary, 'VARIABLE-LENGTH STATISTICS', ' equal domain',
                     tp_var, fp_var, fn_var)
        _write_stats(summary, 'OVERALL STATISTICS', '',
                     tp_var + tp_eq, fp_var + fp_eq, fn_var + fn_eq,
                     end='\n')


if __name__ == '__main__':
    main()
<|reserved_special_token_1|>
"""
"""
import os
import json
import csv
cutoff = float(input("Tolerance (decimal)? "))
docpath = "C:/Users/RackS/Documents/"
out = open("isosegmenter_scoring_error"+str(cutoff*100)+".csv", 'w', encoding='UTF-8')
summary = open("isosegmenter_score_summary_error"+str(cutoff*100)+".txt", 'w', encoding='UTF-8')
out.write("SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\n")
tp_eq = 0
fp_eq = 0
fn_eq = 0
for file in os.listdir(docpath+"isoSegmenter100"):
if file.endswith(".csv") and "E" in file:
predict_data = csv.DictReader(open(docpath+"isoSegmenter100/"+file, 'r', encoding='UTF-8'))
seqid = file.replace(".csv", "")
with open(docpath+"ground_truth100/"+seqid+".json", 'r', encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = []
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data['domain_length'])):
true_boundaries.append(i)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i+1]
tolerance = cutoff*(true_boundaries[i+1] - true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print("START MATCH: " + str(true_boundaries[i]) + ", " + pred_domain['Start'])
print("END MATCH: " + str(true_boundaries[i+1]) + ", " + pred_domain['End'])
print("DIFFERENCES: " + str(startdiff) + ", " + str(enddiff) + ", TOLERANCE = " + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_eq += tp_seq
fp_eq += fp_seq
fn_eq += fn_seq
sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)
ppv = round(tp_seq/(tp_seq+fp_seq), 5)
jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)
out.write(seqid+",E,"+str(truth_data['domains'])+","+str(tp_seq)+","+str(fp_seq)+","+str(fn_seq)+","+str(sensitivity)+","+str(ppv)+","+str(jaccard)+"\n")
summary.write("EQUAL-LENGTH STATISTICS\n")
summary.write("TP equal domain: " + str(tp_eq) + "\n")
summary.write("FP equal domain: " + str(fp_eq) + "\n")
summary.write("FN equal domain: " + str(fn_eq) + "\n")
summary.write("Sensitivity: " + str(round(tp_eq/(tp_eq + fn_eq),5)) + "\n")
summary.write("Precision(PPV): " + str(round(tp_eq/(tp_eq + fp_eq),5)) + "\n")
summary.write("Jaccard Index: " + str(round(tp_eq/(tp_eq + fp_eq + fn_eq),5)) + "\n\n")
tp_var = 0
fp_var = 0
fn_var = 0
for file in os.listdir(docpath+"isoSegmenter100"):
if file.endswith(".csv") and "V" in file:
predict_data = csv.DictReader(open(docpath+"isoSegmenter100/"+file, 'r', encoding='UTF-8'))
seqid = file.replace(".csv", "")
with open(docpath+"ground_truth100/"+seqid+".json", 'r', encoding='UTF-8') as json_file:
truth_data = json.load(json_file)
true_boundaries = [1]
tp_seq = 0
fp_seq = 0
fn_seq = 0
for i in range(1, int(truth_data['domains']) + 1):
b_next = true_boundaries[i-1] + int(truth_data['length_'+str(i)])
true_boundaries.append(b_next)
for pred_domain in predict_data:
matched = False
for i in range(0, len(true_boundaries) - 1):
startdiff = int(pred_domain['Start']) - true_boundaries[i]
enddiff = int(pred_domain['End']) - true_boundaries[i+1]
tolerance = cutoff*(true_boundaries[i+1] - true_boundaries[i])
if abs(startdiff) <= tolerance:
if abs(enddiff) <= tolerance:
tp_seq += 1
matched = True
print(seqid)
print("START MATCH: " + str(true_boundaries[i]) + ", " + pred_domain['Start'])
print("END MATCH: " + str(true_boundaries[i+1]) + ", " + pred_domain['End'])
print("DIFFERENCES: " + str(startdiff) + ", " + str(enddiff) + ", TOLERANCE = " + str(tolerance))
print()
break
if not matched:
fp_seq += 1
fn_seq = int(truth_data['domains']) - tp_seq
tp_var += tp_seq
fp_var += fp_seq
fn_var += fn_seq
sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)
ppv = round(tp_seq/(tp_seq+fp_seq), 5)
jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)
out.write(seqid+",V,"+str(truth_data['domains'])+","+str(tp_seq)+","+str(fp_seq)+","+str(fn_seq)+","+str(sensitivity)+","+str(ppv)+","+str(jaccard)+"\n")
summary.write("VARIABLE-LENGTH STATISTICS\n")
summary.write("TP equal domain: " + str(tp_var) + "\n")
summary.write("FP equal domain: " + str(fp_var) + "\n")
summary.write("FN equal domain: " + str(fn_var) + "\n")
summary.write("Sensitivity: " + str(round(tp_var/(tp_var + fn_var),5)) + "\n")
summary.write("Precision(PPV): " + str(round(tp_var/(tp_var + fp_var),5)) + "\n")
summary.write("Jaccard Index: " + str(round(tp_var/(tp_var + fp_var + fn_var),5)) + "\n\n")
summary.write("OVERALL STATISTICS\n")
summary.write("TP: " + str(tp_var + tp_eq) + "\n")
summary.write("FP: " + str(fp_var + fp_eq) + "\n")
summary.write("FN: " + str(fn_var + fn_eq) + "\n")
summary.write("Sensitivity: " + str(round((tp_var + tp_eq)/(tp_var + fn_var + tp_eq + fn_eq),5)) + "\n")
summary.write("Precision(PPV): " + str(round((tp_var + tp_eq)/(tp_var + fp_var + tp_eq + fp_eq),5)) + "\n")
summary.write("Jaccard Index: " + str(round((tp_var + tp_eq)/(tp_var + fp_var + fn_var + tp_eq + fp_eq + fn_eq),5)) + "\n")
|
flexible
|
{
"blob_id": "af2aa236f6bfc582093faf868a374be1ebdfabf2",
"index": 1235,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nout.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\\n')\n<mask token>\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'E' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = []\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data\n ['domain_length'])):\n true_boundaries.append(i)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_eq += tp_seq\n fp_eq += fp_seq\n fn_eq += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',E,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('EQUAL-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_eq) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_eq) + 
'\\n')\nsummary.write('FN equal domain: ' + str(fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +\n '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),\n 5)) + '\\n\\n')\n<mask token>\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'V' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = [1]\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(1, int(truth_data['domains']) + 1):\n b_next = true_boundaries[i - 1] + int(truth_data['length_' +\n str(i)])\n true_boundaries.append(b_next)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_var += tp_seq\n fp_var += fp_seq\n fn_var += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' 
+ str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('VARIABLE-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_var) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_var) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_var) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +\n '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)\n ) + '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +\n fn_var), 5)) + '\\n\\n')\nsummary.write('OVERALL STATISTICS\\n')\nsummary.write('TP: ' + str(tp_var + tp_eq) + '\\n')\nsummary.write('FP: ' + str(fp_var + fp_eq) + '\\n')\nsummary.write('FN: ' + str(fn_var + fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +\n fn_var + tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + tp_eq + fp_eq), 5)) + '\\n')\nsummary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\\n')\n",
"step-3": "<mask token>\ncutoff = float(input('Tolerance (decimal)? '))\ndocpath = 'C:/Users/RackS/Documents/'\nout = open('isosegmenter_scoring_error' + str(cutoff * 100) + '.csv', 'w',\n encoding='UTF-8')\nsummary = open('isosegmenter_score_summary_error' + str(cutoff * 100) +\n '.txt', 'w', encoding='UTF-8')\nout.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\\n')\ntp_eq = 0\nfp_eq = 0\nfn_eq = 0\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'E' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = []\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data\n ['domain_length'])):\n true_boundaries.append(i)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_eq += tp_seq\n fp_eq += fp_seq\n fn_eq += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',E,' + 
str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('EQUAL-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_eq) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_eq) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +\n '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),\n 5)) + '\\n\\n')\ntp_var = 0\nfp_var = 0\nfn_var = 0\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'V' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = [1]\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(1, int(truth_data['domains']) + 1):\n b_next = true_boundaries[i - 1] + int(truth_data['length_' +\n str(i)])\n true_boundaries.append(b_next)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n 
fn_seq = int(truth_data['domains']) - tp_seq\n tp_var += tp_seq\n fp_var += fp_seq\n fn_var += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('VARIABLE-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_var) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_var) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_var) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +\n '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)\n ) + '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +\n fn_var), 5)) + '\\n\\n')\nsummary.write('OVERALL STATISTICS\\n')\nsummary.write('TP: ' + str(tp_var + tp_eq) + '\\n')\nsummary.write('FP: ' + str(fp_var + fp_eq) + '\\n')\nsummary.write('FN: ' + str(fn_var + fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +\n fn_var + tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + tp_eq + fp_eq), 5)) + '\\n')\nsummary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\\n')\n",
"step-4": "<mask token>\nimport os\nimport json\nimport csv\ncutoff = float(input('Tolerance (decimal)? '))\ndocpath = 'C:/Users/RackS/Documents/'\nout = open('isosegmenter_scoring_error' + str(cutoff * 100) + '.csv', 'w',\n encoding='UTF-8')\nsummary = open('isosegmenter_score_summary_error' + str(cutoff * 100) +\n '.txt', 'w', encoding='UTF-8')\nout.write('SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\\n')\ntp_eq = 0\nfp_eq = 0\nfn_eq = 0\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'E' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = []\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data\n ['domain_length'])):\n true_boundaries.append(i)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_eq += tp_seq\n fp_eq += fp_seq\n fn_eq += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid 
+ ',E,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('EQUAL-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_eq) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_eq) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_eq / (tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_eq / (tp_eq + fp_eq), 5)) +\n '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_eq / (tp_eq + fp_eq + fn_eq),\n 5)) + '\\n\\n')\ntp_var = 0\nfp_var = 0\nfn_var = 0\nfor file in os.listdir(docpath + 'isoSegmenter100'):\n if file.endswith('.csv') and 'V' in file:\n predict_data = csv.DictReader(open(docpath + 'isoSegmenter100/' +\n file, 'r', encoding='UTF-8'))\n seqid = file.replace('.csv', '')\n with open(docpath + 'ground_truth100/' + seqid + '.json', 'r',\n encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n true_boundaries = [1]\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(1, int(truth_data['domains']) + 1):\n b_next = true_boundaries[i - 1] + int(truth_data['length_' +\n str(i)])\n true_boundaries.append(b_next)\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i + 1]\n tolerance = cutoff * (true_boundaries[i + 1] -\n true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print('START MATCH: ' + str(true_boundaries[i]) +\n ', ' + pred_domain['Start'])\n print('END MATCH: ' + str(true_boundaries[i + 1]) +\n ', ' + pred_domain['End'])\n print('DIFFERENCES: ' + str(startdiff) + ', ' + str\n (enddiff) + ', TOLERANCE = ' + str(tolerance))\n print()\n break\n if not matched:\n fp_seq 
+= 1\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_var += tp_seq\n fp_var += fp_seq\n fn_var += fn_seq\n sensitivity = round(tp_seq / (tp_seq + fn_seq), 5)\n ppv = round(tp_seq / (tp_seq + fp_seq), 5)\n jaccard = round(tp_seq / (tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid + ',V,' + str(truth_data['domains']) + ',' + str(\n tp_seq) + ',' + str(fp_seq) + ',' + str(fn_seq) + ',' + str(\n sensitivity) + ',' + str(ppv) + ',' + str(jaccard) + '\\n')\nsummary.write('VARIABLE-LENGTH STATISTICS\\n')\nsummary.write('TP equal domain: ' + str(tp_var) + '\\n')\nsummary.write('FP equal domain: ' + str(fp_var) + '\\n')\nsummary.write('FN equal domain: ' + str(fn_var) + '\\n')\nsummary.write('Sensitivity: ' + str(round(tp_var / (tp_var + fn_var), 5)) +\n '\\n')\nsummary.write('Precision(PPV): ' + str(round(tp_var / (tp_var + fp_var), 5)\n ) + '\\n')\nsummary.write('Jaccard Index: ' + str(round(tp_var / (tp_var + fp_var +\n fn_var), 5)) + '\\n\\n')\nsummary.write('OVERALL STATISTICS\\n')\nsummary.write('TP: ' + str(tp_var + tp_eq) + '\\n')\nsummary.write('FP: ' + str(fp_var + fp_eq) + '\\n')\nsummary.write('FN: ' + str(fn_var + fn_eq) + '\\n')\nsummary.write('Sensitivity: ' + str(round((tp_var + tp_eq) / (tp_var +\n fn_var + tp_eq + fn_eq), 5)) + '\\n')\nsummary.write('Precision(PPV): ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + tp_eq + fp_eq), 5)) + '\\n')\nsummary.write('Jaccard Index: ' + str(round((tp_var + tp_eq) / (tp_var +\n fp_var + fn_var + tp_eq + fp_eq + fn_eq), 5)) + '\\n')\n",
"step-5": "\"\"\"\n\"\"\"\nimport os\nimport json\nimport csv\n\ncutoff = float(input(\"Tolerance (decimal)? \"))\ndocpath = \"C:/Users/RackS/Documents/\"\nout = open(\"isosegmenter_scoring_error\"+str(cutoff*100)+\".csv\", 'w', encoding='UTF-8')\nsummary = open(\"isosegmenter_score_summary_error\"+str(cutoff*100)+\".txt\", 'w', encoding='UTF-8')\nout.write(\"SEQUENCE_ID,TYPE,DOMAINS,TP,FP,FN,Sens,PPV,Jaccard\\n\")\n\ntp_eq = 0\nfp_eq = 0\nfn_eq = 0\n\nfor file in os.listdir(docpath+\"isoSegmenter100\"):\n if file.endswith(\".csv\") and \"E\" in file:\n predict_data = csv.DictReader(open(docpath+\"isoSegmenter100/\"+file, 'r', encoding='UTF-8'))\n seqid = file.replace(\".csv\", \"\")\n with open(docpath+\"ground_truth100/\"+seqid+\".json\", 'r', encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n\n true_boundaries = []\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(0, int(truth_data['tot_length']) + 1, int(truth_data['domain_length'])):\n true_boundaries.append(i)\n\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i+1]\n tolerance = cutoff*(true_boundaries[i+1] - true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print(\"START MATCH: \" + str(true_boundaries[i]) + \", \" + pred_domain['Start'])\n print(\"END MATCH: \" + str(true_boundaries[i+1]) + \", \" + pred_domain['End'])\n print(\"DIFFERENCES: \" + str(startdiff) + \", \" + str(enddiff) + \", TOLERANCE = \" + str(tolerance))\n print()\n break\n if not matched:\n fp_seq += 1\n\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_eq += tp_seq\n fp_eq += fp_seq\n fn_eq += fn_seq\n sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)\n ppv = round(tp_seq/(tp_seq+fp_seq), 5)\n jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)\n 
out.write(seqid+\",E,\"+str(truth_data['domains'])+\",\"+str(tp_seq)+\",\"+str(fp_seq)+\",\"+str(fn_seq)+\",\"+str(sensitivity)+\",\"+str(ppv)+\",\"+str(jaccard)+\"\\n\")\n\nsummary.write(\"EQUAL-LENGTH STATISTICS\\n\")\nsummary.write(\"TP equal domain: \" + str(tp_eq) + \"\\n\")\nsummary.write(\"FP equal domain: \" + str(fp_eq) + \"\\n\")\nsummary.write(\"FN equal domain: \" + str(fn_eq) + \"\\n\")\nsummary.write(\"Sensitivity: \" + str(round(tp_eq/(tp_eq + fn_eq),5)) + \"\\n\")\nsummary.write(\"Precision(PPV): \" + str(round(tp_eq/(tp_eq + fp_eq),5)) + \"\\n\")\nsummary.write(\"Jaccard Index: \" + str(round(tp_eq/(tp_eq + fp_eq + fn_eq),5)) + \"\\n\\n\")\n\ntp_var = 0\nfp_var = 0\nfn_var = 0\nfor file in os.listdir(docpath+\"isoSegmenter100\"):\n if file.endswith(\".csv\") and \"V\" in file:\n predict_data = csv.DictReader(open(docpath+\"isoSegmenter100/\"+file, 'r', encoding='UTF-8'))\n seqid = file.replace(\".csv\", \"\")\n with open(docpath+\"ground_truth100/\"+seqid+\".json\", 'r', encoding='UTF-8') as json_file:\n truth_data = json.load(json_file)\n\n true_boundaries = [1]\n tp_seq = 0\n fp_seq = 0\n fn_seq = 0\n for i in range(1, int(truth_data['domains']) + 1):\n b_next = true_boundaries[i-1] + int(truth_data['length_'+str(i)])\n true_boundaries.append(b_next)\n\n for pred_domain in predict_data:\n matched = False\n for i in range(0, len(true_boundaries) - 1):\n startdiff = int(pred_domain['Start']) - true_boundaries[i]\n enddiff = int(pred_domain['End']) - true_boundaries[i+1]\n tolerance = cutoff*(true_boundaries[i+1] - true_boundaries[i])\n if abs(startdiff) <= tolerance:\n if abs(enddiff) <= tolerance:\n tp_seq += 1\n matched = True\n print(seqid)\n print(\"START MATCH: \" + str(true_boundaries[i]) + \", \" + pred_domain['Start'])\n print(\"END MATCH: \" + str(true_boundaries[i+1]) + \", \" + pred_domain['End'])\n print(\"DIFFERENCES: \" + str(startdiff) + \", \" + str(enddiff) + \", TOLERANCE = \" + str(tolerance))\n print()\n break\n if not 
matched:\n fp_seq += 1\n\n fn_seq = int(truth_data['domains']) - tp_seq\n tp_var += tp_seq\n fp_var += fp_seq\n fn_var += fn_seq\n sensitivity = round(tp_seq/(tp_seq + fn_seq), 5)\n ppv = round(tp_seq/(tp_seq+fp_seq), 5)\n jaccard = round(tp_seq/(tp_seq + fp_seq + fn_seq), 5)\n out.write(seqid+\",V,\"+str(truth_data['domains'])+\",\"+str(tp_seq)+\",\"+str(fp_seq)+\",\"+str(fn_seq)+\",\"+str(sensitivity)+\",\"+str(ppv)+\",\"+str(jaccard)+\"\\n\")\n\nsummary.write(\"VARIABLE-LENGTH STATISTICS\\n\")\nsummary.write(\"TP equal domain: \" + str(tp_var) + \"\\n\")\nsummary.write(\"FP equal domain: \" + str(fp_var) + \"\\n\")\nsummary.write(\"FN equal domain: \" + str(fn_var) + \"\\n\")\nsummary.write(\"Sensitivity: \" + str(round(tp_var/(tp_var + fn_var),5)) + \"\\n\")\nsummary.write(\"Precision(PPV): \" + str(round(tp_var/(tp_var + fp_var),5)) + \"\\n\")\nsummary.write(\"Jaccard Index: \" + str(round(tp_var/(tp_var + fp_var + fn_var),5)) + \"\\n\\n\")\n \n\nsummary.write(\"OVERALL STATISTICS\\n\")\nsummary.write(\"TP: \" + str(tp_var + tp_eq) + \"\\n\")\nsummary.write(\"FP: \" + str(fp_var + fp_eq) + \"\\n\")\nsummary.write(\"FN: \" + str(fn_var + fn_eq) + \"\\n\")\nsummary.write(\"Sensitivity: \" + str(round((tp_var + tp_eq)/(tp_var + fn_var + tp_eq + fn_eq),5)) + \"\\n\")\nsummary.write(\"Precision(PPV): \" + str(round((tp_var + tp_eq)/(tp_var + fp_var + tp_eq + fp_eq),5)) + \"\\n\")\nsummary.write(\"Jaccard Index: \" + str(round((tp_var + tp_eq)/(tp_var + fp_var + fn_var + tp_eq + fp_eq + fn_eq),5)) + \"\\n\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cv2
import numpy as np

# Capture frames from the default camera (device 0), showing the full frame
# and a 100x100 cropped region until the user presses 'q'.
result = cv2.VideoCapture(0)
while True:
    ret, square = result.read()
    # Guard against a failed grab (camera busy/unplugged); without this,
    # slicing the returned None frame raises a TypeError.
    if not ret:
        break
    area = square[100:200, 100:200]  # crop rows 100-199, cols 100-199
    cv2.imshow("video", square)
    cv2.imshow("video2", area)
    print(square)
    # waitKey returns a key code (or -1); mask to 8 bits and quit on 'q'.
    if cv2.waitKey(25) & 0xff == ord('q'):
        break
# Release the camera handle and close all display windows.
result.release()
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "934921b22d036bd611134ce74f6eba3a2710018e",
"index": 529,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n ret, square = result.read()\n area = square[100:200, 100:200]\n cv2.imshow('video', square)\n cv2.imshow('video2', area)\n print(square)\n if cv2.waitKey(25) & 255 == ord('q'):\n break\nresult.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nresult = cv2.VideoCapture(0)\nwhile True:\n ret, square = result.read()\n area = square[100:200, 100:200]\n cv2.imshow('video', square)\n cv2.imshow('video2', area)\n print(square)\n if cv2.waitKey(25) & 255 == ord('q'):\n break\nresult.release()\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nresult = cv2.VideoCapture(0)\nwhile True:\n ret, square = result.read()\n area = square[100:200, 100:200]\n cv2.imshow('video', square)\n cv2.imshow('video2', area)\n print(square)\n if cv2.waitKey(25) & 255 == ord('q'):\n break\nresult.release()\ncv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\n\n\nresult=cv2.VideoCapture(0)\n\nwhile True:\n ret,square=result.read()\n area=square[100:200,100:200]\n cv2.imshow(\"video\",square)\n cv2.imshow(\"video2\",area)\n print(square)\n\n if cv2.waitKey(25) & 0xff == ord('q'):\n break\nresult.release()\ncv2.destroyAllWindows()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class Wspak:
    """Iterator yielding every second element of *data* (indices 0, 2, 4, ...).

    NOTE(review): the original Polish docstring claimed "values in reversed
    order", but the iteration has always stepped *forward* by 2; only the
    stop condition is corrected here, not the direction.
    """

    def __init__(self, data):
        self.data = data
        # Start two positions before index 0 so the first advance yields data[0].
        self.index = -2
        # Highest index from which another step of 2 would go out of range.
        # The original used len(data) - 1, which raised IndexError for
        # even-length (and empty) inputs.
        self.i = len(data) - 2

    def __iter__(self):
        return self

    def __next__(self):
        # Stop before stepping past the end of the sequence.
        if self.index >= self.i:
            raise StopIteration
        self.index = self.index + 2
        return self.data[self.index]
d = ["sdasda", "sdasdasd", "sdsad232", "dasda", "dsada"]
g = (2, 3, 4, 6, 7)
# Materialize the iterator over the strings, then print each number it
# yields from the tuple, and finally show the collected string list.
d = list(Wspak(d))
for x in Wspak(g):
    print(x)
print(d)
|
normal
|
{
"blob_id": "ea1d62c4a8c406dde9bb138ee045be5e682fdbfe",
"index": 566,
"step-1": "class Wspak:\n <mask token>\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\n<mask token>\n",
"step-3": "class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\n<mask token>\nfor x in Wspak(g):\n print(x)\nprint(d)\n",
"step-4": "class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\nd = ['sdasda', 'sdasdasd', 'sdsad232', 'dasda', 'dsada']\ng = 2, 3, 4, 6, 7\nd = [x for x in Wspak(d)]\nfor x in Wspak(g):\n print(x)\nprint(d)\n",
"step-5": "class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i=len(data)-1\n\n def __iter__(self):\n return self\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index+2\n return self.data[self.index]\nd=([\"sdasda\",\"sdasdasd\",\"sdsad232\",\"dasda\",\"dsada\"])\ng=(2,3,4,6,7)\nd = [x for x in Wspak(d)]\nfor x in Wspak(g):\n print(x)\nprint(d)",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import KMeans
from kneed import KneeLocator
# Create a pandas data frame from the csv file (two feature columns).
df = pd.read_csv('ClusterPlot.csv', usecols=['V1', 'V2'])

# Convert the pandas data frame to a NumPy array for scikit-learn.
arr = df.to_numpy()

# Sum of Squared Errors (cluster inertia) for k = 1..10; used to locate the
# "elbow" suggesting the natural number of clusters.
distortions = []
for i in range(1, 11):
    km = KMeans(n_clusters=i, init='random',
                n_init=10, max_iter=300,
                tol=1e-04, random_state=0)
    km.fit(arr)
    distortions.append(km.inertia_)

# Find the elbow/knee of the clusters-vs-distortion curve; that count feeds
# the final K-Means run below.
kn = KneeLocator(range(1, 11), distortions, curve='convex', direction='decreasing')
print('The number of clusters are: ' + str(kn.knee))

# Plot the no. of clusters vs distortion graph and annotate the elbow point.
plt.plot(range(1, 11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')
plt.show()

# Final K-Means run using the detected number of clusters.
km = KMeans(
    n_clusters=kn.knee, init='random',
    n_init=10, max_iter=300,
    tol=1e-04, random_state=0
)

# Cluster label for each sample.
y_km = km.fit_predict(arr)

# One color and one marker per possible cluster (supports up to 10).
colors = ['lightgreen', 'orange', 'lightblue', 'azure', 'crimson',
          'lightpink', 'black', 'gold', 'coral', 'navy']
markers = ['s', 'o', 'v', '^', '<', '>', 'h', 'H', 'D', 'd']

# Plot every detected cluster.  The original hard-coded range(0, 3), which
# silently dropped clusters whenever the knee detector found more than 3.
for i in range(kn.knee):
    plt.scatter(
        arr[y_km == i, 0], arr[y_km == i, 1],
        s=50, c=colors[i],
        marker=markers[i], edgecolor='black',
        label='cluster ' + str(i + 1)
    )

# Plotting the centroids for all the clusters.
plt.scatter(
    km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],
    s=250, marker='*',
    c='red', edgecolor='black',
    label='centroids'
)

plt.legend(scatterpoints=1)
plt.grid()
plt.show()
|
normal
|
{
"blob_id": "09417014963172fc71b4268aafdec1405c04f34d",
"index": 3472,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, 11):\n km = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=\n 0.0001, random_state=0)\n km.fit(arr)\n distortions.append(km.inertia_)\n<mask token>\nprint('The number of clusters are: ' + str(kn.knee))\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('Distortion')\nplt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')\nplt.show()\n<mask token>\nfor i in range(0, 3):\n plt.scatter(arr[y_km == i, 0], arr[y_km == i, 1], s=50, c=colors[i],\n marker=markers[i], edgecolor='black', label='cluster ' + str(i + 1))\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=250,\n marker='*', c='red', edgecolor='black', label='centroids')\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()\n",
"step-3": "<mask token>\ndf = pd.read_csv('ClusterPlot.csv', usecols=['V1', 'V2'])\narr = df.to_numpy()\ndistortions = []\nfor i in range(1, 11):\n km = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=\n 0.0001, random_state=0)\n km.fit(arr)\n distortions.append(km.inertia_)\nkn = KneeLocator(range(1, 11), distortions, curve='convex', direction=\n 'decreasing')\nprint('The number of clusters are: ' + str(kn.knee))\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('Distortion')\nplt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')\nplt.show()\nkm = KMeans(n_clusters=kn.knee, init='random', n_init=10, max_iter=300, tol\n =0.0001, random_state=0)\ny_km = km.fit_predict(arr)\ncolors = ['lightgreen', 'orange', 'lightblue', 'azure', 'crimson',\n 'lightpink', 'black', 'gold', 'coral', 'navy']\nmarkers = ['s', 'o', 'v', '^', '<', '>', 'h', 'H', 'D', 'd']\nfor i in range(0, 3):\n plt.scatter(arr[y_km == i, 0], arr[y_km == i, 1], s=50, c=colors[i],\n marker=markers[i], edgecolor='black', label='cluster ' + str(i + 1))\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=250,\n marker='*', c='red', edgecolor='black', label='centroids')\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom kneed import KneeLocator\ndf = pd.read_csv('ClusterPlot.csv', usecols=['V1', 'V2'])\narr = df.to_numpy()\ndistortions = []\nfor i in range(1, 11):\n km = KMeans(n_clusters=i, init='random', n_init=10, max_iter=300, tol=\n 0.0001, random_state=0)\n km.fit(arr)\n distortions.append(km.inertia_)\nkn = KneeLocator(range(1, 11), distortions, curve='convex', direction=\n 'decreasing')\nprint('The number of clusters are: ' + str(kn.knee))\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('Distortion')\nplt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')\nplt.show()\nkm = KMeans(n_clusters=kn.knee, init='random', n_init=10, max_iter=300, tol\n =0.0001, random_state=0)\ny_km = km.fit_predict(arr)\ncolors = ['lightgreen', 'orange', 'lightblue', 'azure', 'crimson',\n 'lightpink', 'black', 'gold', 'coral', 'navy']\nmarkers = ['s', 'o', 'v', '^', '<', '>', 'h', 'H', 'D', 'd']\nfor i in range(0, 3):\n plt.scatter(arr[y_km == i, 0], arr[y_km == i, 1], s=50, c=colors[i],\n marker=markers[i], edgecolor='black', label='cluster ' + str(i + 1))\nplt.scatter(km.cluster_centers_[:, 0], km.cluster_centers_[:, 1], s=250,\n marker='*', c='red', edgecolor='black', label='centroids')\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()\n",
"step-5": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.cluster import KMeans\nfrom kneed import KneeLocator\n\n#Create a panda data frame from the csv file\ndf = pd.read_csv('ClusterPlot.csv', usecols=['V1','V2'])\n\n#Convert the panda data frame to a NumPy array\narr = df.to_numpy()\n\n#Code used to visualise the data and check if the import worked correctly\n#Now commented out but retained for debugging.\n#plt.scatter(arr[:,0],arr[:,1], label='True Position')\n#plt.show()\n\n# Create an array to store the Sum of Squared Errors or the cluster inertia\n# for the k-clusters in multiple runs of the K-Means algo with different\n# number of clusters assumed\n\ndistortions = []\n\nfor i in range(1,11):\n km = KMeans(n_clusters=i, init='random',\n n_init=10, max_iter=300,\n tol=1e-04, random_state=0)\n km.fit(arr)\n distortions.append(km.inertia_)\n\n# Find the elbow or knee from the plot of no. of clusters vs distortion for that\n# number. This algorithm locates the knee and that is used to provide the Number\n# of clusters to the main run of K-means algo.\n\nkn = KneeLocator(range(1,11), distortions, curve='convex', direction='decreasing')\nprint('The number of clusters are: ' + str(kn.knee))\n\n#plot the no. 
of clusters vs distortion graph and annotate the elbow point\n\nplt.plot(range(1, 11), distortions, marker='o')\nplt.xlabel('Number of clusters')\nplt.ylabel('Distortion')\nplt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles='dashed')\nplt.show()\n\n\n\n#From the sciKitLearn clustering algorithms, the K-means clustering\n#algorithm is used.\nkm = KMeans(\n n_clusters=kn.knee, init='random',\n n_init=10, max_iter=300,\n tol=1e-04, random_state=0\n)\n\n#Obtain the cluster labels by running the K-means algorithm with\n# the parameters defined above.\ny_km = km.fit_predict(arr)\n\n#Color Array\ncolors = ['lightgreen','orange','lightblue','azure', 'crimson','lightpink','black','gold', 'coral', 'navy']\n\n#Marker Array\nmarkers = ['s','o','v', '^', '<', '>', 'h', 'H', 'D', 'd']\n\n#Plot the clusters.\nfor i in range(0, 3):\n plt.scatter(\n arr[y_km == i, 0], arr[y_km == i, 1],\n s=50, c=colors[i],\n marker=markers[i], edgecolor='black',\n label='cluster ' + str(i+1)\n)\n\n# Plotting the centroids for all the clusters.\nplt.scatter(\n km.cluster_centers_[:, 0], km.cluster_centers_[:, 1],\n s=250, marker='*',\n c='red', edgecolor='black',\n label='centroids'\n)\n\nplt.legend(scatterpoints=1)\nplt.grid()\nplt.show()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import argparse
import csv
import glob
import os
import sys
def run_main():
    """
    Main function to process user input and then generate the description files for each run

    :return: exit code -- 0 on success, 1 otherwise
    """

    parser = argparse.ArgumentParser(description="Scan a run directory and create files to ")
    parser.add_argument('--run-directory', dest='run_directory',
                        action='store', default='',
                        help='path to directory with xed files to process')
    args = parser.parse_args(sys.argv[1:])

    if not os.path.isdir(args.run_directory):
        sys.stderr.write("{0} is not a directory, exiting\n".format(args.run_directory))
        return 1
    run_name = os.path.abspath(args.run_directory)

    # Use the last path component as the run name; the else branch handles a
    # trailing slash (where basename would be empty).
    if os.path.basename(run_name):
        run_name = os.path.basename(run_name)
    else:
        run_name = os.path.split(run_name)[0].split('/')[-1]

    if not os.path.exists('info'):
        os.mkdir('info')

    for directory in os.listdir(args.run_directory):
        if not os.path.isdir(os.path.join(args.run_directory, directory)):
            continue
        csv_filename = "info/{0}_{1}_files.csv".format(run_name, directory)
        entries = glob.glob(os.path.join(args.run_directory, directory, '*.xed'))
        if not entries:
            continue
        # newline='' is required when handing a file to csv.writer; without
        # it every row is followed by a blank line on Windows.
        with open(csv_filename, 'w', newline='') as file_obj:
            csv_writer = csv.writer(file_obj)
            csv_writer.writerow(['Run', 'Data Set', 'File'])
            for entry in entries:
                # Build the SRM URI pointing at the file on the storage element.
                uri = "srm://ceph-se.osgconnect.net:8443/srm/v2/" + \
                      "server?SFN=/cephfs/srm/xenon/" + \
                      entry.replace('/xenon/', '')
                csv_writer.writerow([run_name, directory, uri])
    # Explicit success code, matching the documented contract (previously the
    # function fell off the end and returned None; sys.exit(None) also exits 0,
    # so this is backward-compatible).
    return 0
if __name__ == '__main__':
    # Delegate to run_main() and propagate its return value as the process
    # exit code.
    sys.exit(run_main())
|
normal
|
{
"blob_id": "6e6c6c5795e8723a86ae5dfc8f40df57d3dd10f7",
"index": 3336,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_main():\n \"\"\"\n Main function to process user input and then generate the description files for each run\n\n :return: exit code -- 0 on success, 1 otherwise\n \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Scan a run directory and create files to ')\n parser.add_argument('--run-directory', dest='run_directory', action=\n 'store', default='', help='path to directory with xed files to process'\n )\n args = parser.parse_args(sys.argv[1:])\n if not os.path.isdir(args.run_directory):\n sys.stderr.write('{0} is not a directory, exiting\\n'.format(args.\n run_directory))\n return 1\n run_name = os.path.abspath(args.run_directory)\n if os.path.basename(run_name):\n run_name = os.path.basename(run_name)\n else:\n run_name = os.path.split(run_name)[0].split('/')[-1]\n if not os.path.exists('info'):\n os.mkdir('info')\n for directory in os.listdir(args.run_directory):\n if not os.path.isdir(os.path.join(args.run_directory, directory)):\n continue\n csv_filename = 'info/{0}_{1}_files.csv'.format(run_name, directory)\n entries = glob.glob(os.path.join(args.run_directory, directory,\n '*.xed'))\n if len(entries) == 0:\n continue\n with open(csv_filename, 'w') as file_obj:\n csv_writer = csv.writer(file_obj)\n csv_writer.writerow(['Run', 'Data Set', 'File'])\n for entry in entries:\n uri = ('srm://ceph-se.osgconnect.net:8443/srm/v2/' +\n 'server?SFN=/cephfs/srm/xenon/' + entry.replace(\n '/xenon/', ''))\n csv_writer.writerow([run_name, directory, uri])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run_main():\n \"\"\"\n Main function to process user input and then generate the description files for each run\n\n :return: exit code -- 0 on success, 1 otherwise\n \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Scan a run directory and create files to ')\n parser.add_argument('--run-directory', dest='run_directory', action=\n 'store', default='', help='path to directory with xed files to process'\n )\n args = parser.parse_args(sys.argv[1:])\n if not os.path.isdir(args.run_directory):\n sys.stderr.write('{0} is not a directory, exiting\\n'.format(args.\n run_directory))\n return 1\n run_name = os.path.abspath(args.run_directory)\n if os.path.basename(run_name):\n run_name = os.path.basename(run_name)\n else:\n run_name = os.path.split(run_name)[0].split('/')[-1]\n if not os.path.exists('info'):\n os.mkdir('info')\n for directory in os.listdir(args.run_directory):\n if not os.path.isdir(os.path.join(args.run_directory, directory)):\n continue\n csv_filename = 'info/{0}_{1}_files.csv'.format(run_name, directory)\n entries = glob.glob(os.path.join(args.run_directory, directory,\n '*.xed'))\n if len(entries) == 0:\n continue\n with open(csv_filename, 'w') as file_obj:\n csv_writer = csv.writer(file_obj)\n csv_writer.writerow(['Run', 'Data Set', 'File'])\n for entry in entries:\n uri = ('srm://ceph-se.osgconnect.net:8443/srm/v2/' +\n 'server?SFN=/cephfs/srm/xenon/' + entry.replace(\n '/xenon/', ''))\n csv_writer.writerow([run_name, directory, uri])\n\n\nif __name__ == '__main__':\n sys.exit(run_main())\n",
"step-4": "import argparse\nimport csv\nimport glob\nimport os\nimport sys\n\n\ndef run_main():\n \"\"\"\n Main function to process user input and then generate the description files for each run\n\n :return: exit code -- 0 on success, 1 otherwise\n \"\"\"\n parser = argparse.ArgumentParser(description=\n 'Scan a run directory and create files to ')\n parser.add_argument('--run-directory', dest='run_directory', action=\n 'store', default='', help='path to directory with xed files to process'\n )\n args = parser.parse_args(sys.argv[1:])\n if not os.path.isdir(args.run_directory):\n sys.stderr.write('{0} is not a directory, exiting\\n'.format(args.\n run_directory))\n return 1\n run_name = os.path.abspath(args.run_directory)\n if os.path.basename(run_name):\n run_name = os.path.basename(run_name)\n else:\n run_name = os.path.split(run_name)[0].split('/')[-1]\n if not os.path.exists('info'):\n os.mkdir('info')\n for directory in os.listdir(args.run_directory):\n if not os.path.isdir(os.path.join(args.run_directory, directory)):\n continue\n csv_filename = 'info/{0}_{1}_files.csv'.format(run_name, directory)\n entries = glob.glob(os.path.join(args.run_directory, directory,\n '*.xed'))\n if len(entries) == 0:\n continue\n with open(csv_filename, 'w') as file_obj:\n csv_writer = csv.writer(file_obj)\n csv_writer.writerow(['Run', 'Data Set', 'File'])\n for entry in entries:\n uri = ('srm://ceph-se.osgconnect.net:8443/srm/v2/' +\n 'server?SFN=/cephfs/srm/xenon/' + entry.replace(\n '/xenon/', ''))\n csv_writer.writerow([run_name, directory, uri])\n\n\nif __name__ == '__main__':\n sys.exit(run_main())\n",
"step-5": "#!/usr/bin/env python\n\nimport argparse\nimport csv\nimport glob\nimport os\nimport sys\n\n\ndef run_main():\n \"\"\"\n Main function to process user input and then generate the description files for each run\n\n :return: exit code -- 0 on success, 1 otherwise\n \"\"\"\n\n parser = argparse.ArgumentParser(description=\"Scan a run directory and create files to \")\n parser.add_argument('--run-directory', dest='run_directory',\n action='store', default='',\n help='path to directory with xed files to process')\n args = parser.parse_args(sys.argv[1:])\n\n if not os.path.isdir(args.run_directory):\n sys.stderr.write(\"{0} is not a directory, exiting\\n\".format(args.run_directory))\n return 1\n run_name = os.path.abspath(args.run_directory)\n\n if os.path.basename(run_name):\n run_name = os.path.basename(run_name)\n else:\n run_name = os.path.split(run_name)[0].split('/')[-1]\n\n if not os.path.exists('info'):\n os.mkdir('info')\n\n for directory in os.listdir(args.run_directory):\n if not os.path.isdir(os.path.join(args.run_directory, directory)):\n continue\n csv_filename = \"info/{0}_{1}_files.csv\".format(run_name, directory)\n entries = glob.glob(os.path.join(args.run_directory, directory, '*.xed'))\n if len(entries) == 0:\n continue\n with open(csv_filename, 'w') as file_obj:\n csv_writer = csv.writer(file_obj)\n csv_writer.writerow(['Run', 'Data Set', 'File'])\n for entry in entries:\n uri = \"srm://ceph-se.osgconnect.net:8443/srm/v2/\" + \\\n \"server?SFN=/cephfs/srm/xenon/\" + \\\n entry.replace('/xenon/', '')\n csv_writer.writerow([run_name, directory, uri])\n\n\nif __name__ == '__main__':\n sys.exit(run_main())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 4.0.5 on 2023-02-14 18:57
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0020_festival_boxoffice_close_festival_boxoffice_open'),
]
operations = [
migrations.AlterModelOptions(
name='user',
options={},
),
]
|
normal
|
{
"blob_id": "e9bf5a40360d35f32bd2ad5aa404225f49895a14",
"index": 4221,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core',\n '0020_festival_boxoffice_close_festival_boxoffice_open')]\n operations = [migrations.AlterModelOptions(name='user', options={})]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('core',\n '0020_festival_boxoffice_close_festival_boxoffice_open')]\n operations = [migrations.AlterModelOptions(name='user', options={})]\n",
"step-5": "# Generated by Django 4.0.5 on 2023-02-14 18:57\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('core', '0020_festival_boxoffice_close_festival_boxoffice_open'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='user',\n options={},\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.system('clear')
<|reserved_special_token_0|>
print(banner)
<|reserved_special_token_0|>
os.system('clear')
print('\n' + '\x1b[94m - Loading Data ...')
time.sleep(3)
for u in usrf:
userlist.append(u.replace('\n', ''))
for p in pasf:
passlist.append(p.replace('\n', ''))
os.system('clear')
print('\n' + ' - Combo List Makeing ...')
time.sleep(3)
<|reserved_special_token_0|>
if len(userlist) > len(passlist):
for num in range(len(passlist)):
username = userlist[num]
password = passlist[num]
combo = username + ':' + password
combof.write(combo + '\n')
print(combo)
elif len(userlist) < len(passlist):
for num in range(len(userlist)):
username = userlist[num]
password = passlist[num]
combo = username + ':' + password
combof.write(combo + '\n')
print(combo)
if len(userlist) == len(passlist):
for num in range(len(passlist)):
username = userlist[num]
password = passlist[num]
combo = username + ':' + password
combof.write(combo + '\n')
print(combo)
combof.close()
os.system('clear')
print('\n' + ' - Combo List Maked ;')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.system('clear')
banner = """
[92m .o88b. .d88b. .88b d88. d8888b. .d88b.
d8P Y8 .8P Y8. 88'YbdP`88 88 `8D .8P Y8.
8P 88 88 88 88 88 88oooY' 88 88
8b 88 88 88 88 88 88~~~b. 88 88
Y8b d8 `8b d8' 88 88 88 88 8D `8b d8'
`Y88P' `Y88P' YP YP YP Y8888P' `Y88P'
t.me/LinuxArmy
---------------
.88b d88. .d8b. db dD d88888b d8888b. Code by it4min
88'YbdP`88 d8' `8b 88 ,8P' 88' 88 `8D
88 88 88 88ooo88 88,8P 88ooooo 88oobY'
88 88 88 88~~~88 88`8b 88~~~~~ 88`8b
88 88 88 88 88 88 `88. 88. 88 `88.
YP YP YP YP YP YP YD Y88888P 88 YD
"""
print(banner)
userf = input('\x1b[91m>>> \x1b[93mEnter the username address: ')
passf = input('\x1b[91m>>> \x1b[93mEnter the password address: ')
usrf = open(userf, 'r').read().splitlines()
pasf = open(passf, 'r').read().splitlines()
userlist = []
passlist = []
os.system('clear')
print('\n' + '\x1b[94m - Loading Data ...')
time.sleep(3)
for u in usrf:
userlist.append(u.replace('\n', ''))
for p in pasf:
passlist.append(p.replace('\n', ''))
os.system('clear')
print('\n' + ' - Combo List Makeing ...')
time.sleep(3)
combof = open('ComboList.txt', 'a')
if len(userlist) > len(passlist):
for num in range(len(passlist)):
username = userlist[num]
password = passlist[num]
combo = username + ':' + password
combof.write(combo + '\n')
print(combo)
elif len(userlist) < len(passlist):
for num in range(len(userlist)):
username = userlist[num]
password = passlist[num]
combo = username + ':' + password
combof.write(combo + '\n')
print(combo)
if len(userlist) == len(passlist):
for num in range(len(passlist)):
username = userlist[num]
password = passlist[num]
combo = username + ':' + password
combof.write(combo + '\n')
print(combo)
combof.close()
os.system('clear')
print('\n' + ' - Combo List Maked ;')
<|reserved_special_token_1|>
import time, os
os.system('clear')
banner = """
[92m .o88b. .d88b. .88b d88. d8888b. .d88b.
d8P Y8 .8P Y8. 88'YbdP`88 88 `8D .8P Y8.
8P 88 88 88 88 88 88oooY' 88 88
8b 88 88 88 88 88 88~~~b. 88 88
Y8b d8 `8b d8' 88 88 88 88 8D `8b d8'
`Y88P' `Y88P' YP YP YP Y8888P' `Y88P'
t.me/LinuxArmy
---------------
.88b d88. .d8b. db dD d88888b d8888b. Code by it4min
88'YbdP`88 d8' `8b 88 ,8P' 88' 88 `8D
88 88 88 88ooo88 88,8P 88ooooo 88oobY'
88 88 88 88~~~88 88`8b 88~~~~~ 88`8b
88 88 88 88 88 88 `88. 88. 88 `88.
YP YP YP YP YP YP YD Y88888P 88 YD
"""
print(banner)
userf = input('\x1b[91m>>> \x1b[93mEnter the username address: ')
passf = input('\x1b[91m>>> \x1b[93mEnter the password address: ')
usrf = open(userf, 'r').read().splitlines()
pasf = open(passf, 'r').read().splitlines()
userlist = []
passlist = []
os.system('clear')
print('\n' + '\x1b[94m - Loading Data ...')
time.sleep(3)
for u in usrf:
userlist.append(u.replace('\n', ''))
for p in pasf:
passlist.append(p.replace('\n', ''))
os.system('clear')
print('\n' + ' - Combo List Makeing ...')
time.sleep(3)
combof = open('ComboList.txt', 'a')
if len(userlist) > len(passlist):
for num in range(len(passlist)):
username = userlist[num]
password = passlist[num]
combo = username + ':' + password
combof.write(combo + '\n')
print(combo)
elif len(userlist) < len(passlist):
for num in range(len(userlist)):
username = userlist[num]
password = passlist[num]
combo = username + ':' + password
combof.write(combo + '\n')
print(combo)
if len(userlist) == len(passlist):
for num in range(len(passlist)):
username = userlist[num]
password = passlist[num]
combo = username + ':' + password
combof.write(combo + '\n')
print(combo)
combof.close()
os.system('clear')
print('\n' + ' - Combo List Maked ;')
<|reserved_special_token_1|>
# Code By it4min
# t.me/it4min
# t.me/LinuxArmy
# -- Combo List Maker v1 --
import time, os
os.system("clear")
banner = '''
\033[92m .o88b. .d88b. .88b d88. d8888b. .d88b.
d8P Y8 .8P Y8. 88'YbdP`88 88 `8D .8P Y8.
8P 88 88 88 88 88 88oooY' 88 88
8b 88 88 88 88 88 88~~~b. 88 88
Y8b d8 `8b d8' 88 88 88 88 8D `8b d8'
`Y88P' `Y88P' YP YP YP Y8888P' `Y88P'
t.me/LinuxArmy
---------------
.88b d88. .d8b. db dD d88888b d8888b. Code by it4min
88'YbdP`88 d8' `8b 88 ,8P' 88' 88 `8D
88 88 88 88ooo88 88,8P 88ooooo 88oobY'
88 88 88 88~~~88 88`8b 88~~~~~ 88`8b
88 88 88 88 88 88 `88. 88. 88 `88.
YP YP YP YP YP YP YD Y88888P 88 YD
'''
print(banner)
userf = input("\033[91m>>> \033[93mEnter the username address: ")
passf = input("\033[91m>>> \033[93mEnter the password address: ")
usrf = open(userf, "r").read().splitlines()
pasf = open(passf, "r").read().splitlines()
userlist = []
passlist = []
os.system("clear")
print ('\n'+"\033[94m - Loading Data ...")
time.sleep(3)
for u in usrf:
userlist.append(u.replace('\n',""))
for p in pasf:
passlist.append(p.replace('\n',""))
os.system("clear")
print ('\n'+" - Combo List Makeing ...")
time.sleep(3)
combof = open("ComboList.txt","a")
if len(userlist) > len(passlist):
for num in range(len(passlist)):
username = userlist[num]
password = passlist[num]
combo = username+":"+password
combof.write(combo+'\n')
print (combo)
elif len(userlist) < len(passlist):
for num in range(len(userlist)):
username = userlist[num]
password = passlist[num]
combo = username+":"+password
combof.write(combo+'\n')
print (combo)
if len(userlist) == len(passlist):
for num in range(len(passlist)):
username = userlist[num]
password = passlist[num]
combo = username+":"+password
combof.write(combo+'\n')
print (combo)
combof.close()
os.system("clear")
print ('\n'+" - Combo List Maked ;")
|
flexible
|
{
"blob_id": "6ab5ac0caa44366268bb8b70ac044376d9c062f0",
"index": 6976,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nos.system('clear')\n<mask token>\nprint(banner)\n<mask token>\nos.system('clear')\nprint('\\n' + '\\x1b[94m - Loading Data ...')\ntime.sleep(3)\nfor u in usrf:\n userlist.append(u.replace('\\n', ''))\nfor p in pasf:\n passlist.append(p.replace('\\n', ''))\nos.system('clear')\nprint('\\n' + ' - Combo List Makeing ...')\ntime.sleep(3)\n<mask token>\nif len(userlist) > len(passlist):\n for num in range(len(passlist)):\n username = userlist[num]\n password = passlist[num]\n combo = username + ':' + password\n combof.write(combo + '\\n')\n print(combo)\nelif len(userlist) < len(passlist):\n for num in range(len(userlist)):\n username = userlist[num]\n password = passlist[num]\n combo = username + ':' + password\n combof.write(combo + '\\n')\n print(combo)\nif len(userlist) == len(passlist):\n for num in range(len(passlist)):\n username = userlist[num]\n password = passlist[num]\n combo = username + ':' + password\n combof.write(combo + '\\n')\n print(combo)\ncombof.close()\nos.system('clear')\nprint('\\n' + ' - Combo List Maked ;')\n",
"step-3": "<mask token>\nos.system('clear')\nbanner = \"\"\"\n\u001b[92m .o88b. .d88b. .88b d88. d8888b. .d88b. \nd8P Y8 .8P Y8. 88'YbdP`88 88 `8D .8P Y8. \n8P 88 88 88 88 88 88oooY' 88 88 \n8b 88 88 88 88 88 88~~~b. 88 88 \nY8b d8 `8b d8' 88 88 88 88 8D `8b d8' \n `Y88P' `Y88P' YP YP YP Y8888P' `Y88P' \n t.me/LinuxArmy\n ---------------\n.88b d88. .d8b. db dD d88888b d8888b. Code by it4min\n88'YbdP`88 d8' `8b 88 ,8P' 88' 88 `8D \n88 88 88 88ooo88 88,8P 88ooooo 88oobY' \n88 88 88 88~~~88 88`8b 88~~~~~ 88`8b \n88 88 88 88 88 88 `88. 88. 88 `88. \nYP YP YP YP YP YP YD Y88888P 88 YD \n \n \n\"\"\"\nprint(banner)\nuserf = input('\\x1b[91m>>> \\x1b[93mEnter the username address: ')\npassf = input('\\x1b[91m>>> \\x1b[93mEnter the password address: ')\nusrf = open(userf, 'r').read().splitlines()\npasf = open(passf, 'r').read().splitlines()\nuserlist = []\npasslist = []\nos.system('clear')\nprint('\\n' + '\\x1b[94m - Loading Data ...')\ntime.sleep(3)\nfor u in usrf:\n userlist.append(u.replace('\\n', ''))\nfor p in pasf:\n passlist.append(p.replace('\\n', ''))\nos.system('clear')\nprint('\\n' + ' - Combo List Makeing ...')\ntime.sleep(3)\ncombof = open('ComboList.txt', 'a')\nif len(userlist) > len(passlist):\n for num in range(len(passlist)):\n username = userlist[num]\n password = passlist[num]\n combo = username + ':' + password\n combof.write(combo + '\\n')\n print(combo)\nelif len(userlist) < len(passlist):\n for num in range(len(userlist)):\n username = userlist[num]\n password = passlist[num]\n combo = username + ':' + password\n combof.write(combo + '\\n')\n print(combo)\nif len(userlist) == len(passlist):\n for num in range(len(passlist)):\n username = userlist[num]\n password = passlist[num]\n combo = username + ':' + password\n combof.write(combo + '\\n')\n print(combo)\ncombof.close()\nos.system('clear')\nprint('\\n' + ' - Combo List Maked ;')\n",
"step-4": "import time, os\nos.system('clear')\nbanner = \"\"\"\n\u001b[92m .o88b. .d88b. .88b d88. d8888b. .d88b. \nd8P Y8 .8P Y8. 88'YbdP`88 88 `8D .8P Y8. \n8P 88 88 88 88 88 88oooY' 88 88 \n8b 88 88 88 88 88 88~~~b. 88 88 \nY8b d8 `8b d8' 88 88 88 88 8D `8b d8' \n `Y88P' `Y88P' YP YP YP Y8888P' `Y88P' \n t.me/LinuxArmy\n ---------------\n.88b d88. .d8b. db dD d88888b d8888b. Code by it4min\n88'YbdP`88 d8' `8b 88 ,8P' 88' 88 `8D \n88 88 88 88ooo88 88,8P 88ooooo 88oobY' \n88 88 88 88~~~88 88`8b 88~~~~~ 88`8b \n88 88 88 88 88 88 `88. 88. 88 `88. \nYP YP YP YP YP YP YD Y88888P 88 YD \n \n \n\"\"\"\nprint(banner)\nuserf = input('\\x1b[91m>>> \\x1b[93mEnter the username address: ')\npassf = input('\\x1b[91m>>> \\x1b[93mEnter the password address: ')\nusrf = open(userf, 'r').read().splitlines()\npasf = open(passf, 'r').read().splitlines()\nuserlist = []\npasslist = []\nos.system('clear')\nprint('\\n' + '\\x1b[94m - Loading Data ...')\ntime.sleep(3)\nfor u in usrf:\n userlist.append(u.replace('\\n', ''))\nfor p in pasf:\n passlist.append(p.replace('\\n', ''))\nos.system('clear')\nprint('\\n' + ' - Combo List Makeing ...')\ntime.sleep(3)\ncombof = open('ComboList.txt', 'a')\nif len(userlist) > len(passlist):\n for num in range(len(passlist)):\n username = userlist[num]\n password = passlist[num]\n combo = username + ':' + password\n combof.write(combo + '\\n')\n print(combo)\nelif len(userlist) < len(passlist):\n for num in range(len(userlist)):\n username = userlist[num]\n password = passlist[num]\n combo = username + ':' + password\n combof.write(combo + '\\n')\n print(combo)\nif len(userlist) == len(passlist):\n for num in range(len(passlist)):\n username = userlist[num]\n password = passlist[num]\n combo = username + ':' + password\n combof.write(combo + '\\n')\n print(combo)\ncombof.close()\nos.system('clear')\nprint('\\n' + ' - Combo List Maked ;')\n",
"step-5": "# Code By it4min\n# t.me/it4min\n# t.me/LinuxArmy\n# -- Combo List Maker v1 --\nimport time, os\n\nos.system(\"clear\")\nbanner = '''\n\\033[92m .o88b. .d88b. .88b d88. d8888b. .d88b. \nd8P Y8 .8P Y8. 88'YbdP`88 88 `8D .8P Y8. \n8P 88 88 88 88 88 88oooY' 88 88 \n8b 88 88 88 88 88 88~~~b. 88 88 \nY8b d8 `8b d8' 88 88 88 88 8D `8b d8' \n `Y88P' `Y88P' YP YP YP Y8888P' `Y88P' \n t.me/LinuxArmy\n ---------------\n.88b d88. .d8b. db dD d88888b d8888b. Code by it4min\n88'YbdP`88 d8' `8b 88 ,8P' 88' 88 `8D \n88 88 88 88ooo88 88,8P 88ooooo 88oobY' \n88 88 88 88~~~88 88`8b 88~~~~~ 88`8b \n88 88 88 88 88 88 `88. 88. 88 `88. \nYP YP YP YP YP YP YD Y88888P 88 YD \n \n \n'''\nprint(banner)\nuserf = input(\"\\033[91m>>> \\033[93mEnter the username address: \")\npassf = input(\"\\033[91m>>> \\033[93mEnter the password address: \")\nusrf = open(userf, \"r\").read().splitlines()\npasf = open(passf, \"r\").read().splitlines()\n\nuserlist = []\npasslist = []\n\nos.system(\"clear\")\nprint ('\\n'+\"\\033[94m - Loading Data ...\")\ntime.sleep(3)\n\nfor u in usrf:\n userlist.append(u.replace('\\n',\"\"))\n\nfor p in pasf:\n passlist.append(p.replace('\\n',\"\"))\n\nos.system(\"clear\")\nprint ('\\n'+\" - Combo List Makeing ...\")\ntime.sleep(3)\n\ncombof = open(\"ComboList.txt\",\"a\")\n\nif len(userlist) > len(passlist):\n for num in range(len(passlist)):\n username = userlist[num]\n password = passlist[num]\n combo = username+\":\"+password\n combof.write(combo+'\\n')\n print (combo)\n\nelif len(userlist) < len(passlist):\n for num in range(len(userlist)):\n username = userlist[num]\n password = passlist[num]\n combo = username+\":\"+password\n combof.write(combo+'\\n')\n print (combo)\n\nif len(userlist) == len(passlist):\n for num in range(len(passlist)):\n username = userlist[num]\n password = passlist[num]\n combo = username+\":\"+password\n combof.write(combo+'\\n')\n print (combo)\ncombof.close()\n\nos.system(\"clear\")\nprint ('\\n'+\" - Combo List Maked ;\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
N=int(input("N="))
K=int()
K=0
while N>=2:
N=N/2
K=K+1
print("K=",K)
|
normal
|
{
"blob_id": "7f4c6e4a5627b44b9a700d2de4f9caca0ae8b17c",
"index": 2808,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile N >= 2:\n N = N / 2\n K = K + 1\nprint('K=', K)\n",
"step-3": "N = int(input('N='))\nK = int()\nK = 0\nwhile N >= 2:\n N = N / 2\n K = K + 1\nprint('K=', K)\n",
"step-4": "N=int(input(\"N=\"))\nK=int()\n\nK=0\nwhile N>=2:\n N=N/2\n K=K+1\nprint(\"K=\",K)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
if suppressRoomPrint:
suppressRoomPrint = False
else:
print(player.location)
print(
f"""
{player.location.name}
{player.location.description}
{player.location.getItems()}
"""
)
inp = input('What is your command: ')
if inp == 'q':
break
if inp == 'n' or inp == 's' or inp == 'w' or inp == 'e':
newRoom = player.location.getRoomInDirection(inp)
if newRoom == None:
print('\x1b[1;37;41m + \nImpossible, try again.\n\x1b[0m')
suppressRoomPrint = True
else:
player.change_location(newRoom)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
items = {'scimitar': Item('Scimitar', '+7 Attack'), 'mace': Item('Mace',
'+13 Attack'), 'tower_shield': Item('Tower Shield', '+8 Block'),
'heraldic_shield': Item('Heraldic Shield', '+12 Block'), 'chainmail':
Item('Chainmail', '+15 Defense'), 'gold_plate': Item('Gold Plate',
'+25 Defense'), 'health_potion': Item('Health Potion', 'Heal 10 HP'),
'mana_potion': Item('Mana Potion', 'Restore 20 Mana'), 'gold': Item(
'Gold', 'Currency for other items from vendors'), 'demon_heart': Item(
'Demon Heart', 'Bestows owner with great power')}
room = {'outside': Room('Outside Cave Entrance',
'North of you, the cave mount beckons', [items['scimitar'], items[
'health_potion']]), 'foyer': Room('Foyer',
"""Dim light filters in from the south. Dusty
passages run north and east."""
, [items['tower_shield'], items['chainmail']]), 'overlook': Room(
'Grand Overlook',
"""A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm."""
, [items['mace'], items['mana_potion']]), 'narrow': Room(
'Narrow Passage',
"""The narrow passage bends here from west
to north. The smell of gold permeates the air."""
, [items['gold_plate'], items['heraldic_shield']]), 'treasure': Room(
'Treasure Chamber',
"""You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south."""
, [items['gold'], items['demon_heart']])}
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['overlook'].s_to = room['foyer']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['treasure'].s_to = room['narrow']
player = Player(room['outside'])
suppressRoomPrint = False
while True:
if suppressRoomPrint:
suppressRoomPrint = False
else:
print(player.location)
print(
f"""
{player.location.name}
{player.location.description}
{player.location.getItems()}
"""
)
inp = input('What is your command: ')
if inp == 'q':
break
if inp == 'n' or inp == 's' or inp == 'w' or inp == 'e':
newRoom = player.location.getRoomInDirection(inp)
if newRoom == None:
print('\x1b[1;37;41m + \nImpossible, try again.\n\x1b[0m')
suppressRoomPrint = True
else:
player.change_location(newRoom)
<|reserved_special_token_1|>
from room import Room
from player import Player
from item import Item
items = {'scimitar': Item('Scimitar', '+7 Attack'), 'mace': Item('Mace',
'+13 Attack'), 'tower_shield': Item('Tower Shield', '+8 Block'),
'heraldic_shield': Item('Heraldic Shield', '+12 Block'), 'chainmail':
Item('Chainmail', '+15 Defense'), 'gold_plate': Item('Gold Plate',
'+25 Defense'), 'health_potion': Item('Health Potion', 'Heal 10 HP'),
'mana_potion': Item('Mana Potion', 'Restore 20 Mana'), 'gold': Item(
'Gold', 'Currency for other items from vendors'), 'demon_heart': Item(
'Demon Heart', 'Bestows owner with great power')}
room = {'outside': Room('Outside Cave Entrance',
'North of you, the cave mount beckons', [items['scimitar'], items[
'health_potion']]), 'foyer': Room('Foyer',
"""Dim light filters in from the south. Dusty
passages run north and east."""
, [items['tower_shield'], items['chainmail']]), 'overlook': Room(
'Grand Overlook',
"""A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm."""
, [items['mace'], items['mana_potion']]), 'narrow': Room(
'Narrow Passage',
"""The narrow passage bends here from west
to north. The smell of gold permeates the air."""
, [items['gold_plate'], items['heraldic_shield']]), 'treasure': Room(
'Treasure Chamber',
"""You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south."""
, [items['gold'], items['demon_heart']])}
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['overlook'].s_to = room['foyer']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['treasure'].s_to = room['narrow']
player = Player(room['outside'])
suppressRoomPrint = False
while True:
if suppressRoomPrint:
suppressRoomPrint = False
else:
print(player.location)
print(
f"""
{player.location.name}
{player.location.description}
{player.location.getItems()}
"""
)
inp = input('What is your command: ')
if inp == 'q':
break
if inp == 'n' or inp == 's' or inp == 'w' or inp == 'e':
newRoom = player.location.getRoomInDirection(inp)
if newRoom == None:
print('\x1b[1;37;41m + \nImpossible, try again.\n\x1b[0m')
suppressRoomPrint = True
else:
player.change_location(newRoom)
<|reserved_special_token_1|>
from room import Room
from player import Player
from item import Item
# Declare all the rooms
items = {
'scimitar': Item('Scimitar', '+7 Attack'),
'mace': Item('Mace', '+13 Attack'),
'tower_shield': Item('Tower Shield', '+8 Block'),
'heraldic_shield': Item('Heraldic Shield', '+12 Block'),
'chainmail': Item('Chainmail', '+15 Defense'),
'gold_plate': Item('Gold Plate', '+25 Defense'),
'health_potion': Item('Health Potion', 'Heal 10 HP'),
'mana_potion': Item('Mana Potion', 'Restore 20 Mana'),
'gold': Item('Gold', 'Currency for other items from vendors'),
'demon_heart': Item('Demon Heart', 'Bestows owner with great power')
}
room = {
'outside': Room("Outside Cave Entrance",
"""North of you, the cave mount beckons""",
[items['scimitar'], items['health_potion']]),
'foyer': Room("Foyer", """Dim light filters in from the south. Dusty
passages run north and east.""",
[items['tower_shield'], items['chainmail']]),
'overlook': Room("Grand Overlook", """A steep cliff appears before you, falling
into the darkness. Ahead to the north, a light flickers in
the distance, but there is no way across the chasm.""",
[items['mace'], items['mana_potion']]),
'narrow': Room("Narrow Passage", """The narrow passage bends here from west
to north. The smell of gold permeates the air.""",
[items['gold_plate'], items['heraldic_shield']]),
'treasure': Room("Treasure Chamber", """You've found the long-lost treasure
chamber! Sadly, it has already been completely emptied by
earlier adventurers. The only exit is to the south.""",
[items['gold'], items['demon_heart']]),
}
# Link rooms together
room['outside'].n_to = room['foyer']
room['foyer'].s_to = room['outside']
room['foyer'].n_to = room['overlook']
room['foyer'].e_to = room['narrow']
room['overlook'].s_to = room['foyer']
room['narrow'].w_to = room['foyer']
room['narrow'].n_to = room['treasure']
room['treasure'].s_to = room['narrow']
# Main
player = Player(room['outside'])
suppressRoomPrint = False
while True:
if suppressRoomPrint:
suppressRoomPrint = False
else:
print (player.location)
print (f'\n{player.location.name}\n {player.location.description}\n {player.location.getItems()}\n')
inp = input("What is your command: ")
if inp == "q":
break
if inp == "n" or inp == "s" or inp == "w" or inp == "e":
newRoom = player.location.getRoomInDirection(inp)
if newRoom == None:
print('\x1b[1;37;41m + \nImpossible, try again.\n\x1b[0m')
suppressRoomPrint = True
else:
player.change_location(newRoom)
|
flexible
|
{
"blob_id": "07a172c28057dc803efdbdc10a9e2e11df4e527b",
"index": 3134,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n if suppressRoomPrint:\n suppressRoomPrint = False\n else:\n print(player.location)\n print(\n f\"\"\"\n{player.location.name}\n {player.location.description}\n {player.location.getItems()}\n\"\"\"\n )\n inp = input('What is your command: ')\n if inp == 'q':\n break\n if inp == 'n' or inp == 's' or inp == 'w' or inp == 'e':\n newRoom = player.location.getRoomInDirection(inp)\n if newRoom == None:\n print('\\x1b[1;37;41m + \\nImpossible, try again.\\n\\x1b[0m')\n suppressRoomPrint = True\n else:\n player.change_location(newRoom)\n",
"step-3": "<mask token>\nitems = {'scimitar': Item('Scimitar', '+7 Attack'), 'mace': Item('Mace',\n '+13 Attack'), 'tower_shield': Item('Tower Shield', '+8 Block'),\n 'heraldic_shield': Item('Heraldic Shield', '+12 Block'), 'chainmail':\n Item('Chainmail', '+15 Defense'), 'gold_plate': Item('Gold Plate',\n '+25 Defense'), 'health_potion': Item('Health Potion', 'Heal 10 HP'),\n 'mana_potion': Item('Mana Potion', 'Restore 20 Mana'), 'gold': Item(\n 'Gold', 'Currency for other items from vendors'), 'demon_heart': Item(\n 'Demon Heart', 'Bestows owner with great power')}\nroom = {'outside': Room('Outside Cave Entrance',\n 'North of you, the cave mount beckons', [items['scimitar'], items[\n 'health_potion']]), 'foyer': Room('Foyer',\n \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"\n , [items['tower_shield'], items['chainmail']]), 'overlook': Room(\n 'Grand Overlook',\n \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"\n , [items['mace'], items['mana_potion']]), 'narrow': Room(\n 'Narrow Passage',\n \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"\n , [items['gold_plate'], items['heraldic_shield']]), 'treasure': Room(\n 'Treasure Chamber',\n \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. 
The only exit is to the south.\"\"\"\n , [items['gold'], items['demon_heart']])}\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\nplayer = Player(room['outside'])\nsuppressRoomPrint = False\nwhile True:\n if suppressRoomPrint:\n suppressRoomPrint = False\n else:\n print(player.location)\n print(\n f\"\"\"\n{player.location.name}\n {player.location.description}\n {player.location.getItems()}\n\"\"\"\n )\n inp = input('What is your command: ')\n if inp == 'q':\n break\n if inp == 'n' or inp == 's' or inp == 'w' or inp == 'e':\n newRoom = player.location.getRoomInDirection(inp)\n if newRoom == None:\n print('\\x1b[1;37;41m + \\nImpossible, try again.\\n\\x1b[0m')\n suppressRoomPrint = True\n else:\n player.change_location(newRoom)\n",
"step-4": "from room import Room\nfrom player import Player\nfrom item import Item\nitems = {'scimitar': Item('Scimitar', '+7 Attack'), 'mace': Item('Mace',\n '+13 Attack'), 'tower_shield': Item('Tower Shield', '+8 Block'),\n 'heraldic_shield': Item('Heraldic Shield', '+12 Block'), 'chainmail':\n Item('Chainmail', '+15 Defense'), 'gold_plate': Item('Gold Plate',\n '+25 Defense'), 'health_potion': Item('Health Potion', 'Heal 10 HP'),\n 'mana_potion': Item('Mana Potion', 'Restore 20 Mana'), 'gold': Item(\n 'Gold', 'Currency for other items from vendors'), 'demon_heart': Item(\n 'Demon Heart', 'Bestows owner with great power')}\nroom = {'outside': Room('Outside Cave Entrance',\n 'North of you, the cave mount beckons', [items['scimitar'], items[\n 'health_potion']]), 'foyer': Room('Foyer',\n \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\"\n , [items['tower_shield'], items['chainmail']]), 'overlook': Room(\n 'Grand Overlook',\n \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\"\n , [items['mace'], items['mana_potion']]), 'narrow': Room(\n 'Narrow Passage',\n \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\"\n , [items['gold_plate'], items['heraldic_shield']]), 'treasure': Room(\n 'Treasure Chamber',\n \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. 
The only exit is to the south.\"\"\"\n , [items['gold'], items['demon_heart']])}\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\nplayer = Player(room['outside'])\nsuppressRoomPrint = False\nwhile True:\n if suppressRoomPrint:\n suppressRoomPrint = False\n else:\n print(player.location)\n print(\n f\"\"\"\n{player.location.name}\n {player.location.description}\n {player.location.getItems()}\n\"\"\"\n )\n inp = input('What is your command: ')\n if inp == 'q':\n break\n if inp == 'n' or inp == 's' or inp == 'w' or inp == 'e':\n newRoom = player.location.getRoomInDirection(inp)\n if newRoom == None:\n print('\\x1b[1;37;41m + \\nImpossible, try again.\\n\\x1b[0m')\n suppressRoomPrint = True\n else:\n player.change_location(newRoom)\n",
"step-5": "from room import Room\nfrom player import Player\nfrom item import Item\n# Declare all the rooms\nitems = {\n 'scimitar': Item('Scimitar', '+7 Attack'),\n 'mace': Item('Mace', '+13 Attack'),\n 'tower_shield': Item('Tower Shield', '+8 Block'),\n 'heraldic_shield': Item('Heraldic Shield', '+12 Block'),\n 'chainmail': Item('Chainmail', '+15 Defense'),\n 'gold_plate': Item('Gold Plate', '+25 Defense'),\n 'health_potion': Item('Health Potion', 'Heal 10 HP'),\n 'mana_potion': Item('Mana Potion', 'Restore 20 Mana'),\n 'gold': Item('Gold', 'Currency for other items from vendors'),\n 'demon_heart': Item('Demon Heart', 'Bestows owner with great power')\n}\n\nroom = {\n 'outside': Room(\"Outside Cave Entrance\",\n \"\"\"North of you, the cave mount beckons\"\"\",\n [items['scimitar'], items['health_potion']]),\n\n 'foyer': Room(\"Foyer\", \"\"\"Dim light filters in from the south. Dusty\npassages run north and east.\"\"\",\n[items['tower_shield'], items['chainmail']]),\n\n 'overlook': Room(\"Grand Overlook\", \"\"\"A steep cliff appears before you, falling\ninto the darkness. Ahead to the north, a light flickers in\nthe distance, but there is no way across the chasm.\"\"\",\n[items['mace'], items['mana_potion']]),\n\n 'narrow': Room(\"Narrow Passage\", \"\"\"The narrow passage bends here from west\nto north. The smell of gold permeates the air.\"\"\",\n[items['gold_plate'], items['heraldic_shield']]),\n\n 'treasure': Room(\"Treasure Chamber\", \"\"\"You've found the long-lost treasure\nchamber! Sadly, it has already been completely emptied by\nearlier adventurers. 
The only exit is to the south.\"\"\",\n[items['gold'], items['demon_heart']]),\n}\n\n# Link rooms together\nroom['outside'].n_to = room['foyer']\nroom['foyer'].s_to = room['outside']\nroom['foyer'].n_to = room['overlook']\nroom['foyer'].e_to = room['narrow']\nroom['overlook'].s_to = room['foyer']\nroom['narrow'].w_to = room['foyer']\nroom['narrow'].n_to = room['treasure']\nroom['treasure'].s_to = room['narrow']\n\n# Main\n\nplayer = Player(room['outside'])\n\nsuppressRoomPrint = False\n\nwhile True:\n if suppressRoomPrint:\n suppressRoomPrint = False\n else:\n print (player.location)\n print (f'\\n{player.location.name}\\n {player.location.description}\\n {player.location.getItems()}\\n')\n inp = input(\"What is your command: \")\n\n if inp == \"q\":\n break\n if inp == \"n\" or inp == \"s\" or inp == \"w\" or inp == \"e\":\n newRoom = player.location.getRoomInDirection(inp)\n if newRoom == None:\n print('\\x1b[1;37;41m + \\nImpossible, try again.\\n\\x1b[0m')\n suppressRoomPrint = True\n else:\n player.change_location(newRoom)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import re
from django import forms
from django.contrib.auth import password_validation
from django.contrib.auth.forms import PasswordChangeForm
from django.contrib.auth.password_validation import validate_password
from .models import Account
class EditProfileModelForm(forms.ModelForm):
    """Profile-editing form backed by the Account model.

    The email address must be entered twice; validation fails when the
    two entries are present but differ.
    """

    class Meta:
        model = Account
        fields = ['first_name', 'last_name', 'dob', 'email', 'email_confirmation', 'bio', 'avatar']

    def clean(self, *args, **kwargs):
        cleaned_data = super(EditProfileModelForm, self).clean()
        entered_email = self.cleaned_data.get('email')
        repeated_email = self.cleaned_data.get('email_confirmation')
        # Only compare when both values survived field-level validation.
        mismatch = entered_email and repeated_email and entered_email != repeated_email
        if mismatch:
            raise forms.ValidationError("Emails do not match")
        return cleaned_data
class PasswordChangeFormExt(PasswordChangeForm):
    """Form for changing user's password.

    Extends Django's PasswordChangeForm with extra policy checks: the new
    password must differ from the old one, must not contain the user's
    first or last name, must mix upper- and lowercase letters, and must
    include at least one non-alphanumeric character.
    """

    def clean(self):
        user = self.user
        new_password = self.cleaned_data.get('new_password1')
        old_password = self.cleaned_data.get('old_password')

        # Robustness fix: if 'new_password1' failed field validation it is
        # absent from cleaned_data, and the original code passed None into
        # validate_password() / called .lower() on it, raising TypeError
        # instead of a normal form error. Let Django's required-field
        # validation report the missing value.
        if not new_password:
            return self.cleaned_data

        validate_password(new_password, user)

        if user.check_password(old_password):
            if new_password == old_password:
                raise forms.ValidationError("New password must be different than the old password")

            # Reject passwords embedding the user's own name (case-insensitive).
            if (user.first_name != "" and user.first_name.lower() in new_password.lower()
                    or user.last_name != "" and user.last_name.lower() in new_password.lower()):
                raise forms.ValidationError("You cannot use personal information in your password")

            # isupper()/islower() are True only when the cased characters
            # are all one case, i.e. the password lacks a case mix.
            if new_password.isupper() or new_password.islower():
                raise forms.ValidationError("Password must contain uppercase and lowercase letters")

            # Entirely alphanumeric -> no special character present.
            if re.match(r"^[a-zA-Z0-9]*$", new_password):
                raise forms.ValidationError("Password must contain a special character")

        return self.cleaned_data
|
normal
|
{
"blob_id": "af442d4a78930a0ebcd85a1cdfe4aa86461be5c1",
"index": 1274,
"step-1": "<mask token>\n\n\nclass PasswordChangeFormExt(PasswordChangeForm):\n \"\"\"Form for changing user's password.\"\"\"\n\n def clean(self):\n user = self.user\n new_password = self.cleaned_data.get('new_password1')\n old_password = self.cleaned_data.get('old_password')\n validate_password(new_password, user)\n if user.check_password(old_password):\n if new_password == old_password:\n raise forms.ValidationError(\n 'New password must be different than the old password')\n if user.first_name != '' and user.first_name.lower(\n ) in new_password.lower(\n ) or user.last_name != '' and user.last_name.lower(\n ) in new_password.lower():\n raise forms.ValidationError(\n 'You cannot use personal information in your password')\n if new_password.isupper() or new_password.islower():\n raise forms.ValidationError(\n 'Password must contain uppercase and lowercase letters')\n if re.match('^[a-zA-Z0-9]*$', new_password):\n raise forms.ValidationError(\n 'Password must contain a special character')\n return self.cleaned_data\n",
"step-2": "<mask token>\n\n\nclass EditProfileModelForm(forms.ModelForm):\n\n\n class Meta:\n model = Account\n fields = ['first_name', 'last_name', 'dob', 'email',\n 'email_confirmation', 'bio', 'avatar']\n <mask token>\n\n\nclass PasswordChangeFormExt(PasswordChangeForm):\n \"\"\"Form for changing user's password.\"\"\"\n\n def clean(self):\n user = self.user\n new_password = self.cleaned_data.get('new_password1')\n old_password = self.cleaned_data.get('old_password')\n validate_password(new_password, user)\n if user.check_password(old_password):\n if new_password == old_password:\n raise forms.ValidationError(\n 'New password must be different than the old password')\n if user.first_name != '' and user.first_name.lower(\n ) in new_password.lower(\n ) or user.last_name != '' and user.last_name.lower(\n ) in new_password.lower():\n raise forms.ValidationError(\n 'You cannot use personal information in your password')\n if new_password.isupper() or new_password.islower():\n raise forms.ValidationError(\n 'Password must contain uppercase and lowercase letters')\n if re.match('^[a-zA-Z0-9]*$', new_password):\n raise forms.ValidationError(\n 'Password must contain a special character')\n return self.cleaned_data\n",
"step-3": "<mask token>\n\n\nclass EditProfileModelForm(forms.ModelForm):\n\n\n class Meta:\n model = Account\n fields = ['first_name', 'last_name', 'dob', 'email',\n 'email_confirmation', 'bio', 'avatar']\n\n def clean(self, *args, **kwargs):\n cleaned_data = super(EditProfileModelForm, self).clean()\n email = self.cleaned_data.get('email')\n email_confirmation = self.cleaned_data.get('email_confirmation')\n if email and email_confirmation and email != email_confirmation:\n raise forms.ValidationError('Emails do not match')\n return cleaned_data\n\n\nclass PasswordChangeFormExt(PasswordChangeForm):\n \"\"\"Form for changing user's password.\"\"\"\n\n def clean(self):\n user = self.user\n new_password = self.cleaned_data.get('new_password1')\n old_password = self.cleaned_data.get('old_password')\n validate_password(new_password, user)\n if user.check_password(old_password):\n if new_password == old_password:\n raise forms.ValidationError(\n 'New password must be different than the old password')\n if user.first_name != '' and user.first_name.lower(\n ) in new_password.lower(\n ) or user.last_name != '' and user.last_name.lower(\n ) in new_password.lower():\n raise forms.ValidationError(\n 'You cannot use personal information in your password')\n if new_password.isupper() or new_password.islower():\n raise forms.ValidationError(\n 'Password must contain uppercase and lowercase letters')\n if re.match('^[a-zA-Z0-9]*$', new_password):\n raise forms.ValidationError(\n 'Password must contain a special character')\n return self.cleaned_data\n",
"step-4": "import re\nfrom django import forms\nfrom django.contrib.auth import password_validation\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.auth.password_validation import validate_password\nfrom .models import Account\n\n\nclass EditProfileModelForm(forms.ModelForm):\n\n\n class Meta:\n model = Account\n fields = ['first_name', 'last_name', 'dob', 'email',\n 'email_confirmation', 'bio', 'avatar']\n\n def clean(self, *args, **kwargs):\n cleaned_data = super(EditProfileModelForm, self).clean()\n email = self.cleaned_data.get('email')\n email_confirmation = self.cleaned_data.get('email_confirmation')\n if email and email_confirmation and email != email_confirmation:\n raise forms.ValidationError('Emails do not match')\n return cleaned_data\n\n\nclass PasswordChangeFormExt(PasswordChangeForm):\n \"\"\"Form for changing user's password.\"\"\"\n\n def clean(self):\n user = self.user\n new_password = self.cleaned_data.get('new_password1')\n old_password = self.cleaned_data.get('old_password')\n validate_password(new_password, user)\n if user.check_password(old_password):\n if new_password == old_password:\n raise forms.ValidationError(\n 'New password must be different than the old password')\n if user.first_name != '' and user.first_name.lower(\n ) in new_password.lower(\n ) or user.last_name != '' and user.last_name.lower(\n ) in new_password.lower():\n raise forms.ValidationError(\n 'You cannot use personal information in your password')\n if new_password.isupper() or new_password.islower():\n raise forms.ValidationError(\n 'Password must contain uppercase and lowercase letters')\n if re.match('^[a-zA-Z0-9]*$', new_password):\n raise forms.ValidationError(\n 'Password must contain a special character')\n return self.cleaned_data\n",
"step-5": "import re\n\nfrom django import forms\nfrom django.contrib.auth import password_validation\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.auth.password_validation import validate_password\n\nfrom .models import Account\n\n\nclass EditProfileModelForm(forms.ModelForm):\n class Meta:\n model = Account\n fields = ['first_name', 'last_name', 'dob', 'email', 'email_confirmation', 'bio', 'avatar']\n\n def clean(self, *args, **kwargs):\n cleaned_data = super(EditProfileModelForm, self).clean()\n email = self.cleaned_data.get('email')\n email_confirmation = self.cleaned_data.get('email_confirmation')\n if email and email_confirmation and email != email_confirmation:\n raise forms.ValidationError(\"Emails do not match\")\n return cleaned_data\n\n\nclass PasswordChangeFormExt(PasswordChangeForm):\n \"\"\"Form for changing user's password.\"\"\"\n\n def clean(self):\n user = self.user\n new_password = self.cleaned_data.get('new_password1')\n old_password = self.cleaned_data.get('old_password')\n\n validate_password(new_password, user)\n\n if user.check_password(old_password):\n if new_password == old_password:\n raise forms.ValidationError(\"New password must be different than the old password\")\n\n if (user.first_name != \"\" and user.first_name.lower() in new_password.lower()\n or user.last_name != \"\" and user.last_name.lower() in new_password.lower()):\n raise forms.ValidationError(\"You cannot use personal information in your password\")\n\n if new_password.isupper() or new_password.islower():\n raise forms.ValidationError(\"Password must contain uppercase and lowercase letters\")\n\n if re.match(\"^[a-zA-Z0-9]*$\", new_password):\n raise forms.ValidationError(\"Password must contain a special character\")\n\n return self.cleaned_data\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
"""Handles loading and tokenising of datasets"""
import enum
import numpy as np
import os.path
import pickle
from tqdm import tqdm
import nltk
from nltk import WordPunctTokenizer
nltk.download('punkt')
from nltk.tokenize import word_tokenize
from lib.utils import DATASETS_BASE_PATH, SAVED_POS_BASE_PATH
from lib.pos import get_pos_tags
class DatasetType(enum.Enum):
    """
    Represents the type of dataset
    """

    TRAIN = 0  # training split ("train" file prefix)
    VAL = 1    # validation split (files use the "dev" prefix)
    TEST = 2   # held-out test split; ships without a scores file
class Language(enum.Enum):
    """
    Represents the dataset language
    """

    GERMAN = 0   # target language "ende" / en-de folder
    ENGLISH = 1  # source language only; rejected as a target by load_data
    CHINESE = 2  # target language "enzh" / en-zh folder
def load_text(path):
    """
    Read the text file at *path* and return its newline-stripped lines
    as a numpy array of strings.
    """
    with open(path) as handle:
        lines = handle.read().splitlines()
    return np.array(lines)
def load_data(data_type=DatasetType.TRAIN, target_language=Language.GERMAN, augmented=False):
    """
    Load one dataset split for a target language.

    Returns a (src, translated, scores) tuple of numpy arrays; *scores*
    is None for the test split, which has no scores file. English is not
    a valid target language. The *augmented* flag selects the augmented
    en-de data directory and only affects the German target.
    """
    if target_language == Language.ENGLISH:
        raise ValueError("Target language cannot be english")

    base_path = DATASETS_BASE_PATH
    if target_language == Language.GERMAN:
        folder = "en-de" if not augmented else "en-de-aug"
        language = "ende"
    else:
        folder = "en-zh"
        language = "enzh"
    path = os.path.join(base_path, folder)

    # Map the split to its on-disk file prefix (VAL files are "dev.*").
    if data_type == DatasetType.TRAIN:
        prefix = "train"
    elif data_type == DatasetType.VAL:
        prefix = "dev"
    elif data_type == DatasetType.TEST:
        prefix = "test"

    src_file = os.path.abspath(os.path.join(path, f'{prefix}.{language}.src'))
    translation_file = os.path.abspath(os.path.join(path, f'{prefix}.{language}.mt'))

    scores = None
    if data_type != DatasetType.TEST:
        # Only train/dev splits ship gold scores.
        score_file = os.path.abspath(os.path.join(path, f'{prefix}.{language}.scores'))
        scores = np.loadtxt(score_file)

    src = load_text(src_file)
    translated = load_text(translation_file)
    return src, translated, scores
def tokenize(text_array, use_pos=False, data_type=None, lang=None):
    """
    Tokenise an array of sentences.

    If use_pos is False, returns a list of tokenised sentences, each a
    list of lower-cased tokens. If use_pos is True, each token becomes a
    (token, POS tag) tuple and the result is cached on disk per
    (data_type, lang) because POS tagging is slow.
    NOTE: data_type and lang are only consulted when use_pos is True.
    """
    cache_path = None
    if use_pos:
        # Since POS tags take long to generate, use cached version if exists.
        if data_type == DatasetType.TRAIN:
            cache_path = os.path.join(SAVED_POS_BASE_PATH, f'train-{lang}-pos.pickle')
        elif data_type == DatasetType.VAL:
            cache_path = os.path.join(SAVED_POS_BASE_PATH, f'val-{lang}-pos.pickle')
        elif data_type == DatasetType.TEST:
            cache_path = os.path.join(SAVED_POS_BASE_PATH, f'test-{lang}-pos.pickle')

        # Robustness fix: for an unrecognised data_type, cache_path stays
        # None and the original passed None to os.path.isfile -> TypeError.
        if cache_path is not None and os.path.isfile(cache_path):
            with open(cache_path, 'rb') as handle:
                sentences = pickle.load(handle)
            return sentences

    tokeniser = WordPunctTokenizer()

    sentences = []
    with tqdm(total=len(text_array)) as pbar:
        for sentence in text_array:
            lower_cased_tokens = [tok.lower() for tok in tokeniser.tokenize(sentence)]

            if use_pos:
                # Store tokenised sentence i.e. arrays of (token, POS_TAG)
                # tuples. Tag the whole sentence at once; fall back to
                # per-token tagging if the batch call fails.
                try:
                    sentences.append(get_pos_tags(lower_cased_tokens, lang))
                except Exception:
                    # Narrowed from a bare `except:` so KeyboardInterrupt /
                    # SystemExit still propagate.
                    sentences.append([get_pos_tags([tok], lang)[0] for tok in lower_cased_tokens])
            else:
                # Store tokenised sentence.
                sentences.append(lower_cased_tokens)
            pbar.update(1)

    if use_pos and cache_path is not None:
        # Store POS tags to allow faster loading on next invocation.
        with open(cache_path, 'wb') as handle:
            pickle.dump(sentences, handle)

    return sentences
def pad_to_length(word_embeddings, length, padding):
    """
    Pad every inner sequence of *word_embeddings* in place with
    *padding* until it has exactly *length* entries, turning shape
    (x, variable, dimensionality) into (x, length, dimensionality).

    Each sequence must not already exceed *length*.
    """
    for sequence in word_embeddings:
        shortfall = length - len(sequence)
        assert shortfall >= 0
        # Appending the same padding object repeatedly, as the original did.
        sequence.extend([padding] * shortfall)
|
normal
|
{
"blob_id": "0150e1db3ef2f6c07280f21971b43ac71fc4cada",
"index": 8984,
"step-1": "<mask token>\n\n\nclass DatasetType(enum.Enum):\n \"\"\"\n Represents the type of dataset\n \"\"\"\n TRAIN = 0\n VAL = 1\n TEST = 2\n\n\nclass Language(enum.Enum):\n \"\"\"\n Represents the dataset language\n \"\"\"\n GERMAN = 0\n ENGLISH = 1\n CHINESE = 2\n\n\ndef load_text(path):\n \"\"\"\n Given a path to csv file, loads the data and \n returns it as a numpy array\n \"\"\"\n with open(path) as f:\n read_text = f.read().splitlines()\n return np.array(read_text)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass DatasetType(enum.Enum):\n \"\"\"\n Represents the type of dataset\n \"\"\"\n TRAIN = 0\n VAL = 1\n TEST = 2\n\n\nclass Language(enum.Enum):\n \"\"\"\n Represents the dataset language\n \"\"\"\n GERMAN = 0\n ENGLISH = 1\n CHINESE = 2\n\n\ndef load_text(path):\n \"\"\"\n Given a path to csv file, loads the data and \n returns it as a numpy array\n \"\"\"\n with open(path) as f:\n read_text = f.read().splitlines()\n return np.array(read_text)\n\n\n<mask token>\n\n\ndef pad_to_length(word_embeddings, length, padding):\n \"\"\"\n Given some data (word_embeddings or other), of shape (x, variable, dimensionality) \n returns the data padded in the 2nd dimension to size length i.e. (x, length, dimensionality) \n \"\"\"\n for sentence in word_embeddings:\n num_to_append = length - len(sentence)\n assert num_to_append >= 0\n for _ in range(num_to_append):\n sentence.append(padding)\n",
"step-3": "<mask token>\nnltk.download('punkt')\n<mask token>\n\n\nclass DatasetType(enum.Enum):\n \"\"\"\n Represents the type of dataset\n \"\"\"\n TRAIN = 0\n VAL = 1\n TEST = 2\n\n\nclass Language(enum.Enum):\n \"\"\"\n Represents the dataset language\n \"\"\"\n GERMAN = 0\n ENGLISH = 1\n CHINESE = 2\n\n\ndef load_text(path):\n \"\"\"\n Given a path to csv file, loads the data and \n returns it as a numpy array\n \"\"\"\n with open(path) as f:\n read_text = f.read().splitlines()\n return np.array(read_text)\n\n\ndef load_data(data_type=DatasetType.TRAIN, target_language=Language.GERMAN,\n augmented=False):\n \"\"\"\n Given the dataset type, target language and whether or not to use augmented data, \n loads and returns numpy array representations of the source text, translation text and scores.\n \"\"\"\n if target_language == Language.ENGLISH:\n raise ValueError('Target language cannot be english')\n base_path = DATASETS_BASE_PATH\n if target_language == Language.GERMAN:\n language_folder = 'en-de' if not augmented else 'en-de-aug'\n language = 'ende'\n path = os.path.join(base_path, language_folder)\n else:\n language_folder = 'en-zh'\n language = 'enzh'\n path = os.path.join(base_path, language_folder)\n if data_type == DatasetType.TRAIN:\n prefix = 'train'\n elif data_type == DatasetType.VAL:\n prefix = 'dev'\n elif data_type == DatasetType.TEST:\n prefix = 'test'\n src_file = os.path.abspath(os.path.join(path, f'{prefix}.{language}.src'))\n translation_file = os.path.abspath(os.path.join(path,\n f'{prefix}.{language}.mt'))\n scores = None\n if data_type != DatasetType.TEST:\n score_file = os.path.abspath(os.path.join(path,\n f'{prefix}.{language}.scores'))\n scores = np.loadtxt(score_file)\n src = load_text(src_file)\n translated = load_text(translation_file)\n return src, translated, scores\n\n\ndef tokenize(text_array, use_pos=False, data_type=None, lang=None):\n \"\"\"\n Given an array of sentences, returns:\n If use_pos:\n An array of tokenised 
sentences (where each tokenised sentence is an array of tokens) \n else:\n An array of tokenised sentences (where each tokenised sentence is an array of tuples of (token, POS tag))\n NOTE: If use_pos is False, the rest of the kwargs are ignored\n \"\"\"\n if use_pos:\n cache_path = None\n if data_type == DatasetType.TRAIN:\n cache_path = os.path.join(SAVED_POS_BASE_PATH,\n f'train-{lang}-pos.pickle')\n elif data_type == DatasetType.VAL:\n cache_path = os.path.join(SAVED_POS_BASE_PATH,\n f'val-{lang}-pos.pickle')\n elif data_type == DatasetType.TEST:\n cache_path = os.path.join(SAVED_POS_BASE_PATH,\n f'test-{lang}-pos.pickle')\n if os.path.isfile(cache_path):\n with open(cache_path, 'rb') as handle:\n sentences = pickle.load(handle)\n return sentences\n tokeniser = WordPunctTokenizer()\n sentences = []\n with tqdm(total=len(text_array)) as pbar:\n for sentence in text_array:\n tokens = tokeniser.tokenize(sentence)\n lower_cased_tokens = []\n for tok in tokens:\n tok_lower = tok.lower()\n lower_cased_tokens.append(tok_lower)\n if use_pos:\n try:\n sentences.append(get_pos_tags(lower_cased_tokens, lang))\n except:\n sentences.append([get_pos_tags([tok], lang)[0] for tok in\n lower_cased_tokens])\n else:\n sentences.append(lower_cased_tokens)\n pbar.update(1)\n if use_pos:\n with open(cache_path, 'wb') as handle:\n pickle.dump(sentences, handle)\n return sentences\n\n\ndef pad_to_length(word_embeddings, length, padding):\n \"\"\"\n Given some data (word_embeddings or other), of shape (x, variable, dimensionality) \n returns the data padded in the 2nd dimension to size length i.e. (x, length, dimensionality) \n \"\"\"\n for sentence in word_embeddings:\n num_to_append = length - len(sentence)\n assert num_to_append >= 0\n for _ in range(num_to_append):\n sentence.append(padding)\n",
"step-4": "<mask token>\nimport enum\nimport numpy as np\nimport os.path\nimport pickle\nfrom tqdm import tqdm\nimport nltk\nfrom nltk import WordPunctTokenizer\nnltk.download('punkt')\nfrom nltk.tokenize import word_tokenize\nfrom lib.utils import DATASETS_BASE_PATH, SAVED_POS_BASE_PATH\nfrom lib.pos import get_pos_tags\n\n\nclass DatasetType(enum.Enum):\n \"\"\"\n Represents the type of dataset\n \"\"\"\n TRAIN = 0\n VAL = 1\n TEST = 2\n\n\nclass Language(enum.Enum):\n \"\"\"\n Represents the dataset language\n \"\"\"\n GERMAN = 0\n ENGLISH = 1\n CHINESE = 2\n\n\ndef load_text(path):\n \"\"\"\n Given a path to csv file, loads the data and \n returns it as a numpy array\n \"\"\"\n with open(path) as f:\n read_text = f.read().splitlines()\n return np.array(read_text)\n\n\ndef load_data(data_type=DatasetType.TRAIN, target_language=Language.GERMAN,\n augmented=False):\n \"\"\"\n Given the dataset type, target language and whether or not to use augmented data, \n loads and returns numpy array representations of the source text, translation text and scores.\n \"\"\"\n if target_language == Language.ENGLISH:\n raise ValueError('Target language cannot be english')\n base_path = DATASETS_BASE_PATH\n if target_language == Language.GERMAN:\n language_folder = 'en-de' if not augmented else 'en-de-aug'\n language = 'ende'\n path = os.path.join(base_path, language_folder)\n else:\n language_folder = 'en-zh'\n language = 'enzh'\n path = os.path.join(base_path, language_folder)\n if data_type == DatasetType.TRAIN:\n prefix = 'train'\n elif data_type == DatasetType.VAL:\n prefix = 'dev'\n elif data_type == DatasetType.TEST:\n prefix = 'test'\n src_file = os.path.abspath(os.path.join(path, f'{prefix}.{language}.src'))\n translation_file = os.path.abspath(os.path.join(path,\n f'{prefix}.{language}.mt'))\n scores = None\n if data_type != DatasetType.TEST:\n score_file = os.path.abspath(os.path.join(path,\n f'{prefix}.{language}.scores'))\n scores = np.loadtxt(score_file)\n src = 
load_text(src_file)\n translated = load_text(translation_file)\n return src, translated, scores\n\n\ndef tokenize(text_array, use_pos=False, data_type=None, lang=None):\n \"\"\"\n Given an array of sentences, returns:\n If use_pos:\n An array of tokenised sentences (where each tokenised sentence is an array of tokens) \n else:\n An array of tokenised sentences (where each tokenised sentence is an array of tuples of (token, POS tag))\n NOTE: If use_pos is False, the rest of the kwargs are ignored\n \"\"\"\n if use_pos:\n cache_path = None\n if data_type == DatasetType.TRAIN:\n cache_path = os.path.join(SAVED_POS_BASE_PATH,\n f'train-{lang}-pos.pickle')\n elif data_type == DatasetType.VAL:\n cache_path = os.path.join(SAVED_POS_BASE_PATH,\n f'val-{lang}-pos.pickle')\n elif data_type == DatasetType.TEST:\n cache_path = os.path.join(SAVED_POS_BASE_PATH,\n f'test-{lang}-pos.pickle')\n if os.path.isfile(cache_path):\n with open(cache_path, 'rb') as handle:\n sentences = pickle.load(handle)\n return sentences\n tokeniser = WordPunctTokenizer()\n sentences = []\n with tqdm(total=len(text_array)) as pbar:\n for sentence in text_array:\n tokens = tokeniser.tokenize(sentence)\n lower_cased_tokens = []\n for tok in tokens:\n tok_lower = tok.lower()\n lower_cased_tokens.append(tok_lower)\n if use_pos:\n try:\n sentences.append(get_pos_tags(lower_cased_tokens, lang))\n except:\n sentences.append([get_pos_tags([tok], lang)[0] for tok in\n lower_cased_tokens])\n else:\n sentences.append(lower_cased_tokens)\n pbar.update(1)\n if use_pos:\n with open(cache_path, 'wb') as handle:\n pickle.dump(sentences, handle)\n return sentences\n\n\ndef pad_to_length(word_embeddings, length, padding):\n \"\"\"\n Given some data (word_embeddings or other), of shape (x, variable, dimensionality) \n returns the data padded in the 2nd dimension to size length i.e. 
(x, length, dimensionality) \n \"\"\"\n for sentence in word_embeddings:\n num_to_append = length - len(sentence)\n assert num_to_append >= 0\n for _ in range(num_to_append):\n sentence.append(padding)\n",
"step-5": "\"\"\"Handles loading and tokenising of datasets\"\"\"\n\nimport enum\nimport numpy as np\nimport os.path\nimport pickle\nfrom tqdm import tqdm\nimport nltk\nfrom nltk import WordPunctTokenizer\nnltk.download('punkt')\nfrom nltk.tokenize import word_tokenize\nfrom lib.utils import DATASETS_BASE_PATH, SAVED_POS_BASE_PATH\nfrom lib.pos import get_pos_tags\n\nclass DatasetType(enum.Enum):\n \"\"\"\n Represents the type of dataset\n \"\"\"\n\n TRAIN = 0\n VAL = 1\n TEST = 2\n\nclass Language(enum.Enum):\n \"\"\"\n Represents the dataset language\n \"\"\"\n\n GERMAN = 0\n ENGLISH = 1\n CHINESE = 2\n\ndef load_text(path):\n \"\"\"\n Given a path to csv file, loads the data and \n returns it as a numpy array\n \"\"\"\n\n with open(path) as f:\n read_text = f.read().splitlines()\n \n return np.array(read_text)\n \n\ndef load_data(data_type=DatasetType.TRAIN, target_language=Language.GERMAN, augmented=False):\n \"\"\"\n Given the dataset type, target language and whether or not to use augmented data, \n loads and returns numpy array representations of the source text, translation text and scores.\n \"\"\"\n\n if target_language == Language.ENGLISH:\n raise ValueError(\"Target language cannot be english\")\n \n base_path = DATASETS_BASE_PATH\n if target_language == Language.GERMAN:\n language_folder = \"en-de\" if not augmented else \"en-de-aug\"\n language = \"ende\"\n path = os.path.join(base_path, language_folder)\n else:\n language_folder = \"en-zh\"\n language = \"enzh\"\n path = os.path.join(base_path, language_folder)\n\n if data_type == DatasetType.TRAIN:\n prefix = \"train\"\n elif data_type == DatasetType.VAL:\n prefix = \"dev\"\n elif data_type == DatasetType.TEST:\n prefix = \"test\"\n \n src_file = os.path.abspath(os.path.join(path, f'{prefix}.{language}.src'))\n translation_file = os.path.abspath(os.path.join(path, f'{prefix}.{language}.mt'))\n\n scores = None\n if data_type != DatasetType.TEST:\n score_file = os.path.abspath(os.path.join(path, 
f'{prefix}.{language}.scores'))\n scores = np.loadtxt(score_file)\n \n src = load_text(src_file)\n translated = load_text(translation_file)\n \n return src, translated, scores\n\ndef tokenize(text_array, use_pos=False, data_type=None, lang=None):\n \"\"\"\n Given an array of sentences, returns:\n If use_pos:\n An array of tokenised sentences (where each tokenised sentence is an array of tokens) \n else:\n An array of tokenised sentences (where each tokenised sentence is an array of tuples of (token, POS tag))\n NOTE: If use_pos is False, the rest of the kwargs are ignored\n \"\"\"\n\n if use_pos:\n # Since POS tags take long to generate, use cached version if exists\n\n cache_path = None\n \n if data_type == DatasetType.TRAIN:\n cache_path = os.path.join(SAVED_POS_BASE_PATH, f'train-{lang}-pos.pickle')\n elif data_type == DatasetType.VAL:\n cache_path = os.path.join(SAVED_POS_BASE_PATH, f'val-{lang}-pos.pickle')\n elif data_type == DatasetType.TEST:\n cache_path = os.path.join(SAVED_POS_BASE_PATH, f'test-{lang}-pos.pickle')\n\n if os.path.isfile(cache_path):\n with open(cache_path, 'rb') as handle:\n sentences = pickle.load(handle)\n return sentences\n\n tokeniser = WordPunctTokenizer()\n\n sentences = []\n with tqdm(total=len(text_array)) as pbar:\n for sentence in text_array:\n tokens = tokeniser.tokenize(sentence)\n lower_cased_tokens = []\n for tok in tokens:\n tok_lower = tok.lower()\n lower_cased_tokens.append(tok_lower)\n \n if use_pos:\n # Store tokenised sentence i.e. 
arrays of (token, POS_TAG) tuples\n try:\n sentences.append(get_pos_tags(lower_cased_tokens, lang))\n except:\n sentences.append([get_pos_tags([tok], lang)[0] for tok in lower_cased_tokens])\n else:\n # Store tokenised sentence\n sentences.append(lower_cased_tokens)\n pbar.update(1)\n\n if use_pos:\n # Store POS tags to allow faster loading on next invocation\n with open(cache_path, 'wb') as handle:\n pickle.dump(sentences, handle)\n\n return sentences\n\ndef pad_to_length(word_embeddings, length, padding):\n \"\"\"\n Given some data (word_embeddings or other), of shape (x, variable, dimensionality) \n returns the data padded in the 2nd dimension to size length i.e. (x, length, dimensionality) \n \"\"\"\n\n for sentence in word_embeddings:\n num_to_append = length - len(sentence)\n assert num_to_append >= 0\n for _ in range(num_to_append):\n sentence.append(padding)\n",
"step-ids": [
7,
8,
11,
12,
13
]
}
|
[
7,
8,
11,
12,
13
] |
from setuptools import setup

# Packaging configuration for the gdrive_helpers package: helper
# functions around the google-api-python-client for Google Drive.
setup(name='google-drive-helpers',
      version='0.1',
      description='Helper functions for google drive',
      url='https://github.com/jdoepfert/google-drive-helpers',
      license='MIT',
      packages=['gdrive_helpers'],
      install_requires=[
          'google-api-python-client',
      ],
      zip_safe=False)
|
normal
|
{
"blob_id": "c0218acadb9e03359ac898cf3bb4898f516400e5",
"index": 5361,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='google-drive-helpers', version='0.1', description=\n 'Helper functions for google drive', url=\n 'https://github.com/jdoepfert/google-drive-helpers', license='MIT',\n packages=['gdrive_helpers'], install_requires=[\n 'google-api-python-client'], zip_safe=False)\n",
"step-3": "from setuptools import setup\nsetup(name='google-drive-helpers', version='0.1', description=\n 'Helper functions for google drive', url=\n 'https://github.com/jdoepfert/google-drive-helpers', license='MIT',\n packages=['gdrive_helpers'], install_requires=[\n 'google-api-python-client'], zip_safe=False)\n",
"step-4": "from setuptools import setup\n\nsetup(name='google-drive-helpers',\n version='0.1',\n description='Helper functions for google drive',\n url='https://github.com/jdoepfert/google-drive-helpers',\n license='MIT',\n packages=['gdrive_helpers'],\n install_requires=[\n 'google-api-python-client',\n ],\n zip_safe=False)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def train(cls, data, target, model_path):
cls = cls.fit(data, target)
with open(model_path, 'wb') as f:
pickle.dump(cls, f)
<|reserved_special_token_0|>
def load_models(matrix_path, model_path):
tfidf, cls = None, None
if os.path.isfile(model_path):
with open(model_path, 'rb') as f:
cls = pickle.load(f)
if os.path.isfile(matrix_path):
with open(matrix_path, 'rb') as f:
tfidf = pickle.load(f)
return tfidf, cls
def test(matrix_path, model_path, data_path, outdir):
curr_time = datetime.datetime.now()
time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')
out_path = outdir + '/%s/' % time_str
out_file = os.path.join(out_path, 'results.txt')
if not os.path.exists(out_path):
os.makedirs(out_path)
data, target = get_data(data_path)
tfidf, cls = load_models(matrix_path, model_path)
if tfidf == None or cls == None:
print('cannot load models........')
return
feature = tfidf.transform(data)
predicted = cls.predict(feature)
acc = metrics.accuracy_score(target, predicted)
pre = metrics.precision_score(target, predicted)
recall = metrics.recall_score(target, predicted)
f1 = metrics.f1_score(target, predicted)
fpr, tpr, thresholds = metrics.roc_curve(target, predicted)
auc = metrics.auc(fpr, tpr)
print('accuracy_score: ', acc)
print('precision_score: ', pre)
print('recall_score: ', recall)
print('f1_score: ', f1)
print('auc: ', auc)
with open(out_file, 'w', encoding='utf-8') as f:
for label in predicted:
f.write(str(label) + '\n')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def train(cls, data, target, model_path):
cls = cls.fit(data, target)
with open(model_path, 'wb') as f:
pickle.dump(cls, f)
def trans(data, matrix_path, stopword_path):
with open(stopword_path, 'r', encoding='utf-8') as fs:
stop_words = [line.strip() for line in fs.readline()]
tfidf = TfidfVectorizer(token_pattern='(?u)\\b\\w+\\b', stop_words=
stop_words)
features = tfidf.fit_transform(data)
with open(matrix_path, 'wb') as f:
pickle.dump(tfidf, f)
return features
def load_models(matrix_path, model_path):
tfidf, cls = None, None
if os.path.isfile(model_path):
with open(model_path, 'rb') as f:
cls = pickle.load(f)
if os.path.isfile(matrix_path):
with open(matrix_path, 'rb') as f:
tfidf = pickle.load(f)
return tfidf, cls
def test(matrix_path, model_path, data_path, outdir):
curr_time = datetime.datetime.now()
time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')
out_path = outdir + '/%s/' % time_str
out_file = os.path.join(out_path, 'results.txt')
if not os.path.exists(out_path):
os.makedirs(out_path)
data, target = get_data(data_path)
tfidf, cls = load_models(matrix_path, model_path)
if tfidf == None or cls == None:
print('cannot load models........')
return
feature = tfidf.transform(data)
predicted = cls.predict(feature)
acc = metrics.accuracy_score(target, predicted)
pre = metrics.precision_score(target, predicted)
recall = metrics.recall_score(target, predicted)
f1 = metrics.f1_score(target, predicted)
fpr, tpr, thresholds = metrics.roc_curve(target, predicted)
auc = metrics.auc(fpr, tpr)
print('accuracy_score: ', acc)
print('precision_score: ', pre)
print('recall_score: ', recall)
print('f1_score: ', f1)
print('auc: ', auc)
with open(out_file, 'w', encoding='utf-8') as f:
for label in predicted:
f.write(str(label) + '\n')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_data(train_file):
    """
    Read a tab-separated "<label>\\t<text>" file, segment every text
    with jieba, and return (data, target): space-joined token strings
    and the corresponding integer labels. Lines without a tab are skipped.
    """
    target = []
    data = []
    with open(train_file, 'r', encoding='utf-8') as f:
        for raw_line in f.readlines():
            parts = raw_line.strip().split('\t')
            if len(parts) == 1:
                # No tab separator -> malformed line, ignore it.
                continue
            target.append(int(parts[0]))
            data.append(parts[1])
    data = [' '.join(jieba.lcut(text)) for text in data]
    return data, target
def train(cls, data, target, model_path):
    """Fit classifier *cls* on (data, target) and pickle the fitted model to *model_path*."""
    fitted = cls.fit(data, target)
    with open(model_path, 'wb') as out_file:
        pickle.dump(fitted, out_file)
def trans(data, matrix_path, stopword_path):
    """Fit a TF-IDF vectorizer on *data*, pickle it, and return the features.

    Args:
        data: iterable of whitespace-tokenized documents (strings).
        matrix_path: file path where the fitted TfidfVectorizer is pickled.
        stopword_path: UTF-8 text file with one stop word per line.

    Returns:
        Sparse TF-IDF feature matrix for *data*.
    """
    with open(stopword_path, 'r', encoding='utf-8') as fs:
        # BUG FIX: the original iterated fs.readline() -- a single string --
        # which yields individual *characters*, not stop words.  Iterate the
        # file object to get one stop word per line.
        stop_words = [line.strip() for line in fs]
    tfidf = TfidfVectorizer(token_pattern=r'(?u)\b\w+\b', stop_words=stop_words)
    features = tfidf.fit_transform(data)
    with open(matrix_path, 'wb') as f:
        pickle.dump(tfidf, f)
    return features
def load_models(matrix_path, model_path):
    """Load the pickled TF-IDF vectorizer and classifier from disk.

    Missing files are tolerated: the corresponding element of the returned
    (tfidf, cls) tuple is simply left as None.
    """
    def _unpickle(path):
        # Best-effort load: None when the file does not exist.
        if not os.path.isfile(path):
            return None
        with open(path, 'rb') as fh:
            return pickle.load(fh)
    return _unpickle(matrix_path), _unpickle(model_path)
def test(matrix_path, model_path, data_path, outdir):
    """Evaluate the pickled classifier on *data_path* and write predictions.

    Loads the TF-IDF vectorizer and classifier from disk, predicts labels
    for the test set, prints binary-classification metrics, and writes one
    predicted label per line to ``<outdir>/<timestamp>/results.txt``.
    """
    curr_time = datetime.datetime.now()
    time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')
    out_path = outdir + '/%s/' % time_str
    out_file = os.path.join(out_path, 'results.txt')
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    data, target = get_data(data_path)
    tfidf, cls = load_models(matrix_path, model_path)
    # Idiom fix: compare against None with `is`, not `==`.
    if tfidf is None or cls is None:
        print('cannot load models........')
        return
    feature = tfidf.transform(data)
    predicted = cls.predict(feature)
    acc = metrics.accuracy_score(target, predicted)
    pre = metrics.precision_score(target, predicted)
    recall = metrics.recall_score(target, predicted)
    f1 = metrics.f1_score(target, predicted)
    fpr, tpr, thresholds = metrics.roc_curve(target, predicted)
    auc = metrics.auc(fpr, tpr)
    print('accuracy_score: ', acc)
    print('precision_score: ', pre)
    print('recall_score: ', recall)
    print('f1_score: ', f1)
    print('auc: ', auc)
    with open(out_file, 'w', encoding='utf-8') as f:
        for label in predicted:
            f.write(str(label) + '\n')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
warnings.filterwarnings('ignore')
def get_data(train_file):
    """Read a tab-separated corpus file and return (documents, labels).

    Each line is ``<label>\\t<text>``; lines without a tab are skipped,
    and anything after a second tab is ignored.  The text is segmented
    with jieba and re-joined with single spaces.
    """
    target = []
    data = []
    with open(train_file, 'r', encoding='utf-8') as f:
        for raw in f:
            parts = raw.strip().split('\t')
            if len(parts) == 1:
                continue
            target.append(int(parts[0]))
            data.append(parts[1])
    # Segment each document and re-join tokens with spaces.
    data = [' '.join(jieba.lcut(text)) for text in data]
    return data, target
def train(cls, data, target, model_path):
    """Fit estimator *cls* on (data, target) and pickle the fit result to *model_path*."""
    fitted = cls.fit(data, target)
    with open(model_path, 'wb') as out:
        pickle.dump(fitted, out)
def trans(data, matrix_path, stopword_path):
    """Fit a TF-IDF vectorizer on *data*, pickle it, and return the features.

    Args:
        data: iterable of whitespace-tokenized documents (strings).
        matrix_path: file path where the fitted TfidfVectorizer is pickled.
        stopword_path: UTF-8 text file with one stop word per line.

    Returns:
        Sparse TF-IDF feature matrix for *data*.
    """
    with open(stopword_path, 'r', encoding='utf-8') as fs:
        # BUG FIX: the original iterated fs.readline() -- a single string --
        # which yields individual *characters*, not stop words.  Iterate the
        # file object to get one stop word per line.
        stop_words = [line.strip() for line in fs]
    tfidf = TfidfVectorizer(token_pattern=r'(?u)\b\w+\b', stop_words=stop_words)
    features = tfidf.fit_transform(data)
    with open(matrix_path, 'wb') as f:
        pickle.dump(tfidf, f)
    return features
def load_models(matrix_path, model_path):
    """Load the pickled TF-IDF vectorizer and classifier from disk.

    Missing files are tolerated: the corresponding element of the returned
    (tfidf, cls) tuple is simply left as None.
    """
    def _unpickle(path):
        # Best-effort load: None when the file does not exist.
        if not os.path.isfile(path):
            return None
        with open(path, 'rb') as fh:
            return pickle.load(fh)
    return _unpickle(matrix_path), _unpickle(model_path)
def test(matrix_path, model_path, data_path, outdir):
    """Evaluate the pickled classifier on *data_path* and write predictions.

    Loads the TF-IDF vectorizer and classifier from disk, predicts labels
    for the test set, prints binary-classification metrics, and writes one
    predicted label per line to ``<outdir>/<timestamp>/results.txt``.
    """
    curr_time = datetime.datetime.now()
    time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')
    out_path = outdir + '/%s/' % time_str
    out_file = os.path.join(out_path, 'results.txt')
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    data, target = get_data(data_path)
    tfidf, cls = load_models(matrix_path, model_path)
    # Idiom fix: compare against None with `is`, not `==`.
    if tfidf is None or cls is None:
        print('cannot load models........')
        return
    feature = tfidf.transform(data)
    predicted = cls.predict(feature)
    acc = metrics.accuracy_score(target, predicted)
    pre = metrics.precision_score(target, predicted)
    recall = metrics.recall_score(target, predicted)
    f1 = metrics.f1_score(target, predicted)
    fpr, tpr, thresholds = metrics.roc_curve(target, predicted)
    auc = metrics.auc(fpr, tpr)
    print('accuracy_score: ', acc)
    print('precision_score: ', pre)
    print('recall_score: ', recall)
    print('f1_score: ', f1)
    print('auc: ', auc)
    with open(out_file, 'w', encoding='utf-8') as f:
        for label in predicted:
            f.write(str(label) + '\n')
if __name__ == '__main__':
    # CLI entry point: train a LinearSVC text classifier end-to-end
    # (load corpus -> TF-IDF features -> fit -> evaluate on the test set).
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', type=str, default='./data/train.txt',
        help='training data')
    parser.add_argument('--test', type=str, default='./data/test.txt', help
        ='test data')
    parser.add_argument('--stopwords', type=str, default=
        './data/hit_stopwords.txt', help='stop words')
    parser.add_argument('--model', type=str, default=
        './model/svm_model.pkl', help='classification model')
    parser.add_argument('--matrix', type=str, default='./model/tfidf.pkl',
        help='tfidf model')
    parser.add_argument('--outpath', type=str, default='./results/', help=
        'out path')
    args = parser.parse_args()
    # Step 1: read and segment the labelled training corpus.
    print('data processing.......')
    data, target = get_data(args.train)
    # Step 2: fit the TF-IDF vectorizer and pickle it for reuse at test time.
    print('transform data.......')
    features = trans(data, args.matrix, args.stopwords)
    # Step 3: fit a linear SVM and pickle the trained model.
    print('training model.......')
    cls = svm.LinearSVC()
    train(cls, features, target, args.model)
    # Step 4: reload both pickles and evaluate on the held-out test file.
    print('test.......')
    test(args.matrix, args.model, args.test, args.outpath)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# caixinjun
import argparse
from sklearn import metrics
import datetime
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
import pickle
from sklearn import svm
import os
import warnings
warnings.filterwarnings('ignore')
def get_data(train_file):
    """Read a tab-separated corpus file and return (documents, labels).

    Each line is ``<label>\\t<text>``; lines without a tab are skipped,
    and anything after a second tab is ignored.  The text is segmented
    with jieba and re-joined with single spaces.
    """
    target = []
    data = []
    with open(train_file, 'r', encoding='utf-8') as f:
        for raw in f:
            parts = raw.strip().split('\t')
            if len(parts) == 1:
                continue
            target.append(int(parts[0]))
            data.append(parts[1])
    # Segment each document and re-join tokens with spaces.
    data = [' '.join(jieba.lcut(text)) for text in data]
    return data, target
def train(cls, data, target, model_path):
    """Fit estimator *cls* on (data, target) and pickle the fit result to *model_path*."""
    fitted = cls.fit(data, target)
    with open(model_path, 'wb') as out:
        pickle.dump(fitted, out)
def trans(data, matrix_path, stopword_path):
    """Fit a TF-IDF vectorizer on *data*, pickle it, and return the features.

    Args:
        data: iterable of whitespace-tokenized documents (strings).
        matrix_path: file path where the fitted TfidfVectorizer is pickled.
        stopword_path: UTF-8 text file with one stop word per line.

    Returns:
        Sparse TF-IDF feature matrix for *data*.
    """
    with open(stopword_path, 'r', encoding='utf-8') as fs:
        # BUG FIX: the original iterated fs.readline() -- a single string --
        # which yields individual *characters*, not stop words.  Iterate the
        # file object to get one stop word per line.
        stop_words = [line.strip() for line in fs]
    tfidf = TfidfVectorizer(token_pattern=r'(?u)\b\w+\b', stop_words=stop_words)
    features = tfidf.fit_transform(data)
    with open(matrix_path, 'wb') as f:
        pickle.dump(tfidf, f)
    return features
def load_models(matrix_path, model_path):
    """Load the pickled TF-IDF vectorizer and classifier from disk.

    Missing files are tolerated: the corresponding element of the returned
    (tfidf, cls) tuple is simply left as None.
    """
    def _unpickle(path):
        # Best-effort load: None when the file does not exist.
        if not os.path.isfile(path):
            return None
        with open(path, 'rb') as fh:
            return pickle.load(fh)
    return _unpickle(matrix_path), _unpickle(model_path)
def test(matrix_path, model_path, data_path, outdir):
    """Evaluate the pickled classifier on *data_path* and write predictions.

    Loads the TF-IDF vectorizer and classifier from disk, predicts labels
    for the test set, prints binary-classification metrics, and writes one
    predicted label per line to ``<outdir>/<timestamp>/results.txt``.
    """
    curr_time = datetime.datetime.now()
    time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')
    out_path = outdir + '/%s/' % time_str
    out_file = os.path.join(out_path, 'results.txt')
    if not os.path.exists(out_path):
        os.makedirs(out_path)
    data, target = get_data(data_path)
    tfidf, cls = load_models(matrix_path, model_path)
    # Idiom fix: compare against None with `is`, not `==`.
    if tfidf is None or cls is None:
        print('cannot load models........')
        return
    feature = tfidf.transform(data)
    predicted = cls.predict(feature)
    acc = metrics.accuracy_score(target, predicted)
    pre = metrics.precision_score(target, predicted)
    recall = metrics.recall_score(target, predicted)
    f1 = metrics.f1_score(target, predicted)
    fpr, tpr, thresholds = metrics.roc_curve(target, predicted)
    auc = metrics.auc(fpr, tpr)
    print('accuracy_score: ', acc)
    print('precision_score: ', pre)
    print('recall_score: ', recall)
    print('f1_score: ', f1)
    print('auc: ', auc)
    with open(out_file, 'w', encoding='utf-8') as f:
        for label in predicted:
            f.write(str(label) + '\n')
if __name__ == '__main__':
    # CLI entry point: train a LinearSVC text classifier end-to-end
    # (load corpus -> TF-IDF features -> fit -> evaluate on the test set).
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', type=str, default='./data/train.txt', help='training data')
    parser.add_argument('--test', type=str, default='./data/test.txt', help='test data')
    parser.add_argument('--stopwords', type=str, default='./data/hit_stopwords.txt', help='stop words')
    parser.add_argument('--model', type=str, default='./model/svm_model.pkl', help='classification model')
    parser.add_argument('--matrix', type=str, default='./model/tfidf.pkl', help='tfidf model')
    parser.add_argument('--outpath', type=str, default='./results/', help='out path')
    args = parser.parse_args()
    # Step 1: read and segment the labelled training corpus.
    print("data processing.......")
    data, target = get_data(args.train)
    # Step 2: fit the TF-IDF vectorizer and pickle it for reuse at test time.
    print("transform data.......")
    features = trans(data, args.matrix, args.stopwords)
    # Step 3: fit a linear SVM and pickle the trained model.
    print("training model.......")
    cls = svm.LinearSVC()
    train(cls, features, target, args.model)
    # Step 4: reload both pickles and evaluate on the held-out test file.
    print("test.......")
    test(args.matrix, args.model, args.test, args.outpath)
|
flexible
|
{
"blob_id": "199872ea459a9dba9975c6531034bdbc1e77f1db",
"index": 5875,
"step-1": "<mask token>\n\n\ndef train(cls, data, target, model_path):\n cls = cls.fit(data, target)\n with open(model_path, 'wb') as f:\n pickle.dump(cls, f)\n\n\n<mask token>\n\n\ndef load_models(matrix_path, model_path):\n tfidf, cls = None, None\n if os.path.isfile(model_path):\n with open(model_path, 'rb') as f:\n cls = pickle.load(f)\n if os.path.isfile(matrix_path):\n with open(matrix_path, 'rb') as f:\n tfidf = pickle.load(f)\n return tfidf, cls\n\n\ndef test(matrix_path, model_path, data_path, outdir):\n curr_time = datetime.datetime.now()\n time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')\n out_path = outdir + '/%s/' % time_str\n out_file = os.path.join(out_path, 'results.txt')\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n data, target = get_data(data_path)\n tfidf, cls = load_models(matrix_path, model_path)\n if tfidf == None or cls == None:\n print('cannot load models........')\n return\n feature = tfidf.transform(data)\n predicted = cls.predict(feature)\n acc = metrics.accuracy_score(target, predicted)\n pre = metrics.precision_score(target, predicted)\n recall = metrics.recall_score(target, predicted)\n f1 = metrics.f1_score(target, predicted)\n fpr, tpr, thresholds = metrics.roc_curve(target, predicted)\n auc = metrics.auc(fpr, tpr)\n print('accuracy_score: ', acc)\n print('precision_score: ', pre)\n print('recall_score: ', recall)\n print('f1_score: ', f1)\n print('auc: ', auc)\n with open(out_file, 'w', encoding='utf-8') as f:\n for label in predicted:\n f.write(str(label) + '\\n')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef train(cls, data, target, model_path):\n cls = cls.fit(data, target)\n with open(model_path, 'wb') as f:\n pickle.dump(cls, f)\n\n\ndef trans(data, matrix_path, stopword_path):\n with open(stopword_path, 'r', encoding='utf-8') as fs:\n stop_words = [line.strip() for line in fs.readline()]\n tfidf = TfidfVectorizer(token_pattern='(?u)\\\\b\\\\w+\\\\b', stop_words=\n stop_words)\n features = tfidf.fit_transform(data)\n with open(matrix_path, 'wb') as f:\n pickle.dump(tfidf, f)\n return features\n\n\ndef load_models(matrix_path, model_path):\n tfidf, cls = None, None\n if os.path.isfile(model_path):\n with open(model_path, 'rb') as f:\n cls = pickle.load(f)\n if os.path.isfile(matrix_path):\n with open(matrix_path, 'rb') as f:\n tfidf = pickle.load(f)\n return tfidf, cls\n\n\ndef test(matrix_path, model_path, data_path, outdir):\n curr_time = datetime.datetime.now()\n time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')\n out_path = outdir + '/%s/' % time_str\n out_file = os.path.join(out_path, 'results.txt')\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n data, target = get_data(data_path)\n tfidf, cls = load_models(matrix_path, model_path)\n if tfidf == None or cls == None:\n print('cannot load models........')\n return\n feature = tfidf.transform(data)\n predicted = cls.predict(feature)\n acc = metrics.accuracy_score(target, predicted)\n pre = metrics.precision_score(target, predicted)\n recall = metrics.recall_score(target, predicted)\n f1 = metrics.f1_score(target, predicted)\n fpr, tpr, thresholds = metrics.roc_curve(target, predicted)\n auc = metrics.auc(fpr, tpr)\n print('accuracy_score: ', acc)\n print('precision_score: ', pre)\n print('recall_score: ', recall)\n print('f1_score: ', f1)\n print('auc: ', auc)\n with open(out_file, 'w', encoding='utf-8') as f:\n for label in predicted:\n f.write(str(label) + '\\n')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_data(train_file):\n target = []\n data = []\n with open(train_file, 'r', encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip().split('\\t')\n if len(line) == 1:\n continue\n target.append(int(line[0]))\n data.append(line[1])\n data = list(map(jieba.lcut, data))\n data = [' '.join(d) for d in data]\n return data, target\n\n\ndef train(cls, data, target, model_path):\n cls = cls.fit(data, target)\n with open(model_path, 'wb') as f:\n pickle.dump(cls, f)\n\n\ndef trans(data, matrix_path, stopword_path):\n with open(stopword_path, 'r', encoding='utf-8') as fs:\n stop_words = [line.strip() for line in fs.readline()]\n tfidf = TfidfVectorizer(token_pattern='(?u)\\\\b\\\\w+\\\\b', stop_words=\n stop_words)\n features = tfidf.fit_transform(data)\n with open(matrix_path, 'wb') as f:\n pickle.dump(tfidf, f)\n return features\n\n\ndef load_models(matrix_path, model_path):\n tfidf, cls = None, None\n if os.path.isfile(model_path):\n with open(model_path, 'rb') as f:\n cls = pickle.load(f)\n if os.path.isfile(matrix_path):\n with open(matrix_path, 'rb') as f:\n tfidf = pickle.load(f)\n return tfidf, cls\n\n\ndef test(matrix_path, model_path, data_path, outdir):\n curr_time = datetime.datetime.now()\n time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')\n out_path = outdir + '/%s/' % time_str\n out_file = os.path.join(out_path, 'results.txt')\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n data, target = get_data(data_path)\n tfidf, cls = load_models(matrix_path, model_path)\n if tfidf == None or cls == None:\n print('cannot load models........')\n return\n feature = tfidf.transform(data)\n predicted = cls.predict(feature)\n acc = metrics.accuracy_score(target, predicted)\n pre = metrics.precision_score(target, predicted)\n recall = metrics.recall_score(target, predicted)\n f1 = metrics.f1_score(target, predicted)\n fpr, tpr, thresholds = metrics.roc_curve(target, predicted)\n auc = metrics.auc(fpr, tpr)\n 
print('accuracy_score: ', acc)\n print('precision_score: ', pre)\n print('recall_score: ', recall)\n print('f1_score: ', f1)\n print('auc: ', auc)\n with open(out_file, 'w', encoding='utf-8') as f:\n for label in predicted:\n f.write(str(label) + '\\n')\n\n\n<mask token>\n",
"step-4": "<mask token>\nwarnings.filterwarnings('ignore')\n\n\ndef get_data(train_file):\n target = []\n data = []\n with open(train_file, 'r', encoding='utf-8') as f:\n for line in f.readlines():\n line = line.strip().split('\\t')\n if len(line) == 1:\n continue\n target.append(int(line[0]))\n data.append(line[1])\n data = list(map(jieba.lcut, data))\n data = [' '.join(d) for d in data]\n return data, target\n\n\ndef train(cls, data, target, model_path):\n cls = cls.fit(data, target)\n with open(model_path, 'wb') as f:\n pickle.dump(cls, f)\n\n\ndef trans(data, matrix_path, stopword_path):\n with open(stopword_path, 'r', encoding='utf-8') as fs:\n stop_words = [line.strip() for line in fs.readline()]\n tfidf = TfidfVectorizer(token_pattern='(?u)\\\\b\\\\w+\\\\b', stop_words=\n stop_words)\n features = tfidf.fit_transform(data)\n with open(matrix_path, 'wb') as f:\n pickle.dump(tfidf, f)\n return features\n\n\ndef load_models(matrix_path, model_path):\n tfidf, cls = None, None\n if os.path.isfile(model_path):\n with open(model_path, 'rb') as f:\n cls = pickle.load(f)\n if os.path.isfile(matrix_path):\n with open(matrix_path, 'rb') as f:\n tfidf = pickle.load(f)\n return tfidf, cls\n\n\ndef test(matrix_path, model_path, data_path, outdir):\n curr_time = datetime.datetime.now()\n time_str = curr_time.strftime('%Y-%m-%d %H-%M-%S')\n out_path = outdir + '/%s/' % time_str\n out_file = os.path.join(out_path, 'results.txt')\n if not os.path.exists(out_path):\n os.makedirs(out_path)\n data, target = get_data(data_path)\n tfidf, cls = load_models(matrix_path, model_path)\n if tfidf == None or cls == None:\n print('cannot load models........')\n return\n feature = tfidf.transform(data)\n predicted = cls.predict(feature)\n acc = metrics.accuracy_score(target, predicted)\n pre = metrics.precision_score(target, predicted)\n recall = metrics.recall_score(target, predicted)\n f1 = metrics.f1_score(target, predicted)\n fpr, tpr, thresholds = metrics.roc_curve(target, predicted)\n 
auc = metrics.auc(fpr, tpr)\n print('accuracy_score: ', acc)\n print('precision_score: ', pre)\n print('recall_score: ', recall)\n print('f1_score: ', f1)\n print('auc: ', auc)\n with open(out_file, 'w', encoding='utf-8') as f:\n for label in predicted:\n f.write(str(label) + '\\n')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--train', type=str, default='./data/train.txt',\n help='training data')\n parser.add_argument('--test', type=str, default='./data/test.txt', help\n ='test data')\n parser.add_argument('--stopwords', type=str, default=\n './data/hit_stopwords.txt', help='stop words')\n parser.add_argument('--model', type=str, default=\n './model/svm_model.pkl', help='classification model')\n parser.add_argument('--matrix', type=str, default='./model/tfidf.pkl',\n help='tfidf model')\n parser.add_argument('--outpath', type=str, default='./results/', help=\n 'out path')\n args = parser.parse_args()\n print('data processing.......')\n data, target = get_data(args.train)\n print('transform data.......')\n features = trans(data, args.matrix, args.stopwords)\n print('training model.......')\n cls = svm.LinearSVC()\n train(cls, features, target, args.model)\n print('test.......')\n test(args.matrix, args.model, args.test, args.outpath)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n# caixinjun\r\n\r\nimport argparse\r\nfrom sklearn import metrics\r\nimport datetime\r\nimport jieba\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nimport pickle\r\nfrom sklearn import svm\r\nimport os\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n\r\ndef get_data(train_file):\r\n target = []\r\n data = []\r\n with open(train_file, 'r', encoding='utf-8') as f:\r\n for line in f.readlines():\r\n line = line.strip().split(\"\\t\")\r\n if len(line) == 1:\r\n continue\r\n target.append(int(line[0]))\r\n data.append(line[1])\r\n data = list(map(jieba.lcut, data))\r\n data = [\" \".join(d) for d in data]\r\n return data, target\r\n\r\n\r\ndef train(cls, data, target, model_path):\r\n cls = cls.fit(data, target)\r\n with open(model_path, 'wb') as f:\r\n pickle.dump(cls, f)\r\n\r\ndef trans(data, matrix_path, stopword_path):\r\n with open(stopword_path, 'r', encoding='utf-8') as fs:\r\n stop_words = [line.strip() for line in fs.readline()]\r\n tfidf = TfidfVectorizer(token_pattern=r\"(?u)\\b\\w+\\b\", stop_words=stop_words)\r\n features = tfidf.fit_transform(data)\r\n with open(matrix_path, 'wb') as f:\r\n pickle.dump(tfidf, f)\r\n return features\r\n\r\n\r\ndef load_models(matrix_path, model_path):\r\n tfidf, cls = None, None\r\n if os.path.isfile(model_path):\r\n with open(model_path, 'rb') as f:\r\n cls = pickle.load(f)\r\n if os.path.isfile(matrix_path):\r\n with open(matrix_path, 'rb') as f:\r\n tfidf = pickle.load(f)\r\n return tfidf, cls\r\n\r\ndef test(matrix_path, model_path, data_path, outdir):\r\n\r\n curr_time = datetime.datetime.now()\r\n time_str = curr_time.strftime(\"%Y-%m-%d %H-%M-%S\")\r\n out_path = outdir + '/%s/' % time_str\r\n out_file = os.path.join(out_path, \"results.txt\")\r\n if not os.path.exists(out_path):\r\n os.makedirs(out_path)\r\n data, target = get_data(data_path)\r\n tfidf, cls = load_models(matrix_path, model_path)\r\n if tfidf==None or cls==None:\r\n 
print(\"cannot load models........\")\r\n return\r\n\r\n feature = tfidf.transform(data)\r\n predicted = cls.predict(feature)\r\n\r\n acc = metrics.accuracy_score(target, predicted)\r\n pre = metrics.precision_score(target, predicted)\r\n recall = metrics.recall_score(target, predicted)\r\n f1 = metrics.f1_score(target, predicted)\r\n fpr, tpr, thresholds = metrics.roc_curve(target, predicted)\r\n auc = metrics.auc(fpr, tpr)\r\n\r\n print(\"accuracy_score: \", acc)\r\n print(\"precision_score: \", pre)\r\n print(\"recall_score: \", recall)\r\n print(\"f1_score: \", f1)\r\n print(\"auc: \", auc)\r\n\r\n with open(out_file, 'w', encoding='utf-8') as f:\r\n for label in predicted:\r\n f.write(str(label) + '\\n')\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--train', type=str, default='./data/train.txt', help='training data')\r\n parser.add_argument('--test', type=str, default='./data/test.txt', help='test data')\r\n parser.add_argument('--stopwords', type=str, default='./data/hit_stopwords.txt', help='stop words')\r\n parser.add_argument('--model', type=str, default='./model/svm_model.pkl', help='classification model')\r\n parser.add_argument('--matrix', type=str, default='./model/tfidf.pkl', help='tfidf model')\r\n parser.add_argument('--outpath', type=str, default='./results/', help='out path')\r\n args = parser.parse_args()\r\n\r\n print(\"data processing.......\")\r\n data, target = get_data(args.train)\r\n\r\n print(\"transform data.......\")\r\n features = trans(data, args.matrix, args.stopwords)\r\n\r\n print(\"training model.......\")\r\n cls = svm.LinearSVC()\r\n train(cls, features, target, args.model)\r\n\r\n print(\"test.......\")\r\n test(args.matrix, args.model, args.test, args.outpath)\r\n\r\n",
"step-ids": [
3,
4,
5,
6,
8
]
}
|
[
3,
4,
5,
6,
8
] |
class Port(object):
    """A network port identified by its MAC address."""

    def __init__(self, mac):
        """Store the port's identifying MAC address.

        Args:
            mac: MAC address identifying this port.
        """
        self.mac = mac

    def __repr__(self):
        # Added for debuggability; backward compatible with the old interface.
        return '{}(mac={!r})'.format(type(self).__name__, self.mac)
|
normal
|
{
"blob_id": "cd89c9eaea9d331288fd07f1968ef9dce89b4a4b",
"index": 7228,
"step-1": "<mask token>\n",
"step-2": "class Port(object):\n <mask token>\n",
"step-3": "class Port(object):\n\n def __init__(self, mac):\n self.mac = mac\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Google_Cloud:
<|reserved_special_token_0|>
def sentiment(self):
google_sentiment = self.client.analyze_sentiment(self.document
).document_sentiment
sent = {}
sent['sentiment'] = google_sentiment.score
sent['magnitude'] = google_sentiment.magnitude
return sent
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Google_ST:
    """Wrapper around the Google Cloud Speech-to-Text v1 client.

    NOTE(review): the constructor stores an audio file object and a sample
    rate, but the transcription methods below work only from a URI and never
    read those attributes -- confirm whether they are still needed.
    """
    def __init__(self, file, rate):
        # file: an opened audio file object; rate: sample rate (units not
        # shown here -- presumably Hz; confirm with callers).
        self.audio_file = file
        self.client = speech1.SpeechClient()
        self.rate = rate
    def printFields(self):
        """Debug helper: print the type of the stored file object and of its contents."""
        print(type(self.audio_file))
        print(type(self.audio_file.read()))
    def transcribe_file(self, uri):
        """Synchronously transcribe the audio at *uri*.

        For .wav: try a 2-channel LINEAR16 config, then a default
        single-channel config, then fall back to the long-running API.
        For .flac: a single FLAC attempt.  Other extensions return an
        error-message string.

        NOTE(review): every failure path only prints the exception and falls
        through, so the method can return None on error.
        """
        if uri.endswith('.wav'):
            # Attempt 1: stereo WAV with per-channel recognition.
            try:
                config = speech1.types.RecognitionConfig(encoding=speech1.
                    enums.RecognitionConfig.AudioEncoding.LINEAR16,
                    language_code='en-US', audio_channel_count=2,
                    enable_separate_recognition_per_channel=True)
                audio = speech1.types.RecognitionAudio(uri=uri)
                response = self.client.recognize(config, audio)
                result_str = ''
                for result in response.results:
                    result_str += result.alternatives[0].transcript
                    print('Transcript: {}'.format(result.alternatives[0].
                        transcript))
                return result_str
            except Exception as e:
                # Attempt 2: default single-channel LINEAR16 config.
                try:
                    config = speech1.types.RecognitionConfig(encoding=
                        speech1.enums.RecognitionConfig.AudioEncoding.
                        LINEAR16, language_code='en-US')
                    audio = speech1.types.RecognitionAudio(uri=uri)
                    response = self.client.recognize(config, audio)
                    result_str = ''
                    for result in response.results:
                        result_str += result.alternatives[0].transcript
                        print('Transcript: {}'.format(result.alternatives[0
                            ].transcript))
                    return result_str
                except Exception as e2:
                    # Attempt 3: fall back to asynchronous recognition.
                    try:
                        result_str = self.transcribe_long_file(uri)
                        return result_str
                    except Exception as e3:
                        print(e3)
        elif uri.endswith('.flac'):
            # Single attempt with the FLAC encoding.
            try:
                config = speech1.types.RecognitionConfig(encoding=speech1.
                    enums.RecognitionConfig.AudioEncoding.FLAC,
                    language_code='en-US')
                audio = speech1.types.RecognitionAudio(uri=uri)
                response = self.client.recognize(config, audio)
                result_str = ''
                for result in response.results:
                    result_str += result.alternatives[0].transcript
                    print('Transcript: {}'.format(result.alternatives[0].
                        transcript))
                return result_str
            except Exception as e:
                print(e)
        else:
            return 'Please use .wav or .flac audio files'
    def transcribe_long_file(self, uri):
        """Transcribe long audio at *uri* via the asynchronous long-running API.

        Blocks up to 90 seconds waiting for the operation, then returns the
        concatenated transcript.

        NOTE(review): this config references speech2.enums while the rest of
        the class uses speech1 -- confirm both aliases are imported.
        """
        config = speech1.types.RecognitionConfig(encoding=speech2.enums.
            RecognitionConfig.AudioEncoding.LINEAR16, language_code='en-US')
        audio = speech1.types.RecognitionAudio(uri=uri)
        operation = self.client.long_running_recognize(config, audio)
        print('Waiting for operation to complete')
        response = operation.result(timeout=90)
        result_str = ''
        for result in response.results:
            result_str += result.alternatives[0].transcript
        return result_str
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Google_Cloud:
<|reserved_special_token_0|>
def sentiment(self):
google_sentiment = self.client.analyze_sentiment(self.document
).document_sentiment
sent = {}
sent['sentiment'] = google_sentiment.score
sent['magnitude'] = google_sentiment.magnitude
return sent
def entities(self):
google_entities = self.client.analyze_entities(self.document).entities
entities = []
for entity in google_entities:
entities.append(entity.name.lower())
entities.sort()
return entities
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Google_ST:
    """Wrapper around the Google Cloud Speech-to-Text v1 client.

    NOTE(review): the constructor stores an audio file object and a sample
    rate, but the transcription methods below work only from a URI and never
    read those attributes -- confirm whether they are still needed.
    """
    def __init__(self, file, rate):
        # file: an opened audio file object; rate: sample rate (units not
        # shown here -- presumably Hz; confirm with callers).
        self.audio_file = file
        self.client = speech1.SpeechClient()
        self.rate = rate
    def printFields(self):
        """Debug helper: print the type of the stored file object and of its contents."""
        print(type(self.audio_file))
        print(type(self.audio_file.read()))
    def transcribe_file(self, uri):
        """Synchronously transcribe the audio at *uri*.

        For .wav: try a 2-channel LINEAR16 config, then a default
        single-channel config, then fall back to the long-running API.
        For .flac: a single FLAC attempt.  Other extensions return an
        error-message string.

        NOTE(review): every failure path only prints the exception and falls
        through, so the method can return None on error.
        """
        if uri.endswith('.wav'):
            # Attempt 1: stereo WAV with per-channel recognition.
            try:
                config = speech1.types.RecognitionConfig(encoding=speech1.
                    enums.RecognitionConfig.AudioEncoding.LINEAR16,
                    language_code='en-US', audio_channel_count=2,
                    enable_separate_recognition_per_channel=True)
                audio = speech1.types.RecognitionAudio(uri=uri)
                response = self.client.recognize(config, audio)
                result_str = ''
                for result in response.results:
                    result_str += result.alternatives[0].transcript
                    print('Transcript: {}'.format(result.alternatives[0].
                        transcript))
                return result_str
            except Exception as e:
                # Attempt 2: default single-channel LINEAR16 config.
                try:
                    config = speech1.types.RecognitionConfig(encoding=
                        speech1.enums.RecognitionConfig.AudioEncoding.
                        LINEAR16, language_code='en-US')
                    audio = speech1.types.RecognitionAudio(uri=uri)
                    response = self.client.recognize(config, audio)
                    result_str = ''
                    for result in response.results:
                        result_str += result.alternatives[0].transcript
                        print('Transcript: {}'.format(result.alternatives[0
                            ].transcript))
                    return result_str
                except Exception as e2:
                    # Attempt 3: fall back to asynchronous recognition.
                    try:
                        result_str = self.transcribe_long_file(uri)
                        return result_str
                    except Exception as e3:
                        print(e3)
        elif uri.endswith('.flac'):
            # Single attempt with the FLAC encoding.
            try:
                config = speech1.types.RecognitionConfig(encoding=speech1.
                    enums.RecognitionConfig.AudioEncoding.FLAC,
                    language_code='en-US')
                audio = speech1.types.RecognitionAudio(uri=uri)
                response = self.client.recognize(config, audio)
                result_str = ''
                for result in response.results:
                    result_str += result.alternatives[0].transcript
                    print('Transcript: {}'.format(result.alternatives[0].
                        transcript))
                return result_str
            except Exception as e:
                print(e)
        else:
            return 'Please use .wav or .flac audio files'
    def transcribe_long_file(self, uri):
        """Transcribe long audio at *uri* via the asynchronous long-running API.

        Blocks up to 90 seconds waiting for the operation, then returns the
        concatenated transcript.

        NOTE(review): this config references speech2.enums while the rest of
        the class uses speech1 -- confirm both aliases are imported.
        """
        config = speech1.types.RecognitionConfig(encoding=speech2.enums.
            RecognitionConfig.AudioEncoding.LINEAR16, language_code='en-US')
        audio = speech1.types.RecognitionAudio(uri=uri)
        operation = self.client.long_running_recognize(config, audio)
        print('Waiting for operation to complete')
        response = operation.result(timeout=90)
        result_str = ''
        for result in response.results:
            result_str += result.alternatives[0].transcript
        return result_str
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Google_Cloud:
<|reserved_special_token_0|>
def sentiment(self):
google_sentiment = self.client.analyze_sentiment(self.document
).document_sentiment
sent = {}
sent['sentiment'] = google_sentiment.score
sent['magnitude'] = google_sentiment.magnitude
return sent
def entities(self):
google_entities = self.client.analyze_entities(self.document).entities
entities = []
for entity in google_entities:
entities.append(entity.name.lower())
entities.sort()
return entities
<|reserved_special_token_0|>
def syntax(self):
"""Detects syntax in the text."""
tokens = self.client.analyze_syntax(self.document).tokens
pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN',
'NUM', 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')
result = []
for token in tokens:
result.append(u'{}: {}'.format(pos_tag[token.part_of_speech.tag
], token.text.content))
return result
def categories(self):
"""Classifies content categories of the provided text."""
categories = self.client.classify_text(self.document).categories
result = []
for category in categories:
result.append(category.name)
return result
class Google_ST:
    """Wrapper around the Google Cloud Speech-to-Text v1 client.

    NOTE(review): the constructor stores an audio file object and a sample
    rate, but the transcription methods below work only from a URI and never
    read those attributes -- confirm whether they are still needed.
    """
    def __init__(self, file, rate):
        # file: an opened audio file object; rate: sample rate (units not
        # shown here -- presumably Hz; confirm with callers).
        self.audio_file = file
        self.client = speech1.SpeechClient()
        self.rate = rate
    def printFields(self):
        """Debug helper: print the type of the stored file object and of its contents."""
        print(type(self.audio_file))
        print(type(self.audio_file.read()))
    def transcribe_file(self, uri):
        """Synchronously transcribe the audio at *uri*.

        For .wav: try a 2-channel LINEAR16 config, then a default
        single-channel config, then fall back to the long-running API.
        For .flac: a single FLAC attempt.  Other extensions return an
        error-message string.

        NOTE(review): every failure path only prints the exception and falls
        through, so the method can return None on error.
        """
        if uri.endswith('.wav'):
            # Attempt 1: stereo WAV with per-channel recognition.
            try:
                config = speech1.types.RecognitionConfig(encoding=speech1.
                    enums.RecognitionConfig.AudioEncoding.LINEAR16,
                    language_code='en-US', audio_channel_count=2,
                    enable_separate_recognition_per_channel=True)
                audio = speech1.types.RecognitionAudio(uri=uri)
                response = self.client.recognize(config, audio)
                result_str = ''
                for result in response.results:
                    result_str += result.alternatives[0].transcript
                    print('Transcript: {}'.format(result.alternatives[0].
                        transcript))
                return result_str
            except Exception as e:
                # Attempt 2: default single-channel LINEAR16 config.
                try:
                    config = speech1.types.RecognitionConfig(encoding=
                        speech1.enums.RecognitionConfig.AudioEncoding.
                        LINEAR16, language_code='en-US')
                    audio = speech1.types.RecognitionAudio(uri=uri)
                    response = self.client.recognize(config, audio)
                    result_str = ''
                    for result in response.results:
                        result_str += result.alternatives[0].transcript
                        print('Transcript: {}'.format(result.alternatives[0
                            ].transcript))
                    return result_str
                except Exception as e2:
                    # Attempt 3: fall back to asynchronous recognition.
                    try:
                        result_str = self.transcribe_long_file(uri)
                        return result_str
                    except Exception as e3:
                        print(e3)
        elif uri.endswith('.flac'):
            # Single attempt with the FLAC encoding.
            try:
                config = speech1.types.RecognitionConfig(encoding=speech1.
                    enums.RecognitionConfig.AudioEncoding.FLAC,
                    language_code='en-US')
                audio = speech1.types.RecognitionAudio(uri=uri)
                response = self.client.recognize(config, audio)
                result_str = ''
                for result in response.results:
                    result_str += result.alternatives[0].transcript
                    print('Transcript: {}'.format(result.alternatives[0].
                        transcript))
                return result_str
            except Exception as e:
                print(e)
        else:
            return 'Please use .wav or .flac audio files'
    def transcribe_long_file(self, uri):
        """Transcribe long audio at *uri* via the asynchronous long-running API.

        Blocks up to 90 seconds waiting for the operation, then returns the
        concatenated transcript.

        NOTE(review): this config references speech2.enums while the rest of
        the class uses speech1 -- confirm both aliases are imported.
        """
        config = speech1.types.RecognitionConfig(encoding=speech2.enums.
            RecognitionConfig.AudioEncoding.LINEAR16, language_code='en-US')
        audio = speech1.types.RecognitionAudio(uri=uri)
        operation = self.client.long_running_recognize(config, audio)
        print('Waiting for operation to complete')
        response = operation.result(timeout=90)
        result_str = ''
        for result in response.results:
            result_str += result.alternatives[0].transcript
        return result_str
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Google_Cloud:
    def __init__(self, text):
        """Create a Natural Language client and wrap *text* in a PLAIN_TEXT document.

        Args:
            text: the text to analyze; bytes are decoded as UTF-8 first.
        """
        print(text)  # NOTE(review): debug print left in; consider removing or using logging.
        self.client = language.LanguageServiceClient()
        # Normalize bytes to str before re-encoding for the API payload.
        if isinstance(text, six.binary_type):
            text = text.decode('utf-8')
        self.document = types.Document(content=text.encode('utf-8'), type=
            enums.Document.Type.PLAIN_TEXT)
def sentiment(self):
google_sentiment = self.client.analyze_sentiment(self.document
).document_sentiment
sent = {}
sent['sentiment'] = google_sentiment.score
sent['magnitude'] = google_sentiment.magnitude
return sent
def entities(self):
google_entities = self.client.analyze_entities(self.document).entities
entities = []
for entity in google_entities:
entities.append(entity.name.lower())
entities.sort()
return entities
<|reserved_special_token_0|>
def syntax(self):
"""Detects syntax in the text."""
tokens = self.client.analyze_syntax(self.document).tokens
pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN',
'NUM', 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')
result = []
for token in tokens:
result.append(u'{}: {}'.format(pos_tag[token.part_of_speech.tag
], token.text.content))
return result
def categories(self):
"""Classifies content categories of the provided text."""
categories = self.client.classify_text(self.document).categories
result = []
for category in categories:
result.append(category.name)
return result
class Google_ST:
def __init__(self, file, rate):
self.audio_file = file
self.client = speech1.SpeechClient()
self.rate = rate
def printFields(self):
print(type(self.audio_file))
print(type(self.audio_file.read()))
def transcribe_file(self, uri):
if uri.endswith('.wav'):
try:
config = speech1.types.RecognitionConfig(encoding=speech1.
enums.RecognitionConfig.AudioEncoding.LINEAR16,
language_code='en-US', audio_channel_count=2,
enable_separate_recognition_per_channel=True)
audio = speech1.types.RecognitionAudio(uri=uri)
response = self.client.recognize(config, audio)
result_str = ''
for result in response.results:
result_str += result.alternatives[0].transcript
print('Transcript: {}'.format(result.alternatives[0].
transcript))
return result_str
except Exception as e:
try:
config = speech1.types.RecognitionConfig(encoding=
speech1.enums.RecognitionConfig.AudioEncoding.
LINEAR16, language_code='en-US')
audio = speech1.types.RecognitionAudio(uri=uri)
response = self.client.recognize(config, audio)
result_str = ''
for result in response.results:
result_str += result.alternatives[0].transcript
print('Transcript: {}'.format(result.alternatives[0
].transcript))
return result_str
except Exception as e2:
try:
result_str = self.transcribe_long_file(uri)
return result_str
except Exception as e3:
print(e3)
elif uri.endswith('.flac'):
try:
config = speech1.types.RecognitionConfig(encoding=speech1.
enums.RecognitionConfig.AudioEncoding.FLAC,
language_code='en-US')
audio = speech1.types.RecognitionAudio(uri=uri)
response = self.client.recognize(config, audio)
result_str = ''
for result in response.results:
result_str += result.alternatives[0].transcript
print('Transcript: {}'.format(result.alternatives[0].
transcript))
return result_str
except Exception as e:
print(e)
else:
return 'Please use .wav or .flac audio files'
def transcribe_long_file(self, uri):
config = speech1.types.RecognitionConfig(encoding=speech2.enums.
RecognitionConfig.AudioEncoding.LINEAR16, language_code='en-US')
audio = speech1.types.RecognitionAudio(uri=uri)
operation = self.client.long_running_recognize(config, audio)
print('Waiting for operation to complete')
response = operation.result(timeout=90)
result_str = ''
for result in response.results:
result_str += result.alternatives[0].transcript
return result_str
<|reserved_special_token_1|>
from __future__ import division
import re
import sys
import six
from six.moves import queue
import os
import io
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
from google.cloud import speech as speech1
from google.cloud.speech import enums as enums2
from google.cloud.speech import types as types2
from google.cloud import speech_v1p1beta1 as speech2
class Google_Cloud:
def __init__(self, text):
print(text)
self.client = language.LanguageServiceClient()
if isinstance(text, six.binary_type):
text = text.decode('utf-8')
self.document = types.Document(
content=text.encode('utf-8'),
type=enums.Document.Type.PLAIN_TEXT)
def sentiment(self):
google_sentiment = self.client.analyze_sentiment(self.document).document_sentiment
sent = {}
sent['sentiment'] = google_sentiment.score
sent['magnitude'] = google_sentiment.magnitude
return sent
def entities(self):
google_entities = self.client.analyze_entities(self.document).entities
entities = []
for entity in google_entities:
entities.append(entity.name.lower())
entities.sort()
return entities
def entity_sentiment(self):
# Detect and send native Python encoding to receive correct word offsets.
encoding = enums.EncodingType.UTF32
if sys.maxunicode == 65535:
encoding = enums.EncodingType.UTF16
result = self.client.analyze_entity_sentiment(self.document, encoding)
entities = {}
for entity in result.entities:
entity_str = ""
entity_str += 'Mentions: '
entity_str += (u'Name: "{}"'.format(entity.name))
name = entity.name
entities[name] = entity.sentiment
return entities
def syntax(self):
"""Detects syntax in the text."""
# Detects syntax in the document. You can also analyze HTML with:
# document.type == enums.Document.Type.HTML
tokens = self.client.analyze_syntax(self.document).tokens
# part-of-speech tags from enums.PartOfSpeech.Tag
pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM',
'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')
result = []
for token in tokens:
result.append((u'{}: {}'.format(pos_tag[token.part_of_speech.tag],
token.text.content)))
return result
def categories(self):
"""Classifies content categories of the provided text."""
categories = self.client.classify_text(self.document).categories
result = []
for category in categories:
result.append(category.name)
return result
class Google_ST:
def __init__(self, file, rate):
self.audio_file = file
self.client = speech1.SpeechClient()
self.rate = rate
def printFields(self):
print(type(self.audio_file))
print(type(self.audio_file.read()))
def transcribe_file(self, uri):
#with io.open(self.audio_file, 'rb') as audio_file:
# content = audio_file.read()
#print(type(content))
#audio = types2.RecognitionAudio(uri=uri)
if uri.endswith('.wav'):
try:
config = speech1.types.RecognitionConfig(
encoding=speech1.enums.RecognitionConfig.AudioEncoding.LINEAR16,
#sample_rate_hertz=self.rate,
language_code='en-US',
audio_channel_count=2,
enable_separate_recognition_per_channel=True
)
audio = speech1.types.RecognitionAudio(uri=uri)
response = self.client.recognize(config, audio)
result_str = ''
for result in response.results:
result_str += result.alternatives[0].transcript
print('Transcript: {}'.format(result.alternatives[0].transcript))
return result_str
except Exception as e:
try:
config = speech1.types.RecognitionConfig(
encoding=speech1.enums.RecognitionConfig.AudioEncoding.LINEAR16,
#sample_rate_hertz=self.rate,
language_code='en-US',
)
audio = speech1.types.RecognitionAudio(uri=uri)
response = self.client.recognize(config, audio)
result_str = ''
for result in response.results:
result_str += result.alternatives[0].transcript
print('Transcript: {}'.format(result.alternatives[0].transcript))
return result_str
except Exception as e2:
try:
result_str = self.transcribe_long_file(uri)
return result_str
except Exception as e3:
print(e3)
elif uri.endswith('.flac'):
try:
config = speech1.types.RecognitionConfig(
encoding=speech1.enums.RecognitionConfig.AudioEncoding.FLAC,
#sample_rate_hertz=self.rate,
language_code='en-US',
)
audio = speech1.types.RecognitionAudio(uri=uri)
response = self.client.recognize(config, audio)
result_str = ''
for result in response.results:
result_str += result.alternatives[0].transcript
print('Transcript: {}'.format(result.alternatives[0].transcript))
return result_str
except Exception as e:
print(e)
else:
return "Please use .wav or .flac audio files"
def transcribe_long_file(self, uri):
config = speech1.types.RecognitionConfig(
encoding=speech2.enums.RecognitionConfig.AudioEncoding.LINEAR16,
#sample_rate_hertz=self.rate,
language_code='en-US',
)
audio = speech1.types.RecognitionAudio(uri=uri)
operation = self.client.long_running_recognize(config, audio)
print('Waiting for operation to complete')
response = operation.result(timeout=90)
result_str = ''
for result in response.results:
result_str += result.alternatives[0].transcript
return result_str
|
flexible
|
{
"blob_id": "6868a8b5d36403f1417301acdca5f5dc9e45c682",
"index": 9849,
"step-1": "<mask token>\n\n\nclass Google_Cloud:\n <mask token>\n\n def sentiment(self):\n google_sentiment = self.client.analyze_sentiment(self.document\n ).document_sentiment\n sent = {}\n sent['sentiment'] = google_sentiment.score\n sent['magnitude'] = google_sentiment.magnitude\n return sent\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Google_ST:\n\n def __init__(self, file, rate):\n self.audio_file = file\n self.client = speech1.SpeechClient()\n self.rate = rate\n\n def printFields(self):\n print(type(self.audio_file))\n print(type(self.audio_file.read()))\n\n def transcribe_file(self, uri):\n if uri.endswith('.wav'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.LINEAR16,\n language_code='en-US', audio_channel_count=2,\n enable_separate_recognition_per_channel=True)\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n try:\n config = speech1.types.RecognitionConfig(encoding=\n speech1.enums.RecognitionConfig.AudioEncoding.\n LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0\n ].transcript))\n return result_str\n except Exception as e2:\n try:\n result_str = self.transcribe_long_file(uri)\n return result_str\n except Exception as e3:\n print(e3)\n elif uri.endswith('.flac'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.FLAC,\n language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = 
self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n print(e)\n else:\n return 'Please use .wav or .flac audio files'\n\n def transcribe_long_file(self, uri):\n config = speech1.types.RecognitionConfig(encoding=speech2.enums.\n RecognitionConfig.AudioEncoding.LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n operation = self.client.long_running_recognize(config, audio)\n print('Waiting for operation to complete')\n response = operation.result(timeout=90)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n return result_str\n",
"step-2": "<mask token>\n\n\nclass Google_Cloud:\n <mask token>\n\n def sentiment(self):\n google_sentiment = self.client.analyze_sentiment(self.document\n ).document_sentiment\n sent = {}\n sent['sentiment'] = google_sentiment.score\n sent['magnitude'] = google_sentiment.magnitude\n return sent\n\n def entities(self):\n google_entities = self.client.analyze_entities(self.document).entities\n entities = []\n for entity in google_entities:\n entities.append(entity.name.lower())\n entities.sort()\n return entities\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Google_ST:\n\n def __init__(self, file, rate):\n self.audio_file = file\n self.client = speech1.SpeechClient()\n self.rate = rate\n\n def printFields(self):\n print(type(self.audio_file))\n print(type(self.audio_file.read()))\n\n def transcribe_file(self, uri):\n if uri.endswith('.wav'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.LINEAR16,\n language_code='en-US', audio_channel_count=2,\n enable_separate_recognition_per_channel=True)\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n try:\n config = speech1.types.RecognitionConfig(encoding=\n speech1.enums.RecognitionConfig.AudioEncoding.\n LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0\n ].transcript))\n return result_str\n except Exception as e2:\n try:\n result_str = self.transcribe_long_file(uri)\n return result_str\n except Exception as e3:\n print(e3)\n elif 
uri.endswith('.flac'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.FLAC,\n language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n print(e)\n else:\n return 'Please use .wav or .flac audio files'\n\n def transcribe_long_file(self, uri):\n config = speech1.types.RecognitionConfig(encoding=speech2.enums.\n RecognitionConfig.AudioEncoding.LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n operation = self.client.long_running_recognize(config, audio)\n print('Waiting for operation to complete')\n response = operation.result(timeout=90)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n return result_str\n",
"step-3": "<mask token>\n\n\nclass Google_Cloud:\n <mask token>\n\n def sentiment(self):\n google_sentiment = self.client.analyze_sentiment(self.document\n ).document_sentiment\n sent = {}\n sent['sentiment'] = google_sentiment.score\n sent['magnitude'] = google_sentiment.magnitude\n return sent\n\n def entities(self):\n google_entities = self.client.analyze_entities(self.document).entities\n entities = []\n for entity in google_entities:\n entities.append(entity.name.lower())\n entities.sort()\n return entities\n <mask token>\n\n def syntax(self):\n \"\"\"Detects syntax in the text.\"\"\"\n tokens = self.client.analyze_syntax(self.document).tokens\n pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN',\n 'NUM', 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')\n result = []\n for token in tokens:\n result.append(u'{}: {}'.format(pos_tag[token.part_of_speech.tag\n ], token.text.content))\n return result\n\n def categories(self):\n \"\"\"Classifies content categories of the provided text.\"\"\"\n categories = self.client.classify_text(self.document).categories\n result = []\n for category in categories:\n result.append(category.name)\n return result\n\n\nclass Google_ST:\n\n def __init__(self, file, rate):\n self.audio_file = file\n self.client = speech1.SpeechClient()\n self.rate = rate\n\n def printFields(self):\n print(type(self.audio_file))\n print(type(self.audio_file.read()))\n\n def transcribe_file(self, uri):\n if uri.endswith('.wav'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.LINEAR16,\n language_code='en-US', audio_channel_count=2,\n enable_separate_recognition_per_channel=True)\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except 
Exception as e:\n try:\n config = speech1.types.RecognitionConfig(encoding=\n speech1.enums.RecognitionConfig.AudioEncoding.\n LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0\n ].transcript))\n return result_str\n except Exception as e2:\n try:\n result_str = self.transcribe_long_file(uri)\n return result_str\n except Exception as e3:\n print(e3)\n elif uri.endswith('.flac'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.FLAC,\n language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n print(e)\n else:\n return 'Please use .wav or .flac audio files'\n\n def transcribe_long_file(self, uri):\n config = speech1.types.RecognitionConfig(encoding=speech2.enums.\n RecognitionConfig.AudioEncoding.LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n operation = self.client.long_running_recognize(config, audio)\n print('Waiting for operation to complete')\n response = operation.result(timeout=90)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n return result_str\n",
"step-4": "<mask token>\n\n\nclass Google_Cloud:\n\n def __init__(self, text):\n print(text)\n self.client = language.LanguageServiceClient()\n if isinstance(text, six.binary_type):\n text = text.decode('utf-8')\n self.document = types.Document(content=text.encode('utf-8'), type=\n enums.Document.Type.PLAIN_TEXT)\n\n def sentiment(self):\n google_sentiment = self.client.analyze_sentiment(self.document\n ).document_sentiment\n sent = {}\n sent['sentiment'] = google_sentiment.score\n sent['magnitude'] = google_sentiment.magnitude\n return sent\n\n def entities(self):\n google_entities = self.client.analyze_entities(self.document).entities\n entities = []\n for entity in google_entities:\n entities.append(entity.name.lower())\n entities.sort()\n return entities\n <mask token>\n\n def syntax(self):\n \"\"\"Detects syntax in the text.\"\"\"\n tokens = self.client.analyze_syntax(self.document).tokens\n pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN',\n 'NUM', 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')\n result = []\n for token in tokens:\n result.append(u'{}: {}'.format(pos_tag[token.part_of_speech.tag\n ], token.text.content))\n return result\n\n def categories(self):\n \"\"\"Classifies content categories of the provided text.\"\"\"\n categories = self.client.classify_text(self.document).categories\n result = []\n for category in categories:\n result.append(category.name)\n return result\n\n\nclass Google_ST:\n\n def __init__(self, file, rate):\n self.audio_file = file\n self.client = speech1.SpeechClient()\n self.rate = rate\n\n def printFields(self):\n print(type(self.audio_file))\n print(type(self.audio_file.read()))\n\n def transcribe_file(self, uri):\n if uri.endswith('.wav'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.LINEAR16,\n language_code='en-US', audio_channel_count=2,\n enable_separate_recognition_per_channel=True)\n audio = speech1.types.RecognitionAudio(uri=uri)\n 
response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n try:\n config = speech1.types.RecognitionConfig(encoding=\n speech1.enums.RecognitionConfig.AudioEncoding.\n LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0\n ].transcript))\n return result_str\n except Exception as e2:\n try:\n result_str = self.transcribe_long_file(uri)\n return result_str\n except Exception as e3:\n print(e3)\n elif uri.endswith('.flac'):\n try:\n config = speech1.types.RecognitionConfig(encoding=speech1.\n enums.RecognitionConfig.AudioEncoding.FLAC,\n language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].\n transcript))\n return result_str\n except Exception as e:\n print(e)\n else:\n return 'Please use .wav or .flac audio files'\n\n def transcribe_long_file(self, uri):\n config = speech1.types.RecognitionConfig(encoding=speech2.enums.\n RecognitionConfig.AudioEncoding.LINEAR16, language_code='en-US')\n audio = speech1.types.RecognitionAudio(uri=uri)\n operation = self.client.long_running_recognize(config, audio)\n print('Waiting for operation to complete')\n response = operation.result(timeout=90)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n return result_str\n",
"step-5": "from __future__ import division\n\nimport re\nimport sys\nimport six\nfrom six.moves import queue\nimport os\nimport io\nfrom google.cloud import language\nfrom google.cloud.language import enums\nfrom google.cloud.language import types\nfrom google.cloud import speech as speech1\nfrom google.cloud.speech import enums as enums2\nfrom google.cloud.speech import types as types2\nfrom google.cloud import speech_v1p1beta1 as speech2\n\n\nclass Google_Cloud:\n\n def __init__(self, text):\n print(text)\n self.client = language.LanguageServiceClient()\n\n if isinstance(text, six.binary_type):\n text = text.decode('utf-8')\n\n self.document = types.Document(\n content=text.encode('utf-8'),\n type=enums.Document.Type.PLAIN_TEXT)\n\n def sentiment(self):\n google_sentiment = self.client.analyze_sentiment(self.document).document_sentiment\n sent = {}\n sent['sentiment'] = google_sentiment.score\n sent['magnitude'] = google_sentiment.magnitude\n return sent\n \n def entities(self):\n google_entities = self.client.analyze_entities(self.document).entities\n \n entities = []\n for entity in google_entities:\n entities.append(entity.name.lower())\n\n entities.sort()\n return entities\n\n def entity_sentiment(self):\n # Detect and send native Python encoding to receive correct word offsets.\n encoding = enums.EncodingType.UTF32\n if sys.maxunicode == 65535:\n encoding = enums.EncodingType.UTF16\n\n result = self.client.analyze_entity_sentiment(self.document, encoding)\n\n entities = {}\n for entity in result.entities:\n entity_str = \"\"\n entity_str += 'Mentions: '\n entity_str += (u'Name: \"{}\"'.format(entity.name))\n name = entity.name\n entities[name] = entity.sentiment\n\n return entities\n\n def syntax(self):\n \"\"\"Detects syntax in the text.\"\"\"\n\n # Detects syntax in the document. 
You can also analyze HTML with:\n # document.type == enums.Document.Type.HTML\n tokens = self.client.analyze_syntax(self.document).tokens\n\n # part-of-speech tags from enums.PartOfSpeech.Tag\n pos_tag = ('UNKNOWN', 'ADJ', 'ADP', 'ADV', 'CONJ', 'DET', 'NOUN', 'NUM',\n 'PRON', 'PRT', 'PUNCT', 'VERB', 'X', 'AFFIX')\n\n result = []\n for token in tokens:\n result.append((u'{}: {}'.format(pos_tag[token.part_of_speech.tag],\n token.text.content)))\n \n return result\n\n def categories(self):\n \"\"\"Classifies content categories of the provided text.\"\"\"\n categories = self.client.classify_text(self.document).categories\n\n result = []\n for category in categories:\n result.append(category.name)\n\n return result\n \nclass Google_ST:\n def __init__(self, file, rate):\n self.audio_file = file\n self.client = speech1.SpeechClient()\n self.rate = rate\n\n def printFields(self):\n print(type(self.audio_file))\n print(type(self.audio_file.read()))\n\n def transcribe_file(self, uri):\n #with io.open(self.audio_file, 'rb') as audio_file:\n # content = audio_file.read()\n #print(type(content))\n #audio = types2.RecognitionAudio(uri=uri)\n\n if uri.endswith('.wav'):\n try:\n config = speech1.types.RecognitionConfig(\n encoding=speech1.enums.RecognitionConfig.AudioEncoding.LINEAR16,\n #sample_rate_hertz=self.rate,\n language_code='en-US',\n audio_channel_count=2,\n enable_separate_recognition_per_channel=True\n )\n audio = speech1.types.RecognitionAudio(uri=uri)\n \n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].transcript))\n\n return result_str\n\n except Exception as e:\n try:\n config = speech1.types.RecognitionConfig(\n encoding=speech1.enums.RecognitionConfig.AudioEncoding.LINEAR16,\n #sample_rate_hertz=self.rate,\n language_code='en-US',\n )\n audio = speech1.types.RecognitionAudio(uri=uri)\n \n response = 
self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].transcript))\n\n return result_str\n\n except Exception as e2:\n try:\n result_str = self.transcribe_long_file(uri)\n return result_str\n except Exception as e3:\n print(e3)\n\n elif uri.endswith('.flac'):\n try:\n config = speech1.types.RecognitionConfig(\n encoding=speech1.enums.RecognitionConfig.AudioEncoding.FLAC,\n #sample_rate_hertz=self.rate,\n language_code='en-US',\n )\n audio = speech1.types.RecognitionAudio(uri=uri)\n \n response = self.client.recognize(config, audio)\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n print('Transcript: {}'.format(result.alternatives[0].transcript))\n\n return result_str \n except Exception as e:\n print(e)\n\n else:\n return \"Please use .wav or .flac audio files\"\n\n \n def transcribe_long_file(self, uri):\n config = speech1.types.RecognitionConfig(\n encoding=speech2.enums.RecognitionConfig.AudioEncoding.LINEAR16,\n #sample_rate_hertz=self.rate,\n language_code='en-US',\n )\n audio = speech1.types.RecognitionAudio(uri=uri)\n \n operation = self.client.long_running_recognize(config, audio)\n print('Waiting for operation to complete')\n response = operation.result(timeout=90)\n\n result_str = ''\n for result in response.results:\n result_str += result.alternatives[0].transcript\n \n return result_str\n\n \n",
"step-ids": [
7,
8,
10,
11,
14
]
}
|
[
7,
8,
10,
11,
14
] |
import string
import pandas as pd
import nltk
from nltk import word_tokenize
from nltk.stem import SnowballStemmer
from nltk.tokenize import WordPunctTokenizer
import json
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
import pickle
import re
import nlpaug.augmenter.word as naw
import nlpaug.flow as naf
class Processing:
def __init__(self, stopwords_path='data/', tokenizer_path='models/', max_len=80):
# It needs a stopwords file to init
stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt', header=None)
stop_words = stop_words[0].tolist() + ['secuela']
self.stop_words = stop_words
self.n_words = 8000
self.max_len = max_len
# self.aug = naf.Sequential([
# naw.ContextualWordEmbsAug(model_path='bert-base-multilingual-cased', action="insert", aug_p=0.1),
# naw.ContextualWordEmbsAug(model_path='bert-base-multilingual-cased', action="substitute", aug_p=0.9),
# naw.RandomWordAug(action="delete", aug_p=0.1)
# ])
try:
self.stemmer = SnowballStemmer("spanish", ignore_stopwords=True)
except:
nltk.download("popular")
self.stemmer = SnowballStemmer("spanish", ignore_stopwords=True)
# loading
with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:
self.tokenizer = pickle.load(handle)
self.__vocab_size = len(self.tokenizer.word_index) + 1
@property
def vocab_size(self):
return self.__vocab_size
def normalize(self, s):
s = s.lower()
replacements = (
("á", "a"),
("é", "e"),
("í", "i"),
("ó", "o"),
("ú", "u"),
("ñ", "n")
)
for a, b in replacements:
s = s.replace(a, b).replace(a.upper(), b.upper())
return s
def split_punt(self, x):
words = WordPunctTokenizer().tokenize(x)
x = str(' '.join(words))
x = re.sub(' +', ' ', x)
return x
def delete_stop_words(self, x):
x = x.translate(str.maketrans('', '', string.punctuation))
x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))
words = x.split(' ')
words = [word for word in words if word not in self.stop_words]
x = str(' '.join(words))
return x
def stem_sentence(self, sentence):
# Stem the sentence
stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(sentence)]
return " ".join(stemmed_text)
def augment(self, x):
try:
return self.aug.augment(x)
except:
return None
def clean_overview(self, df):
# Execute the full cleaning process into every overview
df['overview'] = df['overview'].apply(lambda x: self.normalize(x))
df['overview'] = df['overview'].apply(lambda x: self.delete_stop_words(x))
df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))
df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) + ' ' + x['overview'], axis=1)
df['overview'] = df.apply(lambda x: self.get_director(x['crew']) + x['overview'], axis=1)
df['overview'] = df['overview'].apply(lambda x: self.normalize(x))
df['overview'] = df['overview'].apply(lambda x: self.delete_stop_words(x))
return df
# Get staff and paste to overview
@staticmethod
def eval_cell(cell):
try:
cell_array = eval(cell)
except:
cell_array = []
return cell_array
def get_actors(self, cast):
eval_cast = self.eval_cell(cast)
if len(eval_cast) > 2:
up = 3
else:
up = len(eval_cast)
actors = ''
for i in range(0, up):
actor = eval_cast[i]['name']
actor = self.normalize(actor.replace(' ', '_').lower())
actors = actors + ' ' + actor
return actors
def get_director(self, crew):
eval_crew = self.eval_cell(crew)
directors = [member['name'] for member in eval_crew if member['job'] == 'Director']
directors = [self.normalize(director.replace(' ', '_').lower()) for director in directors]
directors = str(' '.join(directors))
return directors
def paste_cast(self, data):
data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) + ' ' + x['overview'], axis=1)
data['overview'] = data.apply(lambda x: self.get_director(x['crew']) + x['overview'], axis=1)
return data
# Split train_test
def split_data(self, data):
overviews = data['overview'].values
y = data['like'].values
overviews_train, overviews_test, y_train, y_test = train_test_split(overviews, y, test_size=0.15, stratify=y,
random_state=9)
return overviews_train, overviews_test, y_train, y_test
def fit_tokenizer(self, overviews_train, num_words):
self.tokenizer = Tokenizer(num_words)
self.tokenizer.fit_on_texts(overviews_train)
# Adding 1 because of reserved 0 index
self.vocab_size = len(self.tokenizer.word_index) + 1
def tokenize_overview(self, overviews, max_len):
X = self.tokenizer.texts_to_sequences(overviews)
# print(len(max(X, key=len)))
from keras.preprocessing.sequence import pad_sequences
# We pad the sentence for the left to fit with max_len
X = pad_sequences(X, padding='pre', maxlen=max_len)
# print(X[1])
return X
def process(self, data, train_dev):
    """Run the full preprocessing pipeline on a raw DataFrame.

    With train_dev truthy: split, fit the tokenizer on the training texts,
    and return (X_train, X_test) as padded integer matrices (labels from
    the split are discarded here). Otherwise: tokenize every overview with
    the existing tokenizer and return a single matrix X.
    """
    df = self.paste_cast(self.clean_overview(data))
    if not train_dev:
        return self.tokenize_overview(df['overview'].values, self.max_len)
    X_train, X_test, _, _ = self.split_data(df)
    self.fit_tokenizer(X_train, self.n_words)
    X_train = self.tokenize_overview(X_train, self.max_len)
    X_test = self.tokenize_overview(X_test, self.max_len)
    return X_train, X_test
|
normal
|
{
"blob_id": "326b2dcbef339aeb196bef23debad75fa079b121",
"index": 6435,
"step-1": "<mask token>\n\n\nclass Processing:\n <mask token>\n\n @property\n def vocab_size(self):\n return self.__vocab_size\n\n def normalize(self, s):\n s = s.lower()\n replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú',\n 'u'), ('ñ', 'n')\n for a, b in replacements:\n s = s.replace(a, b).replace(a.upper(), b.upper())\n return s\n\n def split_punt(self, x):\n words = WordPunctTokenizer().tokenize(x)\n x = str(' '.join(words))\n x = re.sub(' +', ' ', x)\n return x\n\n def delete_stop_words(self, x):\n x = x.translate(str.maketrans('', '', string.punctuation))\n x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))\n words = x.split(' ')\n words = [word for word in words if word not in self.stop_words]\n x = str(' '.join(words))\n return x\n\n def stem_sentence(self, sentence):\n stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(\n sentence)]\n return ' '.join(stemmed_text)\n <mask token>\n <mask token>\n\n @staticmethod\n def eval_cell(cell):\n try:\n cell_array = eval(cell)\n except:\n cell_array = []\n return cell_array\n\n def get_actors(self, cast):\n eval_cast = self.eval_cell(cast)\n if len(eval_cast) > 2:\n up = 3\n else:\n up = len(eval_cast)\n actors = ''\n for i in range(0, up):\n actor = eval_cast[i]['name']\n actor = self.normalize(actor.replace(' ', '_').lower())\n actors = actors + ' ' + actor\n return actors\n <mask token>\n <mask token>\n\n def split_data(self, data):\n overviews = data['overview'].values\n y = data['like'].values\n overviews_train, overviews_test, y_train, y_test = train_test_split(\n overviews, y, test_size=0.15, stratify=y, random_state=9)\n return overviews_train, overviews_test, y_train, y_test\n\n def fit_tokenizer(self, overviews_train, num_words):\n self.tokenizer = Tokenizer(num_words)\n self.tokenizer.fit_on_texts(overviews_train)\n self.vocab_size = len(self.tokenizer.word_index) + 1\n\n def tokenize_overview(self, overviews, max_len):\n X = 
self.tokenizer.texts_to_sequences(overviews)\n from keras.preprocessing.sequence import pad_sequences\n X = pad_sequences(X, padding='pre', maxlen=max_len)\n return X\n\n def process(self, data, train_dev):\n df = self.clean_overview(data)\n df = self.paste_cast(df)\n if train_dev:\n X_train, X_test, y_train, y_test = self.split_data(df)\n self.fit_tokenizer(X_train, self.n_words)\n X_train = self.tokenize_overview(X_train, self.max_len)\n X_test = self.tokenize_overview(X_test, self.max_len)\n return X_train, X_test\n else:\n X = df['overview'].values\n X = self.tokenize_overview(X, self.max_len)\n return X\n",
"step-2": "<mask token>\n\n\nclass Processing:\n\n def __init__(self, stopwords_path='data/', tokenizer_path='models/',\n max_len=80):\n stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt',\n header=None)\n stop_words = stop_words[0].tolist() + ['secuela']\n self.stop_words = stop_words\n self.n_words = 8000\n self.max_len = max_len\n try:\n self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)\n except:\n nltk.download('popular')\n self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)\n with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:\n self.tokenizer = pickle.load(handle)\n self.__vocab_size = len(self.tokenizer.word_index) + 1\n\n @property\n def vocab_size(self):\n return self.__vocab_size\n\n def normalize(self, s):\n s = s.lower()\n replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú',\n 'u'), ('ñ', 'n')\n for a, b in replacements:\n s = s.replace(a, b).replace(a.upper(), b.upper())\n return s\n\n def split_punt(self, x):\n words = WordPunctTokenizer().tokenize(x)\n x = str(' '.join(words))\n x = re.sub(' +', ' ', x)\n return x\n\n def delete_stop_words(self, x):\n x = x.translate(str.maketrans('', '', string.punctuation))\n x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))\n words = x.split(' ')\n words = [word for word in words if word not in self.stop_words]\n x = str(' '.join(words))\n return x\n\n def stem_sentence(self, sentence):\n stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(\n sentence)]\n return ' '.join(stemmed_text)\n <mask token>\n\n def clean_overview(self, df):\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.\n delete_stop_words(x))\n df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))\n df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) +\n ' ' + x['overview'], axis=1)\n df['overview'] = df.apply(lambda x: self.get_director(x['crew']) +\n 
x['overview'], axis=1)\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.\n delete_stop_words(x))\n return df\n\n @staticmethod\n def eval_cell(cell):\n try:\n cell_array = eval(cell)\n except:\n cell_array = []\n return cell_array\n\n def get_actors(self, cast):\n eval_cast = self.eval_cell(cast)\n if len(eval_cast) > 2:\n up = 3\n else:\n up = len(eval_cast)\n actors = ''\n for i in range(0, up):\n actor = eval_cast[i]['name']\n actor = self.normalize(actor.replace(' ', '_').lower())\n actors = actors + ' ' + actor\n return actors\n\n def get_director(self, crew):\n eval_crew = self.eval_cell(crew)\n directors = [member['name'] for member in eval_crew if member['job'\n ] == 'Director']\n directors = [self.normalize(director.replace(' ', '_').lower()) for\n director in directors]\n directors = str(' '.join(directors))\n return directors\n\n def paste_cast(self, data):\n data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) +\n ' ' + x['overview'], axis=1)\n data['overview'] = data.apply(lambda x: self.get_director(x['crew']\n ) + x['overview'], axis=1)\n return data\n\n def split_data(self, data):\n overviews = data['overview'].values\n y = data['like'].values\n overviews_train, overviews_test, y_train, y_test = train_test_split(\n overviews, y, test_size=0.15, stratify=y, random_state=9)\n return overviews_train, overviews_test, y_train, y_test\n\n def fit_tokenizer(self, overviews_train, num_words):\n self.tokenizer = Tokenizer(num_words)\n self.tokenizer.fit_on_texts(overviews_train)\n self.vocab_size = len(self.tokenizer.word_index) + 1\n\n def tokenize_overview(self, overviews, max_len):\n X = self.tokenizer.texts_to_sequences(overviews)\n from keras.preprocessing.sequence import pad_sequences\n X = pad_sequences(X, padding='pre', maxlen=max_len)\n return X\n\n def process(self, data, train_dev):\n df = self.clean_overview(data)\n df = self.paste_cast(df)\n if train_dev:\n 
X_train, X_test, y_train, y_test = self.split_data(df)\n self.fit_tokenizer(X_train, self.n_words)\n X_train = self.tokenize_overview(X_train, self.max_len)\n X_test = self.tokenize_overview(X_test, self.max_len)\n return X_train, X_test\n else:\n X = df['overview'].values\n X = self.tokenize_overview(X, self.max_len)\n return X\n",
"step-3": "<mask token>\n\n\nclass Processing:\n\n def __init__(self, stopwords_path='data/', tokenizer_path='models/',\n max_len=80):\n stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt',\n header=None)\n stop_words = stop_words[0].tolist() + ['secuela']\n self.stop_words = stop_words\n self.n_words = 8000\n self.max_len = max_len\n try:\n self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)\n except:\n nltk.download('popular')\n self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)\n with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:\n self.tokenizer = pickle.load(handle)\n self.__vocab_size = len(self.tokenizer.word_index) + 1\n\n @property\n def vocab_size(self):\n return self.__vocab_size\n\n def normalize(self, s):\n s = s.lower()\n replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú',\n 'u'), ('ñ', 'n')\n for a, b in replacements:\n s = s.replace(a, b).replace(a.upper(), b.upper())\n return s\n\n def split_punt(self, x):\n words = WordPunctTokenizer().tokenize(x)\n x = str(' '.join(words))\n x = re.sub(' +', ' ', x)\n return x\n\n def delete_stop_words(self, x):\n x = x.translate(str.maketrans('', '', string.punctuation))\n x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))\n words = x.split(' ')\n words = [word for word in words if word not in self.stop_words]\n x = str(' '.join(words))\n return x\n\n def stem_sentence(self, sentence):\n stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(\n sentence)]\n return ' '.join(stemmed_text)\n\n def augment(self, x):\n try:\n return self.aug.augment(x)\n except:\n return None\n\n def clean_overview(self, df):\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.\n delete_stop_words(x))\n df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))\n df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) +\n ' ' + x['overview'], axis=1)\n 
df['overview'] = df.apply(lambda x: self.get_director(x['crew']) +\n x['overview'], axis=1)\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.\n delete_stop_words(x))\n return df\n\n @staticmethod\n def eval_cell(cell):\n try:\n cell_array = eval(cell)\n except:\n cell_array = []\n return cell_array\n\n def get_actors(self, cast):\n eval_cast = self.eval_cell(cast)\n if len(eval_cast) > 2:\n up = 3\n else:\n up = len(eval_cast)\n actors = ''\n for i in range(0, up):\n actor = eval_cast[i]['name']\n actor = self.normalize(actor.replace(' ', '_').lower())\n actors = actors + ' ' + actor\n return actors\n\n def get_director(self, crew):\n eval_crew = self.eval_cell(crew)\n directors = [member['name'] for member in eval_crew if member['job'\n ] == 'Director']\n directors = [self.normalize(director.replace(' ', '_').lower()) for\n director in directors]\n directors = str(' '.join(directors))\n return directors\n\n def paste_cast(self, data):\n data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) +\n ' ' + x['overview'], axis=1)\n data['overview'] = data.apply(lambda x: self.get_director(x['crew']\n ) + x['overview'], axis=1)\n return data\n\n def split_data(self, data):\n overviews = data['overview'].values\n y = data['like'].values\n overviews_train, overviews_test, y_train, y_test = train_test_split(\n overviews, y, test_size=0.15, stratify=y, random_state=9)\n return overviews_train, overviews_test, y_train, y_test\n\n def fit_tokenizer(self, overviews_train, num_words):\n self.tokenizer = Tokenizer(num_words)\n self.tokenizer.fit_on_texts(overviews_train)\n self.vocab_size = len(self.tokenizer.word_index) + 1\n\n def tokenize_overview(self, overviews, max_len):\n X = self.tokenizer.texts_to_sequences(overviews)\n from keras.preprocessing.sequence import pad_sequences\n X = pad_sequences(X, padding='pre', maxlen=max_len)\n return X\n\n def process(self, data, train_dev):\n df = 
self.clean_overview(data)\n df = self.paste_cast(df)\n if train_dev:\n X_train, X_test, y_train, y_test = self.split_data(df)\n self.fit_tokenizer(X_train, self.n_words)\n X_train = self.tokenize_overview(X_train, self.max_len)\n X_test = self.tokenize_overview(X_test, self.max_len)\n return X_train, X_test\n else:\n X = df['overview'].values\n X = self.tokenize_overview(X, self.max_len)\n return X\n",
"step-4": "import string\nimport pandas as pd\nimport nltk\nfrom nltk import word_tokenize\nfrom nltk.stem import SnowballStemmer\nfrom nltk.tokenize import WordPunctTokenizer\nimport json\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.text import Tokenizer\nimport pickle\nimport re\nimport nlpaug.augmenter.word as naw\nimport nlpaug.flow as naf\n\n\nclass Processing:\n\n def __init__(self, stopwords_path='data/', tokenizer_path='models/',\n max_len=80):\n stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt',\n header=None)\n stop_words = stop_words[0].tolist() + ['secuela']\n self.stop_words = stop_words\n self.n_words = 8000\n self.max_len = max_len\n try:\n self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)\n except:\n nltk.download('popular')\n self.stemmer = SnowballStemmer('spanish', ignore_stopwords=True)\n with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:\n self.tokenizer = pickle.load(handle)\n self.__vocab_size = len(self.tokenizer.word_index) + 1\n\n @property\n def vocab_size(self):\n return self.__vocab_size\n\n def normalize(self, s):\n s = s.lower()\n replacements = ('á', 'a'), ('é', 'e'), ('í', 'i'), ('ó', 'o'), ('ú',\n 'u'), ('ñ', 'n')\n for a, b in replacements:\n s = s.replace(a, b).replace(a.upper(), b.upper())\n return s\n\n def split_punt(self, x):\n words = WordPunctTokenizer().tokenize(x)\n x = str(' '.join(words))\n x = re.sub(' +', ' ', x)\n return x\n\n def delete_stop_words(self, x):\n x = x.translate(str.maketrans('', '', string.punctuation))\n x = x.translate(str.maketrans('', '', '1234567890ªº¡¿'))\n words = x.split(' ')\n words = [word for word in words if word not in self.stop_words]\n x = str(' '.join(words))\n return x\n\n def stem_sentence(self, sentence):\n stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(\n sentence)]\n return ' '.join(stemmed_text)\n\n def augment(self, x):\n try:\n return self.aug.augment(x)\n except:\n return None\n\n def 
clean_overview(self, df):\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.\n delete_stop_words(x))\n df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))\n df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) +\n ' ' + x['overview'], axis=1)\n df['overview'] = df.apply(lambda x: self.get_director(x['crew']) +\n x['overview'], axis=1)\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.\n delete_stop_words(x))\n return df\n\n @staticmethod\n def eval_cell(cell):\n try:\n cell_array = eval(cell)\n except:\n cell_array = []\n return cell_array\n\n def get_actors(self, cast):\n eval_cast = self.eval_cell(cast)\n if len(eval_cast) > 2:\n up = 3\n else:\n up = len(eval_cast)\n actors = ''\n for i in range(0, up):\n actor = eval_cast[i]['name']\n actor = self.normalize(actor.replace(' ', '_').lower())\n actors = actors + ' ' + actor\n return actors\n\n def get_director(self, crew):\n eval_crew = self.eval_cell(crew)\n directors = [member['name'] for member in eval_crew if member['job'\n ] == 'Director']\n directors = [self.normalize(director.replace(' ', '_').lower()) for\n director in directors]\n directors = str(' '.join(directors))\n return directors\n\n def paste_cast(self, data):\n data['overview'] = data.apply(lambda x: self.get_actors(x['cast']) +\n ' ' + x['overview'], axis=1)\n data['overview'] = data.apply(lambda x: self.get_director(x['crew']\n ) + x['overview'], axis=1)\n return data\n\n def split_data(self, data):\n overviews = data['overview'].values\n y = data['like'].values\n overviews_train, overviews_test, y_train, y_test = train_test_split(\n overviews, y, test_size=0.15, stratify=y, random_state=9)\n return overviews_train, overviews_test, y_train, y_test\n\n def fit_tokenizer(self, overviews_train, num_words):\n self.tokenizer = Tokenizer(num_words)\n 
self.tokenizer.fit_on_texts(overviews_train)\n self.vocab_size = len(self.tokenizer.word_index) + 1\n\n def tokenize_overview(self, overviews, max_len):\n X = self.tokenizer.texts_to_sequences(overviews)\n from keras.preprocessing.sequence import pad_sequences\n X = pad_sequences(X, padding='pre', maxlen=max_len)\n return X\n\n def process(self, data, train_dev):\n df = self.clean_overview(data)\n df = self.paste_cast(df)\n if train_dev:\n X_train, X_test, y_train, y_test = self.split_data(df)\n self.fit_tokenizer(X_train, self.n_words)\n X_train = self.tokenize_overview(X_train, self.max_len)\n X_test = self.tokenize_overview(X_test, self.max_len)\n return X_train, X_test\n else:\n X = df['overview'].values\n X = self.tokenize_overview(X, self.max_len)\n return X\n",
"step-5": "import string\nimport pandas as pd\nimport nltk\nfrom nltk import word_tokenize\nfrom nltk.stem import SnowballStemmer\nfrom nltk.tokenize import WordPunctTokenizer\nimport json\nfrom sklearn.model_selection import train_test_split\nfrom keras.preprocessing.text import Tokenizer\nimport pickle\nimport re\nimport nlpaug.augmenter.word as naw\nimport nlpaug.flow as naf\n\n\nclass Processing:\n\n def __init__(self, stopwords_path='data/', tokenizer_path='models/', max_len=80):\n # It needs a stopwords file to init\n stop_words = pd.read_csv(stopwords_path + 'stopwords-es.txt', header=None)\n stop_words = stop_words[0].tolist() + ['secuela']\n self.stop_words = stop_words\n self.n_words = 8000\n self.max_len = max_len\n # self.aug = naf.Sequential([\n # naw.ContextualWordEmbsAug(model_path='bert-base-multilingual-cased', action=\"insert\", aug_p=0.1),\n # naw.ContextualWordEmbsAug(model_path='bert-base-multilingual-cased', action=\"substitute\", aug_p=0.9),\n # naw.RandomWordAug(action=\"delete\", aug_p=0.1)\n # ])\n\n try:\n self.stemmer = SnowballStemmer(\"spanish\", ignore_stopwords=True)\n except:\n nltk.download(\"popular\")\n self.stemmer = SnowballStemmer(\"spanish\", ignore_stopwords=True)\n\n # loading\n with open(tokenizer_path + 'tokenizer.pickle', 'rb') as handle:\n self.tokenizer = pickle.load(handle)\n self.__vocab_size = len(self.tokenizer.word_index) + 1\n\n @property\n def vocab_size(self):\n return self.__vocab_size\n\n def normalize(self, s):\n s = s.lower()\n replacements = (\n (\"á\", \"a\"),\n (\"é\", \"e\"),\n (\"í\", \"i\"),\n (\"ó\", \"o\"),\n (\"ú\", \"u\"),\n (\"ñ\", \"n\")\n )\n for a, b in replacements:\n s = s.replace(a, b).replace(a.upper(), b.upper())\n\n return s\n\n def split_punt(self, x):\n words = WordPunctTokenizer().tokenize(x)\n x = str(' '.join(words))\n x = re.sub(' +', ' ', x)\n\n return x\n\n def delete_stop_words(self, x):\n x = x.translate(str.maketrans('', '', string.punctuation))\n x = 
x.translate(str.maketrans('', '', '1234567890ªº¡¿'))\n words = x.split(' ')\n words = [word for word in words if word not in self.stop_words]\n x = str(' '.join(words))\n\n return x\n\n def stem_sentence(self, sentence):\n # Stem the sentence\n stemmed_text = [self.stemmer.stem(word) for word in word_tokenize(sentence)]\n\n return \" \".join(stemmed_text)\n\n def augment(self, x):\n try:\n return self.aug.augment(x)\n except:\n return None\n\n def clean_overview(self, df):\n # Execute the full cleaning process into every overview\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.delete_stop_words(x))\n df['overview'] = df['overview'].apply(lambda x: self.stem_sentence(x))\n df['overview'] = df.apply(lambda x: self.get_actors(x['cast']) + ' ' + x['overview'], axis=1)\n df['overview'] = df.apply(lambda x: self.get_director(x['crew']) + x['overview'], axis=1)\n df['overview'] = df['overview'].apply(lambda x: self.normalize(x))\n df['overview'] = df['overview'].apply(lambda x: self.delete_stop_words(x))\n\n return df\n\n # Get staff and paste to overview\n @staticmethod\n def eval_cell(cell):\n\n try:\n\n cell_array = eval(cell)\n\n except:\n\n cell_array = []\n\n return cell_array\n\n def get_actors(self, cast):\n\n eval_cast = self.eval_cell(cast)\n\n if len(eval_cast) > 2:\n up = 3\n else:\n up = len(eval_cast)\n\n actors = ''\n\n for i in range(0, up):\n actor = eval_cast[i]['name']\n actor = self.normalize(actor.replace(' ', '_').lower())\n\n actors = actors + ' ' + actor\n\n return actors\n\n def get_director(self, crew):\n\n eval_crew = self.eval_cell(crew)\n\n directors = [member['name'] for member in eval_crew if member['job'] == 'Director']\n directors = [self.normalize(director.replace(' ', '_').lower()) for director in directors]\n directors = str(' '.join(directors))\n\n return directors\n\n def paste_cast(self, data):\n\n data['overview'] = data.apply(lambda x: 
self.get_actors(x['cast']) + ' ' + x['overview'], axis=1)\n data['overview'] = data.apply(lambda x: self.get_director(x['crew']) + x['overview'], axis=1)\n\n return data\n\n # Split train_test\n def split_data(self, data):\n\n overviews = data['overview'].values\n y = data['like'].values\n\n overviews_train, overviews_test, y_train, y_test = train_test_split(overviews, y, test_size=0.15, stratify=y,\n random_state=9)\n\n return overviews_train, overviews_test, y_train, y_test\n\n def fit_tokenizer(self, overviews_train, num_words):\n self.tokenizer = Tokenizer(num_words)\n self.tokenizer.fit_on_texts(overviews_train)\n # Adding 1 because of reserved 0 index\n self.vocab_size = len(self.tokenizer.word_index) + 1\n\n def tokenize_overview(self, overviews, max_len):\n\n X = self.tokenizer.texts_to_sequences(overviews)\n # print(len(max(X, key=len)))\n from keras.preprocessing.sequence import pad_sequences\n\n # We pad the sentence for the left to fit with max_len\n X = pad_sequences(X, padding='pre', maxlen=max_len)\n # print(X[1])\n\n return X\n\n def process(self, data, train_dev):\n\n df = self.clean_overview(data)\n df = self.paste_cast(df)\n\n if train_dev:\n\n X_train, X_test, y_train, y_test = self.split_data(df)\n\n self.fit_tokenizer(X_train, self.n_words)\n X_train = self.tokenize_overview(X_train, self.max_len)\n X_test = self.tokenize_overview(X_test, self.max_len)\n\n return X_train, X_test\n\n else:\n\n X = df['overview'].values\n X = self.tokenize_overview(X, self.max_len)\n\n return X\n\n\n",
"step-ids": [
12,
16,
17,
18,
19
]
}
|
[
12,
16,
17,
18,
19
] |
<|reserved_special_token_0|>
def set_gcs_credentials():
if os.path.exists(GLOBALS.google_application_credentials):
return
secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.
aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)
response = secrets_client.get_secret_value(SecretId=GLOBALS.
gcs_key_secret_arn)
os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),
exist_ok=True)
with open(GLOBALS.google_application_credentials, 'w') as f:
f.write(response['SecretString'])
def get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,
exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()
) ->List[str]:
"""Get all matching files in GCS.
Adapted from data API.
"""
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(GLOBALS.
google_application_credentials)
matches: List[str] = list()
num_matches: int = 0
blobs = list(storage_client.list_blobs(bucket, prefix=prefix,
max_results=limit))
LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')
for blob in blobs:
if not extensions or any(blob.name.endswith(ext) for ext in extensions
):
matches.append(blob.name)
num_matches += 1
if exit_after_max and num_matches >= exit_after_max:
break
return matches
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def set_gcs_credentials():
if os.path.exists(GLOBALS.google_application_credentials):
return
secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.
aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)
response = secrets_client.get_secret_value(SecretId=GLOBALS.
gcs_key_secret_arn)
os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),
exist_ok=True)
with open(GLOBALS.google_application_credentials, 'w') as f:
f.write(response['SecretString'])
def get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,
exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()
) ->List[str]:
"""Get all matching files in GCS.
Adapted from data API.
"""
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(GLOBALS.
google_application_credentials)
matches: List[str] = list()
num_matches: int = 0
blobs = list(storage_client.list_blobs(bucket, prefix=prefix,
max_results=limit))
LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')
for blob in blobs:
if not extensions or any(blob.name.endswith(ext) for ext in extensions
):
matches.append(blob.name)
num_matches += 1
if exit_after_max and num_matches >= exit_after_max:
break
return matches
def get_gs_subfolders(bucket: str, prefix: str) ->List[str]:
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(GLOBALS.
google_application_credentials)
delimiter = '/'
if not prefix.endswith(delimiter):
prefix = prefix + delimiter
blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=
delimiter)
try:
_ = next(blobs)
except StopIteration:
pass
found_prefixes = [found_prefix.lstrip(prefix).strip('/') for
found_prefix in blobs.prefixes]
return found_prefixes
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def set_gcs_credentials():
if os.path.exists(GLOBALS.google_application_credentials):
return
secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.
aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)
response = secrets_client.get_secret_value(SecretId=GLOBALS.
gcs_key_secret_arn)
os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),
exist_ok=True)
with open(GLOBALS.google_application_credentials, 'w') as f:
f.write(response['SecretString'])
def get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,
exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()
) ->List[str]:
"""Get all matching files in GCS.
Adapted from data API.
"""
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(GLOBALS.
google_application_credentials)
matches: List[str] = list()
num_matches: int = 0
blobs = list(storage_client.list_blobs(bucket, prefix=prefix,
max_results=limit))
LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')
for blob in blobs:
if not extensions or any(blob.name.endswith(ext) for ext in extensions
):
matches.append(blob.name)
num_matches += 1
if exit_after_max and num_matches >= exit_after_max:
break
return matches
def get_gs_subfolders(bucket: str, prefix: str) ->List[str]:
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(GLOBALS.
google_application_credentials)
delimiter = '/'
if not prefix.endswith(delimiter):
prefix = prefix + delimiter
blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=
delimiter)
try:
_ = next(blobs)
except StopIteration:
pass
found_prefixes = [found_prefix.lstrip(prefix).strip('/') for
found_prefix in blobs.prefixes]
return found_prefixes
def get_gs_file_as_text(bucket: str, key: str) ->str:
"""
Get contents of a file as a string
"""
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(GLOBALS.
google_application_credentials)
blob = storage_client.get_bucket(bucket).get_blob(key)
return blob.download_as_text(encoding='utf-8')
<|reserved_special_token_1|>
import os
from typing import List, Optional, Sequence
import boto3
from google.cloud import storage
from ..globals import GLOBALS, LOGGER
def set_gcs_credentials():
if os.path.exists(GLOBALS.google_application_credentials):
return
secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.
aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)
response = secrets_client.get_secret_value(SecretId=GLOBALS.
gcs_key_secret_arn)
os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),
exist_ok=True)
with open(GLOBALS.google_application_credentials, 'w') as f:
f.write(response['SecretString'])
def get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,
exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()
) ->List[str]:
"""Get all matching files in GCS.
Adapted from data API.
"""
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(GLOBALS.
google_application_credentials)
matches: List[str] = list()
num_matches: int = 0
blobs = list(storage_client.list_blobs(bucket, prefix=prefix,
max_results=limit))
LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')
for blob in blobs:
if not extensions or any(blob.name.endswith(ext) for ext in extensions
):
matches.append(blob.name)
num_matches += 1
if exit_after_max and num_matches >= exit_after_max:
break
return matches
def get_gs_subfolders(bucket: str, prefix: str) ->List[str]:
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(GLOBALS.
google_application_credentials)
delimiter = '/'
if not prefix.endswith(delimiter):
prefix = prefix + delimiter
blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=
delimiter)
try:
_ = next(blobs)
except StopIteration:
pass
found_prefixes = [found_prefix.lstrip(prefix).strip('/') for
found_prefix in blobs.prefixes]
return found_prefixes
def get_gs_file_as_text(bucket: str, key: str) ->str:
"""
Get contents of a file as a string
"""
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(GLOBALS.
google_application_credentials)
blob = storage_client.get_bucket(bucket).get_blob(key)
return blob.download_as_text(encoding='utf-8')
<|reserved_special_token_1|>
import os
from typing import List, Optional, Sequence
import boto3
from google.cloud import storage
from ..globals import GLOBALS, LOGGER
def set_gcs_credentials():
    """Materialize the GCS service-account key file from AWS Secrets Manager.

    No-op when the credentials file already exists; otherwise fetches the
    secret and writes it to GLOBALS.google_application_credentials,
    creating parent directories as needed.
    """
    target = GLOBALS.google_application_credentials
    if os.path.exists(target):
        return
    secret = boto3.client(
        "secretsmanager",
        region_name=GLOBALS.aws_region,
        endpoint_url=GLOBALS.aws_endpoint_uri,
    ).get_secret_value(SecretId=GLOBALS.gcs_key_secret_arn)
    os.makedirs(os.path.dirname(target), exist_ok=True)
    with open(target, "w") as f:
        f.write(secret["SecretString"])
def get_gs_files(
    bucket: str,
    prefix: str,
    limit: Optional[int] = None,
    exit_after_max: Optional[int] = None,
    extensions: Sequence[str] = tuple(),
) -> List[str]:
    """Get all matching files in GCS.

    Lists up to `limit` blobs under gs://bucket/prefix, keeps those whose
    names end with one of `extensions` (all of them when `extensions` is
    empty), and stops early after `exit_after_max` matches.

    Adapted from data API.
    """
    set_gcs_credentials()
    client = storage.Client.from_service_account_json(
        GLOBALS.google_application_credentials
    )
    blobs = list(client.list_blobs(bucket, prefix=prefix, max_results=limit))
    LOGGER.info(f"Found files under gs://{bucket}/{prefix}: {blobs}")
    # str.endswith accepts a tuple of suffixes: one call instead of a loop.
    wanted = tuple(extensions)
    matches: List[str] = []
    for blob in blobs:
        if wanted and not blob.name.endswith(wanted):
            continue
        matches.append(blob.name)
        if exit_after_max and len(matches) >= exit_after_max:
            break
    return matches
def get_gs_subfolders(
    bucket: str,
    prefix: str,
) -> List[str]:
    """Return the immediate "subfolder" names under gs://bucket/prefix.

    Uses delimiter-based listing so GCS reports common prefixes (virtual
    folders) one level below `prefix`. Folder names are returned without
    the parent prefix and without surrounding slashes.
    """
    set_gcs_credentials()
    storage_client = storage.Client.from_service_account_json(
        GLOBALS.google_application_credentials
    )
    delimiter = "/"
    if not prefix.endswith(delimiter):
        prefix = prefix + delimiter
    blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=delimiter)
    # blobs.prefixes is only populated after a page has been fetched, so
    # consume (at most) one item first.
    try:
        _ = next(blobs)
    except StopIteration:
        pass
    # BUGFIX: the previous code used found_prefix.lstrip(prefix), but
    # str.lstrip strips any *characters* appearing in `prefix`, not the
    # prefix substring — e.g. prefix "data/" would also eat a leading "d",
    # "a", or "t" from the folder name. Slice the prefix off instead.
    found_prefixes = [
        found_prefix[len(prefix):].strip("/") for found_prefix in blobs.prefixes
    ]
    return found_prefixes
def get_gs_file_as_text(
    bucket: str,
    key: str,
) -> str:
    """
    Get contents of a file as a string

    :raises FileNotFoundError: if the object does not exist in the bucket
    """
    set_gcs_credentials()
    storage_client = storage.Client.from_service_account_json(
        GLOBALS.google_application_credentials
    )

    blob = storage_client.get_bucket(bucket).get_blob(key)
    if blob is None:
        # get_blob returns None for a missing object; fail with a clear error
        # instead of an AttributeError on the download call below.
        raise FileNotFoundError(f"gs://{bucket}/{key} does not exist")
    return blob.download_as_text(encoding="utf-8")
|
flexible
|
{
"blob_id": "a5eeafef694db04770833a4063358e8f32f467b0",
"index": 8310,
"step-1": "<mask token>\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.\n aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)\n response = secrets_client.get_secret_value(SecretId=GLOBALS.\n gcs_key_secret_arn)\n os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True)\n with open(GLOBALS.google_application_credentials, 'w') as f:\n f.write(response['SecretString'])\n\n\ndef get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,\n exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()\n ) ->List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n matches: List[str] = list()\n num_matches: int = 0\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix,\n max_results=limit))\n LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions\n ):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n return matches\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.\n aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)\n response = secrets_client.get_secret_value(SecretId=GLOBALS.\n gcs_key_secret_arn)\n os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True)\n with open(GLOBALS.google_application_credentials, 'w') as f:\n f.write(response['SecretString'])\n\n\ndef get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,\n exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()\n ) ->List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n matches: List[str] = list()\n num_matches: int = 0\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix,\n max_results=limit))\n LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions\n ):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n return matches\n\n\ndef get_gs_subfolders(bucket: str, prefix: str) ->List[str]:\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n delimiter = '/'\n if not prefix.endswith(delimiter):\n prefix = prefix + delimiter\n blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=\n delimiter)\n try:\n _ = next(blobs)\n except StopIteration:\n pass\n found_prefixes = [found_prefix.lstrip(prefix).strip('/') for\n found_prefix in blobs.prefixes]\n return found_prefixes\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.\n aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)\n response = secrets_client.get_secret_value(SecretId=GLOBALS.\n gcs_key_secret_arn)\n os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True)\n with open(GLOBALS.google_application_credentials, 'w') as f:\n f.write(response['SecretString'])\n\n\ndef get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,\n exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()\n ) ->List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n matches: List[str] = list()\n num_matches: int = 0\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix,\n max_results=limit))\n LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions\n ):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n return matches\n\n\ndef get_gs_subfolders(bucket: str, prefix: str) ->List[str]:\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n delimiter = '/'\n if not prefix.endswith(delimiter):\n prefix = prefix + delimiter\n blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=\n delimiter)\n try:\n _ = next(blobs)\n except StopIteration:\n pass\n found_prefixes = [found_prefix.lstrip(prefix).strip('/') for\n found_prefix in blobs.prefixes]\n return found_prefixes\n\n\ndef get_gs_file_as_text(bucket: str, key: str) ->str:\n \"\"\"\n Get contents of a file as a string\n \"\"\"\n set_gcs_credentials()\n 
storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n blob = storage_client.get_bucket(bucket).get_blob(key)\n return blob.download_as_text(encoding='utf-8')\n",
"step-4": "import os\nfrom typing import List, Optional, Sequence\nimport boto3\nfrom google.cloud import storage\nfrom ..globals import GLOBALS, LOGGER\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.\n aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)\n response = secrets_client.get_secret_value(SecretId=GLOBALS.\n gcs_key_secret_arn)\n os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True)\n with open(GLOBALS.google_application_credentials, 'w') as f:\n f.write(response['SecretString'])\n\n\ndef get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,\n exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()\n ) ->List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n matches: List[str] = list()\n num_matches: int = 0\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix,\n max_results=limit))\n LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions\n ):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n return matches\n\n\ndef get_gs_subfolders(bucket: str, prefix: str) ->List[str]:\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n delimiter = '/'\n if not prefix.endswith(delimiter):\n prefix = prefix + delimiter\n blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=\n delimiter)\n try:\n _ = next(blobs)\n except StopIteration:\n pass\n found_prefixes = [found_prefix.lstrip(prefix).strip('/') for\n found_prefix in blobs.prefixes]\n return found_prefixes\n\n\ndef 
get_gs_file_as_text(bucket: str, key: str) ->str:\n \"\"\"\n Get contents of a file as a string\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n blob = storage_client.get_bucket(bucket).get_blob(key)\n return blob.download_as_text(encoding='utf-8')\n",
"step-5": "import os\nfrom typing import List, Optional, Sequence\n\nimport boto3\nfrom google.cloud import storage\n\nfrom ..globals import GLOBALS, LOGGER\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n\n secrets_client = boto3.client(\n \"secretsmanager\",\n region_name=GLOBALS.aws_region,\n endpoint_url=GLOBALS.aws_endpoint_uri,\n )\n\n response = secrets_client.get_secret_value(SecretId=GLOBALS.gcs_key_secret_arn)\n\n os.makedirs(\n os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True,\n )\n\n with open(GLOBALS.google_application_credentials, \"w\") as f:\n f.write(response[\"SecretString\"])\n\n\ndef get_gs_files(\n bucket: str,\n prefix: str,\n limit: Optional[int] = None,\n exit_after_max: Optional[int] = None,\n extensions: Sequence[str] = tuple(),\n) -> List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n\n storage_client = storage.Client.from_service_account_json(\n GLOBALS.google_application_credentials\n )\n\n matches: List[str] = list()\n num_matches: int = 0\n\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix, max_results=limit))\n\n LOGGER.info(f\"Found files under gs://{bucket}/{prefix}: {blobs}\")\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n\n return matches\n\n\ndef get_gs_subfolders(\n bucket: str,\n prefix: str,\n) -> List[str]:\n set_gcs_credentials()\n\n storage_client = storage.Client.from_service_account_json(\n GLOBALS.google_application_credentials\n )\n\n delimiter = \"/\"\n if not prefix.endswith(delimiter):\n prefix = prefix + delimiter\n\n blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=delimiter)\n\n try:\n _ = next(blobs)\n except StopIteration:\n pass\n\n found_prefixes = [\n 
found_prefix.lstrip(prefix).strip(\"/\") for found_prefix in blobs.prefixes\n ]\n\n return found_prefixes\n\n\ndef get_gs_file_as_text(\n bucket: str,\n key: str,\n) -> str:\n \"\"\"\n Get contents of a file as a string\n \"\"\"\n set_gcs_credentials()\n\n storage_client = storage.Client.from_service_account_json(\n GLOBALS.google_application_credentials\n )\n\n blob = storage_client.get_bucket(bucket).get_blob(key)\n return blob.download_as_text(encoding=\"utf-8\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def test_evm_contracts_data(globaldb):
"""Test that all evm contract entries in the packaged global DB have legal data"""
serialized_chain_ids = [x.serialize_for_db() for x in ChainID]
with globaldb.conn.read_ctx() as cursor:
cursor.execute(
'SELECT address, chain_id, abi, deployed_block FROM contract_data')
for entry in cursor:
assert is_checksum_address(entry[0])
assert isinstance(entry[1], int) and entry[1
] in serialized_chain_ids
assert isinstance(entry[2], int)
assert isinstance(entry[3], int) and entry[3] > 0
<|reserved_special_token_0|>
@pytest.mark.parametrize('sql_vm_instructions_cb', [2])
def test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):
"""
Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.
"""
with GlobalDBHandler().conn.read_ctx() as cursor:
cursor.execute(
'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'
)
address, abi = cursor.fetchone()
cursor.execute(
'DELETE FROM contract_data WHERE address=? AND chain_id=1', (
address,))
cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))
ethereum_inquirer.contracts.contract(address)
with GlobalDBHandler().conn.read_ctx() as cursor:
cursor.execute(
'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'
, (address, abi))
assert cursor.fetchone()[0] == 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_evm_contracts_data(globaldb):
"""Test that all evm contract entries in the packaged global DB have legal data"""
serialized_chain_ids = [x.serialize_for_db() for x in ChainID]
with globaldb.conn.read_ctx() as cursor:
cursor.execute(
'SELECT address, chain_id, abi, deployed_block FROM contract_data')
for entry in cursor:
assert is_checksum_address(entry[0])
assert isinstance(entry[1], int) and entry[1
] in serialized_chain_ids
assert isinstance(entry[2], int)
assert isinstance(entry[3], int) and entry[3] > 0
def test_evm_abi_data(globaldb):
"""Test that the evm abi entries in the packaged globalDB have legal data"""
abis_set = {0}
with globaldb.conn.read_ctx() as cursor:
cursor.execute('SELECT id, value FROM contract_abi')
for entry in cursor:
assert isinstance(entry[0], int)
assert isinstance(entry[1], str)
json_abi = json.loads(entry[1])
serialized_abi = json.dumps(json_abi, separators=(',', ':'))
assert serialized_abi == entry[1]
assert entry[1] not in abis_set
abis_set.add(entry[1])
@pytest.mark.parametrize('sql_vm_instructions_cb', [2])
def test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):
"""
Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.
"""
with GlobalDBHandler().conn.read_ctx() as cursor:
cursor.execute(
'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'
)
address, abi = cursor.fetchone()
cursor.execute(
'DELETE FROM contract_data WHERE address=? AND chain_id=1', (
address,))
cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))
ethereum_inquirer.contracts.contract(address)
with GlobalDBHandler().conn.read_ctx() as cursor:
cursor.execute(
'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'
, (address, abi))
assert cursor.fetchone()[0] == 1
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if TYPE_CHECKING:
from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer
def test_evm_contracts_data(globaldb):
"""Test that all evm contract entries in the packaged global DB have legal data"""
serialized_chain_ids = [x.serialize_for_db() for x in ChainID]
with globaldb.conn.read_ctx() as cursor:
cursor.execute(
'SELECT address, chain_id, abi, deployed_block FROM contract_data')
for entry in cursor:
assert is_checksum_address(entry[0])
assert isinstance(entry[1], int) and entry[1
] in serialized_chain_ids
assert isinstance(entry[2], int)
assert isinstance(entry[3], int) and entry[3] > 0
def test_evm_abi_data(globaldb):
"""Test that the evm abi entries in the packaged globalDB have legal data"""
abis_set = {0}
with globaldb.conn.read_ctx() as cursor:
cursor.execute('SELECT id, value FROM contract_abi')
for entry in cursor:
assert isinstance(entry[0], int)
assert isinstance(entry[1], str)
json_abi = json.loads(entry[1])
serialized_abi = json.dumps(json_abi, separators=(',', ':'))
assert serialized_abi == entry[1]
assert entry[1] not in abis_set
abis_set.add(entry[1])
@pytest.mark.parametrize('sql_vm_instructions_cb', [2])
def test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):
"""
Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.
"""
with GlobalDBHandler().conn.read_ctx() as cursor:
cursor.execute(
'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'
)
address, abi = cursor.fetchone()
cursor.execute(
'DELETE FROM contract_data WHERE address=? AND chain_id=1', (
address,))
cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))
ethereum_inquirer.contracts.contract(address)
with GlobalDBHandler().conn.read_ctx() as cursor:
cursor.execute(
'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'
, (address, abi))
assert cursor.fetchone()[0] == 1
<|reserved_special_token_1|>
import json
from typing import TYPE_CHECKING
import pytest
from eth_utils import is_checksum_address
from rotkehlchen.globaldb.handler import GlobalDBHandler
from rotkehlchen.types import ChainID
if TYPE_CHECKING:
from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer
def test_evm_contracts_data(globaldb):
"""Test that all evm contract entries in the packaged global DB have legal data"""
serialized_chain_ids = [x.serialize_for_db() for x in ChainID]
with globaldb.conn.read_ctx() as cursor:
cursor.execute(
'SELECT address, chain_id, abi, deployed_block FROM contract_data')
for entry in cursor:
assert is_checksum_address(entry[0])
assert isinstance(entry[1], int) and entry[1
] in serialized_chain_ids
assert isinstance(entry[2], int)
assert isinstance(entry[3], int) and entry[3] > 0
def test_evm_abi_data(globaldb):
"""Test that the evm abi entries in the packaged globalDB have legal data"""
abis_set = {0}
with globaldb.conn.read_ctx() as cursor:
cursor.execute('SELECT id, value FROM contract_abi')
for entry in cursor:
assert isinstance(entry[0], int)
assert isinstance(entry[1], str)
json_abi = json.loads(entry[1])
serialized_abi = json.dumps(json_abi, separators=(',', ':'))
assert serialized_abi == entry[1]
assert entry[1] not in abis_set
abis_set.add(entry[1])
@pytest.mark.parametrize('sql_vm_instructions_cb', [2])
def test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):
"""
Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.
"""
with GlobalDBHandler().conn.read_ctx() as cursor:
cursor.execute(
'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'
)
address, abi = cursor.fetchone()
cursor.execute(
'DELETE FROM contract_data WHERE address=? AND chain_id=1', (
address,))
cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))
ethereum_inquirer.contracts.contract(address)
with GlobalDBHandler().conn.read_ctx() as cursor:
cursor.execute(
'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'
, (address, abi))
assert cursor.fetchone()[0] == 1
<|reserved_special_token_1|>
import json
from typing import TYPE_CHECKING
import pytest
from eth_utils import is_checksum_address
from rotkehlchen.globaldb.handler import GlobalDBHandler
from rotkehlchen.types import ChainID
if TYPE_CHECKING:
from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer
def test_evm_contracts_data(globaldb):
    """Test that all evm contract entries in the packaged global DB have legal data"""
    valid_chain_ids = {chain.serialize_for_db() for chain in ChainID}
    with globaldb.conn.read_ctx() as cursor:
        rows = cursor.execute('SELECT address, chain_id, abi, deployed_block FROM contract_data')
        for address, chain_id, abi, deployed_block in rows:
            assert is_checksum_address(address)
            assert isinstance(chain_id, int)
            assert chain_id in valid_chain_ids
            assert isinstance(abi, int)
            assert isinstance(deployed_block, int)
            assert deployed_block > 0
def test_evm_abi_data(globaldb):
    """Test that the evm abi entries in the packaged globalDB have legal data"""
    seen_abis = {0}
    with globaldb.conn.read_ctx() as cursor:
        cursor.execute('SELECT id, value FROM contract_abi')
        for abi_id, abi_value in cursor:
            assert isinstance(abi_id, int)
            assert isinstance(abi_value, str)
            # The stored abi must be the most compressed json serialization
            # possible (no whitespace) ...
            compact = json.dumps(json.loads(abi_value), separators=(',', ':'))
            assert compact == abi_value
            # ... and must be unique across all entries
            assert abi_value not in seen_abis
            seen_abis.add(abi_value)
@pytest.mark.parametrize('sql_vm_instructions_cb', [2])
def test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):
    """
    Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.
    """
    with GlobalDBHandler().conn.read_ctx() as cursor:
        # Pick an arbitrary mainnet contract together with its abi and delete both
        cursor.execute(
            'SELECT contract_data.address, contract_abi.value FROM contract_data '
            'INNER JOIN contract_abi ON contract_data.abi=contract_abi.id '
            'WHERE chain_id=1 LIMIT 1',
        )
        address, abi = cursor.fetchone()  # There has to be at least one entry
        cursor.execute(
            'DELETE FROM contract_data WHERE address=? AND chain_id=1',
            (address,),
        )
        cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))

    # Querying the contract must fall through to the packaged global DB, and
    # "database packaged_db is locked" must not be raised either
    ethereum_inquirer.contracts.contract(address)

    with GlobalDBHandler().conn.read_ctx() as cursor:
        # The contract and its abi must have been copied back into the global db
        cursor.execute(
            'SELECT COUNT(*) FROM contract_data '
            'INNER JOIN contract_abi ON contract_data.abi=contract_abi.id '
            'WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?',
            (address, abi),
        )
        assert cursor.fetchone()[0] == 1
|
flexible
|
{
"blob_id": "52dc8a4f9165a88dddc1da16e0adb045c4d851ed",
"index": 5017,
"step-1": "<mask token>\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1\n ] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\n<mask token>\n\n\n@pytest.mark.parametrize('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'\n )\n address, abi = cursor.fetchone()\n cursor.execute(\n 'DELETE FROM contract_data WHERE address=? AND chain_id=1', (\n address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n ethereum_inquirer.contracts.contract(address)\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'\n , (address, abi))\n assert cursor.fetchone()[0] == 1\n",
"step-2": "<mask token>\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1\n ] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\ndef test_evm_abi_data(globaldb):\n \"\"\"Test that the evm abi entries in the packaged globalDB have legal data\"\"\"\n abis_set = {0}\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT id, value FROM contract_abi')\n for entry in cursor:\n assert isinstance(entry[0], int)\n assert isinstance(entry[1], str)\n json_abi = json.loads(entry[1])\n serialized_abi = json.dumps(json_abi, separators=(',', ':'))\n assert serialized_abi == entry[1]\n assert entry[1] not in abis_set\n abis_set.add(entry[1])\n\n\n@pytest.mark.parametrize('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'\n )\n address, abi = cursor.fetchone()\n cursor.execute(\n 'DELETE FROM contract_data WHERE address=? 
AND chain_id=1', (\n address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n ethereum_inquirer.contracts.contract(address)\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'\n , (address, abi))\n assert cursor.fetchone()[0] == 1\n",
"step-3": "<mask token>\nif TYPE_CHECKING:\n from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1\n ] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\ndef test_evm_abi_data(globaldb):\n \"\"\"Test that the evm abi entries in the packaged globalDB have legal data\"\"\"\n abis_set = {0}\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT id, value FROM contract_abi')\n for entry in cursor:\n assert isinstance(entry[0], int)\n assert isinstance(entry[1], str)\n json_abi = json.loads(entry[1])\n serialized_abi = json.dumps(json_abi, separators=(',', ':'))\n assert serialized_abi == entry[1]\n assert entry[1] not in abis_set\n abis_set.add(entry[1])\n\n\n@pytest.mark.parametrize('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'\n )\n address, abi = cursor.fetchone()\n cursor.execute(\n 'DELETE FROM contract_data WHERE address=? 
AND chain_id=1', (\n address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n ethereum_inquirer.contracts.contract(address)\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'\n , (address, abi))\n assert cursor.fetchone()[0] == 1\n",
"step-4": "import json\nfrom typing import TYPE_CHECKING\nimport pytest\nfrom eth_utils import is_checksum_address\nfrom rotkehlchen.globaldb.handler import GlobalDBHandler\nfrom rotkehlchen.types import ChainID\nif TYPE_CHECKING:\n from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1\n ] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\ndef test_evm_abi_data(globaldb):\n \"\"\"Test that the evm abi entries in the packaged globalDB have legal data\"\"\"\n abis_set = {0}\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT id, value FROM contract_abi')\n for entry in cursor:\n assert isinstance(entry[0], int)\n assert isinstance(entry[1], str)\n json_abi = json.loads(entry[1])\n serialized_abi = json.dumps(json_abi, separators=(',', ':'))\n assert serialized_abi == entry[1]\n assert entry[1] not in abis_set\n abis_set.add(entry[1])\n\n\n@pytest.mark.parametrize('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'\n )\n address, abi = cursor.fetchone()\n cursor.execute(\n 'DELETE FROM contract_data WHERE address=? 
AND chain_id=1', (\n address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n ethereum_inquirer.contracts.contract(address)\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'\n , (address, abi))\n assert cursor.fetchone()[0] == 1\n",
"step-5": "import json\nfrom typing import TYPE_CHECKING\n\nimport pytest\nfrom eth_utils import is_checksum_address\n\nfrom rotkehlchen.globaldb.handler import GlobalDBHandler\nfrom rotkehlchen.types import ChainID\n\nif TYPE_CHECKING:\n from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\ndef test_evm_abi_data(globaldb):\n \"\"\"Test that the evm abi entries in the packaged globalDB have legal data\"\"\"\n abis_set = {0}\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT id, value FROM contract_abi')\n for entry in cursor:\n assert isinstance(entry[0], int)\n # read the abi, and make sure it's the most compressed version it can be\n # and that it's unique\n assert isinstance(entry[1], str)\n json_abi = json.loads(entry[1])\n serialized_abi = json.dumps(json_abi, separators=(',', ':'))\n assert serialized_abi == entry[1]\n assert entry[1] not in abis_set\n abis_set.add(entry[1])\n\n\n@pytest.mark.parametrize('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n # Delete one contract and its abi\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN '\n 'contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 
1',\n )\n (address, abi) = cursor.fetchone() # There has to be at least one entry\n cursor.execute('DELETE FROM contract_data WHERE address=? AND chain_id=1', (address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n\n # Now query the contract, let it get to packaged global DB and also see that\n # database packaged_db is locked is also not raised\n ethereum_inquirer.contracts.contract(address)\n\n with GlobalDBHandler().conn.read_ctx() as cursor:\n # Check that the contract and the abi were copied to the global db\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN '\n 'contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND '\n 'contract_data.address=? AND contract_abi.value=?',\n (address, abi),\n )\n assert cursor.fetchone()[0] == 1\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class RNNClassifier(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RNNClassifier(nn.Module):
def __init__(self, batch_size, num_classes, hidden_size, vocab_size,
embed_size, weights):
super(RNNClassifier, self).__init__()
self.batch_size = batch_size
self.num_classes = num_classes
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.embed_size = embed_size
self.word_embeddings = nn.Embedding(vocab_size, embed_size)
self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False
)
self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2,
bidirectional=True)
self.proj = nn.Linear(4 * hidden_size, num_classes)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class RNNClassifier(nn.Module):
def __init__(self, batch_size, num_classes, hidden_size, vocab_size,
embed_size, weights):
super(RNNClassifier, self).__init__()
self.batch_size = batch_size
self.num_classes = num_classes
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.embed_size = embed_size
self.word_embeddings = nn.Embedding(vocab_size, embed_size)
self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False
)
self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2,
bidirectional=True)
self.proj = nn.Linear(4 * hidden_size, num_classes)
def forward(self, input_sentence):
batch_size = input_sentence.size()[0]
input = self.word_embeddings(input_sentence)
input = input.permute(1, 0, 2).contiguous()
h_0 = Variable(torch.zeros(4, batch_size, self.hidden_size).cuda())
output, h_n = self.rnn(input, h_0)
h_n = h_n.permute(1, 0, 2).contiguous()
h_n = h_n.contiguous().view(h_n.size()[0], h_n.size()[1] * h_n.size
()[2])
logtis = self.proj(h_n)
return logtis
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
class RNNClassifier(nn.Module):
def __init__(self, batch_size, num_classes, hidden_size, vocab_size,
embed_size, weights):
super(RNNClassifier, self).__init__()
self.batch_size = batch_size
self.num_classes = num_classes
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.embed_size = embed_size
self.word_embeddings = nn.Embedding(vocab_size, embed_size)
self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False
)
self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2,
bidirectional=True)
self.proj = nn.Linear(4 * hidden_size, num_classes)
def forward(self, input_sentence):
batch_size = input_sentence.size()[0]
input = self.word_embeddings(input_sentence)
input = input.permute(1, 0, 2).contiguous()
h_0 = Variable(torch.zeros(4, batch_size, self.hidden_size).cuda())
output, h_n = self.rnn(input, h_0)
h_n = h_n.permute(1, 0, 2).contiguous()
h_n = h_n.contiguous().view(h_n.size()[0], h_n.size()[1] * h_n.size
()[2])
logtis = self.proj(h_n)
return logtis
<|reserved_special_token_1|>
"""
Created on 01/10/18.
Author: morgan
Copyright defined in text_classification/LICENSE.txt
"""
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
class RNNClassifier(nn.Module):
    """Bidirectional two-layer RNN sentence classifier.

    The final hidden states of all four RNN directions/layers are
    concatenated and projected to class logits.
    """

    def __init__(self, batch_size, num_classes, hidden_size, vocab_size, embed_size, weights):
        """
        Args:
            batch_size: nominal batch size (stored for compatibility; the
                actual batch size is read from the input at forward time).
            num_classes: number of output classes.
            hidden_size: RNN hidden-state size per direction.
            vocab_size: size of the embedding vocabulary.
            embed_size: embedding dimensionality.
            weights: pre-trained embedding matrix of shape
                (vocab_size, embed_size), e.g. GloVe vectors.
        """
        super(RNNClassifier, self).__init__()
        self.batch_size = batch_size
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.embed_size = embed_size
        # Embedding lookup table initialised from the pre-trained vectors
        # and frozen (not fine-tuned during training).
        self.word_embeddings = nn.Embedding(vocab_size, embed_size)
        self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False)
        # num_layers=2 with bidirectional=True yields 4 final hidden states.
        self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2, bidirectional=True)
        self.proj = nn.Linear(4 * hidden_size, num_classes)

    def forward(self, input_sentence):
        """Classify a batch of token-id sequences.

        Args:
            input_sentence: LongTensor of shape (batch_size, seq_len).

        Returns:
            Logits tensor of shape (batch_size, num_classes).
        """
        batch_size = input_sentence.size(0)
        # (batch, seq_len) -> (batch, seq_len, embed) -> (seq_len, batch, embed)
        embedded = self.word_embeddings(input_sentence)
        embedded = embedded.permute(1, 0, 2).contiguous()
        # Fix: allocate the initial hidden state on the same device/dtype as
        # the embedded input instead of unconditionally calling .cuda(),
        # which crashed on CPU-only machines.
        h_0 = torch.zeros(
            4, batch_size, self.hidden_size,
            device=embedded.device, dtype=embedded.dtype,
        )
        _, h_n = self.rnn(embedded, h_0)
        # h_n: (4, batch, hidden) -> (batch, 4, hidden) -> (batch, 4*hidden)
        h_n = h_n.permute(1, 0, 2).contiguous().view(batch_size, 4 * self.hidden_size)
        logits = self.proj(h_n)
        return logits
|
flexible
|
{
"blob_id": "41417e3ce52edf6aee432886bbab6d16ec5bc88d",
"index": 164,
"step-1": "<mask token>\n\n\nclass RNNClassifier(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass RNNClassifier(nn.Module):\n\n def __init__(self, batch_size, num_classes, hidden_size, vocab_size,\n embed_size, weights):\n super(RNNClassifier, self).__init__()\n self.batch_size = batch_size\n self.num_classes = num_classes\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.word_embeddings = nn.Embedding(vocab_size, embed_size)\n self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False\n )\n self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2,\n bidirectional=True)\n self.proj = nn.Linear(4 * hidden_size, num_classes)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass RNNClassifier(nn.Module):\n\n def __init__(self, batch_size, num_classes, hidden_size, vocab_size,\n embed_size, weights):\n super(RNNClassifier, self).__init__()\n self.batch_size = batch_size\n self.num_classes = num_classes\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.word_embeddings = nn.Embedding(vocab_size, embed_size)\n self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False\n )\n self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2,\n bidirectional=True)\n self.proj = nn.Linear(4 * hidden_size, num_classes)\n\n def forward(self, input_sentence):\n batch_size = input_sentence.size()[0]\n input = self.word_embeddings(input_sentence)\n input = input.permute(1, 0, 2).contiguous()\n h_0 = Variable(torch.zeros(4, batch_size, self.hidden_size).cuda())\n output, h_n = self.rnn(input, h_0)\n h_n = h_n.permute(1, 0, 2).contiguous()\n h_n = h_n.contiguous().view(h_n.size()[0], h_n.size()[1] * h_n.size\n ()[2])\n logtis = self.proj(h_n)\n return logtis\n",
"step-4": "<mask token>\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\nclass RNNClassifier(nn.Module):\n\n def __init__(self, batch_size, num_classes, hidden_size, vocab_size,\n embed_size, weights):\n super(RNNClassifier, self).__init__()\n self.batch_size = batch_size\n self.num_classes = num_classes\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.word_embeddings = nn.Embedding(vocab_size, embed_size)\n self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False\n )\n self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2,\n bidirectional=True)\n self.proj = nn.Linear(4 * hidden_size, num_classes)\n\n def forward(self, input_sentence):\n batch_size = input_sentence.size()[0]\n input = self.word_embeddings(input_sentence)\n input = input.permute(1, 0, 2).contiguous()\n h_0 = Variable(torch.zeros(4, batch_size, self.hidden_size).cuda())\n output, h_n = self.rnn(input, h_0)\n h_n = h_n.permute(1, 0, 2).contiguous()\n h_n = h_n.contiguous().view(h_n.size()[0], h_n.size()[1] * h_n.size\n ()[2])\n logtis = self.proj(h_n)\n return logtis\n",
"step-5": "\"\"\"\nCreated on 01/10/18.\nAuthor: morgan\nCopyright defined in text_classification/LICENSE.txt\n\"\"\"\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\n\n\nclass RNNClassifier(nn.Module):\n def __init__(self, batch_size, num_classes, hidden_size, vocab_size, embed_size, weights):\n super(RNNClassifier, self).__init__()\n # weights: Pre-trained GloVe word_embeddings that we will use to create our word_embedding lookup table\n self.batch_size = batch_size\n self.num_classes = num_classes\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.embed_size = embed_size\n self.word_embeddings = nn.Embedding(vocab_size, embed_size) # initialize the lookup table\n # Assigning the look-up table to the pre-trained GloVe word embedding.\n self.word_embeddings.weight = nn.Parameter(weights, requires_grad=False)\n self.rnn = nn.RNN(embed_size, hidden_size, num_layers=2, bidirectional=True)\n self.proj = nn.Linear(4*hidden_size, num_classes)\n\n def forward(self, input_sentence):\n batch_size = input_sentence.size()[0]\n\n # input: [batch_size, seq_len], [64, 100]\n # print('input 0:', input_sentence.size())\n input = self.word_embeddings(input_sentence) # [batch_size, seq_len, embed_size]p\n # print('input 1:', input.size())\n input = input.permute(1, 0, 2).contiguous() # [seq_len, batch_size, embed_size]\n\n # Initiate hidden/cell state of the LSTM\n h_0 = Variable(torch.zeros(4, batch_size, self.hidden_size).cuda())\n # [4, batch_size, hidden_size]\n\n output, h_n = self.rnn(input, h_0)\n # h_n: [4, batch_size, hidden_size]\n # output: [max_len, batch_size, hidden]\n # print('h_n:', h_n.size())\n # print('output', output.size())\n h_n = h_n.permute(1, 0, 2).contiguous() #[batch_size, 4, hidden_size]\n # print('h_n1:', h_n.size())\n\n h_n = h_n.contiguous().view(h_n.size()[0], h_n.size()[1]*h_n.size()[2])\n # [batch_size, 4*hidden_size]\n\n # print('h_n2:', h_n.size())\n # 
final_hidden_state: [1, batch_size, hidden_size]\n\n logtis = self.proj(h_n)\n # print('logtis:', logtis.size())\n # final_output: [batch_size, num_classes]\n\n return logtis\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('fuser', '0009_movement_type')]
operations = [migrations.AlterField(model_name='movementpassmodel',
name='movement_type', field=models.ForeignKey(null=True, on_delete=
django.db.models.deletion.DO_NOTHING, to='fuser.movement_type'))]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [('fuser', '0009_movement_type')]
operations = [migrations.AlterField(model_name='movementpassmodel',
name='movement_type', field=models.ForeignKey(null=True, on_delete=
django.db.models.deletion.DO_NOTHING, to='fuser.movement_type'))]
<|reserved_special_token_1|>
# Generated by Django 3.1.5 on 2021-05-30 14:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Relax MovementPassModel.movement_type into a nullable foreign key."""

    dependencies = [('fuser', '0009_movement_type')]

    operations = [
        migrations.AlterField(
            model_name='movementpassmodel',
            name='movement_type',
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.DO_NOTHING,
                to='fuser.movement_type',
            ),
        ),
    ]
|
flexible
|
{
"blob_id": "848374ea7d706bbd2ef5a76489cabeff998acb82",
"index": 6040,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('fuser', '0009_movement_type')]\n operations = [migrations.AlterField(model_name='movementpassmodel',\n name='movement_type', field=models.ForeignKey(null=True, on_delete=\n django.db.models.deletion.DO_NOTHING, to='fuser.movement_type'))]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('fuser', '0009_movement_type')]\n operations = [migrations.AlterField(model_name='movementpassmodel',\n name='movement_type', field=models.ForeignKey(null=True, on_delete=\n django.db.models.deletion.DO_NOTHING, to='fuser.movement_type'))]\n",
"step-5": "# Generated by Django 3.1.5 on 2021-05-30 14:27\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('fuser', '0009_movement_type'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='movementpassmodel',\n name='movement_type',\n field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='fuser.movement_type'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
O(n) time complexity
O(n) space complexity
'''
class Solution:
    def twoSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]

        Single-pass hash-map lookup: map each visited value to its index
        and, for every new value, check whether its complement was seen.
        Returns [-1, -1] when no pair sums to target.
        """
        index_of = {}
        for position, number in enumerate(nums):
            complement = target - number
            if complement in index_of:
                return [index_of[complement], position]
            index_of[number] = position
        return [-1, -1]
if __name__ == "__main__":
    # Quick manual check; expected output: [0, 1].
    demo_nums = [2, 7, 11, 15]
    demo_target = 9
    print(Solution().twoSum(demo_nums, demo_target))
|
normal
|
{
"blob_id": "b3f62c331ff4ae9f909fc90cc7303997b32daceb",
"index": 1876,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution:\n\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n seenum = dict()\n for idx, val in enumerate(nums):\n if target - val in seenum:\n return [seenum[target - val], idx]\n seenum[val] = idx\n return [-1, -1]\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Solution:\n\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n seenum = dict()\n for idx, val in enumerate(nums):\n if target - val in seenum:\n return [seenum[target - val], idx]\n seenum[val] = idx\n return [-1, -1]\n\n\nif __name__ == '__main__':\n nums = [2, 7, 11, 15]\n target = 9\n sol = Solution()\n print(sol.twoSum(nums, target))\n",
"step-5": "'''\nO(n) time complexity\nO(n) space complexity\n'''\n\nclass Solution:\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n seenum = dict()\n for idx, val in enumerate(nums):\n if target - val in seenum:\n return [seenum[target-val], idx]\n seenum[val] = idx\n return [-1, -1]\n\nif __name__ == \"__main__\":\n nums = [2,7,11,15]\n target = 9\n sol = Solution()\n print(sol.twoSum(nums, target))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
"""(Optional) Test for GameDealer class."""
import unittest
import os, sys
from functools import reduce
sys.path.insert(0, os.path.join(os.path.split(__file__)[0], ".."))
import Lab19_Extending_Builtins.lab19_3 as game_dealer
WHOLE_DECK = sorted(game_dealer.Deck())
class ReportingDealer(game_dealer.GameDealer):
    """GameDealer only exposes string-producing methods, so this subclass
    adds a structured accessor returning the raw hands for assertions.
    """

    def Report(self):
        """Return each player's hand as a list (one list per player)."""
        hands = []
        for player in self.players:
            hands.append(player.hand)
        return hands
class TestPlayCards(unittest.TestCase):
    """Check hand sizes, card conservation, and 'Sorry' filler behaviour."""

    def testSmall(self):
        hands = ReportingDealer(1, 1).Report()
        self.assertEqual(len(hands), 1)
        self.assertEqual(len(hands[0]), 1)
        self.assertIn(hands[0][0], WHOLE_DECK)

    def testZilch(self):
        # Degenerate player/card counts must not crash the dealer.
        self.assertEqual([], ReportingDealer(0, 1).Report())
        self.assertEqual([[]], ReportingDealer(1, 0).Report())
        self.assertEqual([], ReportingDealer(0, 0).Report())

    def testWholeDealer(self):
        hands = ReportingDealer(9, 6).Report()
        for hand in hands:
            self.assertEqual(len(hand), 6)
        self.assertEqual(len(hands), 9)
        # 9 players x 6 cards deals out exactly one full deck.
        self.assertEqual(sorted(sum(hands, [])), WHOLE_DECK)

    def testTooMany(self):
        # 11 x 5 = 55 cards exceed the deck, so a "Sorry" filler appears.
        flattened = sum(ReportingDealer(11, 5).Report(), [])
        self.assertIn("Sorry", flattened)
        flattened.remove("Sorry")
        flattened.sort()
        self.assertEqual(flattened, WHOLE_DECK)

    def testWayTooMany(self):
        # 11 x 6 = 66 slots: the deck plus exactly 12 "Sorry" fillers.
        flattened = sum(ReportingDealer(11, 6).Report(), [])
        self.assertEqual(len(flattened), 66)
        self.assertEqual(flattened.count("Sorry"), 12)
        for _ in range(12):
            flattened.remove("Sorry")
        flattened.sort()
        self.assertEqual(flattened, WHOLE_DECK)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
|
normal
|
{
"blob_id": "06a721c12e3140d4d1cf544a598f512595c4ab66",
"index": 3013,
"step-1": "<mask token>\n\n\nclass ReportingDealer(game_dealer.GameDealer):\n <mask token>\n\n def Report(self):\n \"\"\"For testing.\"\"\"\n return [p.hand for p in self.players]\n\n\nclass TestPlayCards(unittest.TestCase):\n\n def testSmall(self):\n little = ReportingDealer(1, 1).Report()\n self.assertEqual(len(little), 1)\n self.assertEqual(len(little[0]), 1)\n self.assertTrue(little[0][0] in WHOLE_DECK)\n\n def testZilch(self):\n self.assertEqual([], ReportingDealer(0, 1).Report())\n self.assertEqual([[]], ReportingDealer(1, 0).Report())\n self.assertEqual([], ReportingDealer(0, 0).Report())\n\n def testWholeDealer(self):\n all_hands = ReportingDealer(9, 6).Report()\n for hand in all_hands:\n self.assertEqual(len(hand), 6)\n self.assertEqual(len(all_hands), 9)\n all_hands_collapsed = sorted(reduce(lambda x, y: x + y, all_hands))\n self.assertEqual(all_hands_collapsed, WHOLE_DECK)\n\n def testTooMany(self):\n too_many = ReportingDealer(11, 5).Report()\n too_many_collapsed = reduce(lambda x, y: x + y, too_many)\n self.assertTrue('Sorry' in too_many_collapsed)\n too_many_collapsed.remove('Sorry')\n too_many_collapsed.sort()\n self.assertEqual(too_many_collapsed, WHOLE_DECK)\n\n def testWayTooMany(self):\n way_too_many = ReportingDealer(11, 6).Report()\n way_too_many_collapsed = reduce(lambda x, y: x + y, way_too_many)\n self.assertEqual(len(way_too_many_collapsed), 66)\n self.assertEqual(way_too_many_collapsed.count('Sorry'), 12)\n for i in range(12):\n way_too_many_collapsed.remove('Sorry')\n way_too_many_collapsed.sort()\n self.assertEqual(way_too_many_collapsed, WHOLE_DECK)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.insert(0, os.path.join(os.path.split(__file__)[0], '..'))\n<mask token>\n\n\nclass ReportingDealer(game_dealer.GameDealer):\n \"\"\"GameDealer only had methods that output strings,\n so here we provide a list version for testing.\n \"\"\"\n\n def Report(self):\n \"\"\"For testing.\"\"\"\n return [p.hand for p in self.players]\n\n\nclass TestPlayCards(unittest.TestCase):\n\n def testSmall(self):\n little = ReportingDealer(1, 1).Report()\n self.assertEqual(len(little), 1)\n self.assertEqual(len(little[0]), 1)\n self.assertTrue(little[0][0] in WHOLE_DECK)\n\n def testZilch(self):\n self.assertEqual([], ReportingDealer(0, 1).Report())\n self.assertEqual([[]], ReportingDealer(1, 0).Report())\n self.assertEqual([], ReportingDealer(0, 0).Report())\n\n def testWholeDealer(self):\n all_hands = ReportingDealer(9, 6).Report()\n for hand in all_hands:\n self.assertEqual(len(hand), 6)\n self.assertEqual(len(all_hands), 9)\n all_hands_collapsed = sorted(reduce(lambda x, y: x + y, all_hands))\n self.assertEqual(all_hands_collapsed, WHOLE_DECK)\n\n def testTooMany(self):\n too_many = ReportingDealer(11, 5).Report()\n too_many_collapsed = reduce(lambda x, y: x + y, too_many)\n self.assertTrue('Sorry' in too_many_collapsed)\n too_many_collapsed.remove('Sorry')\n too_many_collapsed.sort()\n self.assertEqual(too_many_collapsed, WHOLE_DECK)\n\n def testWayTooMany(self):\n way_too_many = ReportingDealer(11, 6).Report()\n way_too_many_collapsed = reduce(lambda x, y: x + y, way_too_many)\n self.assertEqual(len(way_too_many_collapsed), 66)\n self.assertEqual(way_too_many_collapsed.count('Sorry'), 12)\n for i in range(12):\n way_too_many_collapsed.remove('Sorry')\n way_too_many_collapsed.sort()\n self.assertEqual(way_too_many_collapsed, WHOLE_DECK)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-3": "<mask token>\nsys.path.insert(0, os.path.join(os.path.split(__file__)[0], '..'))\n<mask token>\nWHOLE_DECK = sorted(game_dealer.Deck())\n\n\nclass ReportingDealer(game_dealer.GameDealer):\n \"\"\"GameDealer only had methods that output strings,\n so here we provide a list version for testing.\n \"\"\"\n\n def Report(self):\n \"\"\"For testing.\"\"\"\n return [p.hand for p in self.players]\n\n\nclass TestPlayCards(unittest.TestCase):\n\n def testSmall(self):\n little = ReportingDealer(1, 1).Report()\n self.assertEqual(len(little), 1)\n self.assertEqual(len(little[0]), 1)\n self.assertTrue(little[0][0] in WHOLE_DECK)\n\n def testZilch(self):\n self.assertEqual([], ReportingDealer(0, 1).Report())\n self.assertEqual([[]], ReportingDealer(1, 0).Report())\n self.assertEqual([], ReportingDealer(0, 0).Report())\n\n def testWholeDealer(self):\n all_hands = ReportingDealer(9, 6).Report()\n for hand in all_hands:\n self.assertEqual(len(hand), 6)\n self.assertEqual(len(all_hands), 9)\n all_hands_collapsed = sorted(reduce(lambda x, y: x + y, all_hands))\n self.assertEqual(all_hands_collapsed, WHOLE_DECK)\n\n def testTooMany(self):\n too_many = ReportingDealer(11, 5).Report()\n too_many_collapsed = reduce(lambda x, y: x + y, too_many)\n self.assertTrue('Sorry' in too_many_collapsed)\n too_many_collapsed.remove('Sorry')\n too_many_collapsed.sort()\n self.assertEqual(too_many_collapsed, WHOLE_DECK)\n\n def testWayTooMany(self):\n way_too_many = ReportingDealer(11, 6).Report()\n way_too_many_collapsed = reduce(lambda x, y: x + y, way_too_many)\n self.assertEqual(len(way_too_many_collapsed), 66)\n self.assertEqual(way_too_many_collapsed.count('Sorry'), 12)\n for i in range(12):\n way_too_many_collapsed.remove('Sorry')\n way_too_many_collapsed.sort()\n self.assertEqual(way_too_many_collapsed, WHOLE_DECK)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "<mask token>\nimport unittest\nimport os, sys\nfrom functools import reduce\nsys.path.insert(0, os.path.join(os.path.split(__file__)[0], '..'))\nimport Lab19_Extending_Builtins.lab19_3 as game_dealer\nWHOLE_DECK = sorted(game_dealer.Deck())\n\n\nclass ReportingDealer(game_dealer.GameDealer):\n \"\"\"GameDealer only had methods that output strings,\n so here we provide a list version for testing.\n \"\"\"\n\n def Report(self):\n \"\"\"For testing.\"\"\"\n return [p.hand for p in self.players]\n\n\nclass TestPlayCards(unittest.TestCase):\n\n def testSmall(self):\n little = ReportingDealer(1, 1).Report()\n self.assertEqual(len(little), 1)\n self.assertEqual(len(little[0]), 1)\n self.assertTrue(little[0][0] in WHOLE_DECK)\n\n def testZilch(self):\n self.assertEqual([], ReportingDealer(0, 1).Report())\n self.assertEqual([[]], ReportingDealer(1, 0).Report())\n self.assertEqual([], ReportingDealer(0, 0).Report())\n\n def testWholeDealer(self):\n all_hands = ReportingDealer(9, 6).Report()\n for hand in all_hands:\n self.assertEqual(len(hand), 6)\n self.assertEqual(len(all_hands), 9)\n all_hands_collapsed = sorted(reduce(lambda x, y: x + y, all_hands))\n self.assertEqual(all_hands_collapsed, WHOLE_DECK)\n\n def testTooMany(self):\n too_many = ReportingDealer(11, 5).Report()\n too_many_collapsed = reduce(lambda x, y: x + y, too_many)\n self.assertTrue('Sorry' in too_many_collapsed)\n too_many_collapsed.remove('Sorry')\n too_many_collapsed.sort()\n self.assertEqual(too_many_collapsed, WHOLE_DECK)\n\n def testWayTooMany(self):\n way_too_many = ReportingDealer(11, 6).Report()\n way_too_many_collapsed = reduce(lambda x, y: x + y, way_too_many)\n self.assertEqual(len(way_too_many_collapsed), 66)\n self.assertEqual(way_too_many_collapsed.count('Sorry'), 12)\n for i in range(12):\n way_too_many_collapsed.remove('Sorry')\n way_too_many_collapsed.sort()\n self.assertEqual(way_too_many_collapsed, WHOLE_DECK)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "#!/usr/bin/env python3\n\"\"\"(Optional) Test for GameDealer class.\"\"\"\n\nimport unittest\n\nimport os, sys\nfrom functools import reduce\nsys.path.insert(0, os.path.join(os.path.split(__file__)[0], \"..\"))\n \nimport Lab19_Extending_Builtins.lab19_3 as game_dealer\n\nWHOLE_DECK = sorted(game_dealer.Deck())\n\nclass ReportingDealer(game_dealer.GameDealer):\n \"\"\"GameDealer only had methods that output strings,\n so here we provide a list version for testing.\n \"\"\"\n def Report(self):\n \"\"\"For testing.\"\"\"\n return [p.hand for p in self.players]\n\nclass TestPlayCards(unittest.TestCase):\n\n def testSmall(self):\n little = ReportingDealer(1, 1).Report()\n self.assertEqual(len(little), 1)\n self.assertEqual(len(little[0]), 1)\n self.assertTrue(little[0][0] in WHOLE_DECK)\n\n def testZilch(self):\n self.assertEqual([], ReportingDealer(0, 1).Report())\n self.assertEqual([[]], ReportingDealer(1, 0).Report())\n self.assertEqual([], ReportingDealer(0, 0).Report())\n\n def testWholeDealer(self):\n all_hands = ReportingDealer(9, 6).Report()\n for hand in all_hands:\n self.assertEqual(len(hand), 6)\n self.assertEqual(len(all_hands), 9)\n all_hands_collapsed = sorted(\n reduce(lambda x, y: x + y, all_hands))\n self.assertEqual(all_hands_collapsed, WHOLE_DECK)\n\n\n\n\n \n def testTooMany(self):\n too_many = ReportingDealer(11, 5).Report()\n too_many_collapsed = reduce(lambda x, y: x + y,\n too_many)\n self.assertTrue(\"Sorry\" in too_many_collapsed)\n too_many_collapsed.remove(\"Sorry\")\n too_many_collapsed.sort()\n self.assertEqual(too_many_collapsed, WHOLE_DECK)\n\n def testWayTooMany(self):\n way_too_many = ReportingDealer(11, 6).Report()\n way_too_many_collapsed = reduce(lambda x, y: x + y,\n way_too_many)\n self.assertEqual(len(way_too_many_collapsed), 66)\n self.assertEqual(way_too_many_collapsed.count(\"Sorry\"),\n 12)\n for i in range(12):\n way_too_many_collapsed.remove(\"Sorry\")\n way_too_many_collapsed.sort()\n 
self.assertEqual(way_too_many_collapsed, WHOLE_DECK)\n\nif __name__ == \"__main__\":\n unittest.main()\n\n",
"step-ids": [
8,
10,
11,
12,
13
]
}
|
[
8,
10,
11,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def iteration_spider():
max_errors = 5
num_errors = 0
for page in itertools.count(1):
url = 'http://example.webscraping.com/view/-{}'.format(page)
html = download(url)
if html is None:
num_errors += 1
if num_errors == max_errors:
break
else:
num_errors = 0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def iteration_spider():
max_errors = 5
num_errors = 0
for page in itertools.count(1):
url = 'http://example.webscraping.com/view/-{}'.format(page)
html = download(url)
if html is None:
num_errors += 1
if num_errors == max_errors:
break
else:
num_errors = 0
if __name__ == '__main__':
iteration_spider()
<|reserved_special_token_1|>
from common import *
import itertools
def iteration_spider():
max_errors = 5
num_errors = 0
for page in itertools.count(1):
url = 'http://example.webscraping.com/view/-{}'.format(page)
html = download(url)
if html is None:
num_errors += 1
if num_errors == max_errors:
break
else:
num_errors = 0
if __name__ == '__main__':
iteration_spider()
<|reserved_special_token_1|>
# -*- coding:utf-8 -*-
from common import *
import itertools
def iteration_spider():
    """Crawl sequentially numbered view pages until five consecutive
    downloads fail, which is taken to mean the listing has ended.
    """
    error_limit = 5
    consecutive_errors = 0
    for page_number in itertools.count(1):
        page_url = 'http://example.webscraping.com/view/-{}'.format(page_number)
        page_html = download(page_url)
        if page_html is not None:
            # Successful download resets the failure streak.
            consecutive_errors = 0
            continue
        consecutive_errors += 1
        if consecutive_errors == error_limit:
            break
# Run the crawl when executed directly as a script.
if __name__ == '__main__':
	iteration_spider()
|
flexible
|
{
"blob_id": "0eaba8f570772de864f52168a597b47a4150d015",
"index": 5924,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef iteration_spider():\n max_errors = 5\n num_errors = 0\n for page in itertools.count(1):\n url = 'http://example.webscraping.com/view/-{}'.format(page)\n html = download(url)\n if html is None:\n num_errors += 1\n if num_errors == max_errors:\n break\n else:\n num_errors = 0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef iteration_spider():\n max_errors = 5\n num_errors = 0\n for page in itertools.count(1):\n url = 'http://example.webscraping.com/view/-{}'.format(page)\n html = download(url)\n if html is None:\n num_errors += 1\n if num_errors == max_errors:\n break\n else:\n num_errors = 0\n\n\nif __name__ == '__main__':\n iteration_spider()\n",
"step-4": "from common import *\nimport itertools\n\n\ndef iteration_spider():\n max_errors = 5\n num_errors = 0\n for page in itertools.count(1):\n url = 'http://example.webscraping.com/view/-{}'.format(page)\n html = download(url)\n if html is None:\n num_errors += 1\n if num_errors == max_errors:\n break\n else:\n num_errors = 0\n\n\nif __name__ == '__main__':\n iteration_spider()\n",
"step-5": "# -*- coding:utf-8 -*-\n\nfrom common import *\nimport itertools\n\ndef iteration_spider():\n\tmax_errors = 5\n\tnum_errors = 0\n\tfor page in itertools.count(1):\n\t\turl = 'http://example.webscraping.com/view/-{}'.format(page)\n\t\thtml = download(url)\n\t\tif html is None:\n\t\t\tnum_errors += 1\n\t\t\tif num_errors == max_errors:\n\t\t\t\tbreak\n\t\telse:\n\t\t\tnum_errors = 0\n\t\t\t\n\nif __name__ == '__main__':\n\titeration_spider()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# NOTE(review): Python 2 script (raw_input, xrange, print statements).
# Draws an ASCII grid with int(b) rows and int(a) columns of cells.
a=raw_input("Enter the column\n")
b=raw_input("Enter the row\n")
i=0
k=0
m=0
c=""  # one horizontal border line, " ---" repeated a times
d=""  # one cell row line, "|   " repeated a times (closing "|" added at print)
while (m<int(b)):
    # i and k are never reset, so these inner loops only execute on the
    # first pass of the outer loop: c and d are built once and the same
    # two lines are re-printed for every subsequent row.
    while(i<int(a)):
        c=c+" "
        for j in xrange(1,4):
            c=c+"-"
        i=i+1
    while(k<int(a)):
        d=d+"|"
        for l in xrange(1,4):
            d=d+" "
        k=k+1
    m=m+1
    print c
    print d+"|"
# Bottom border closing the grid.
print c
|
normal
|
{
"blob_id": "c28d7fc45be9a6efa7b7ef00520898c3d238ac63",
"index": 5518,
"step-1": "a=raw_input(\"Enter the column\\n\")\nb=raw_input(\"Enter the row\\n\")\ni=0\nk=0\nm=0\nc=\"\"\nd=\"\"\nwhile (m<int(b)):\n while(i<int(a)):\n c=c+\" \"\n for j in xrange(1,4):\n c=c+\"-\"\n i=i+1\n while(k<int(a)):\n d=d+\"|\"\n for l in xrange(1,4):\n d=d+\" \"\n k=k+1\n m=m+1\n print c\n print d+\"|\"\nprint c\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
## Author: Aleem Juma

import os
from app import app
import pandas as pd

# Read in the quotes database at import time.
# Uses ';' as separator and skips an extra first line; columns referenced
# below are GENRE, QUOTE and AUTHOR -- TODO confirm against the CSV file.
q = pd.read_csv(os.path.join('app','data','quotes_all.csv'), sep=';', skiprows=1, header=0)

# there are a few quote genres that don't occur in the model vocab
# replace them with appropriate words so the similarity search works
replace = {
    'movingon':'moving',
    'fathersday': 'fathers',
    'memorialday': 'memorial',
    'mothersday': 'mothers',
    'newyears': 'year',
    'saintpatricksday': 'ireland',
    'valentinesday': 'valentine'
}
q['GENRE'].replace(to_replace=replace, inplace=True)

import spacy

# Load the medium English model used for the similarity scoring below.
nlp = spacy.load('en_core_web_md')

# cache the computed tokens for the genres in the dataset, so the NLP
# pipeline does not have to re-run for every request
cache = {genre:nlp(genre) for genre in q.GENRE.unique()}
def get_similarity(word1, word2):
    '''
    Returns a similarity score between two words.

    Tokens for known genre labels come from the precomputed cache;
    unknown words are tokenised on demand.
    '''
    # Fix: cache.get(word, nlp(word)) evaluated nlp(word) eagerly even on
    # a cache hit, defeating the cache. Only run the pipeline on a miss.
    tok1 = cache.get(word1)
    if tok1 is None:
        tok1 = nlp(word1)
    tok2 = cache.get(word2)
    if tok2 is None:
        tok2 = nlp(word2)
    return tok1.similarity(tok2)
def get_random_word():
    '''
    Return a random genre label sampled from the quotes data.
    '''
    return q['GENRE'].sample(1).iloc[0]
def get_closest_words(word, choices, n=1):
    '''
    Returns the n closest matches in the model vocab
    Parameters:
        word        word to search
        choices     available matches
        n           number of results to return

    Returns:
        A list of n tuples in the form (word (str), similarity (float))
    '''
    app.logger.info(f'Finding closest words to "{word}"')
    if word in choices:
        # Exact label already present: report it as a perfect match.
        return [(word, 1.0)]
    if word not in nlp.vocab.strings:
        app.logger.info(f'Not in model vocab: "{word}"')
        # Unknown word: fall back to a random genre, keeping the original
        # request at zero similarity so callers can see it was unmatched.
        return [(get_random_word(), 1.0), (word, 0.0)]
    # Score every candidate, order best-first, and keep the top n.
    scored = []
    for candidate in choices:
        scored.append((candidate, get_similarity(word, candidate)))
    scored.sort(key=lambda pair: pair[1])
    return scored[::-1][:n]
def find_matching_quote(genre, top_n=5):
    '''
    Returns a matching quote and up to 5 of the most similar genres with similarity measures
    Paramters:
        genre    genre to match

    Returns:
        (str) Quote
        (str) Author
        (list) List of tuples in the form (word (str), simliarity (float))
    '''
    # rank the known genres by similarity to the request
    matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)
    # best match is first in the ranked list
    closest = matched_genres[0][0]
    app.logger.info(f'Finding quote for: "{closest}"')
    # pick one random quote row from that genre
    row = q[q['GENRE']==closest].sample(1).iloc[0]
    return row.QUOTE, row.AUTHOR, matched_genres
|
normal
|
{
"blob_id": "8f854f4f2c807f988945af4dc53dba93cfb31168",
"index": 9441,
"step-1": "<mask token>\n\n\ndef get_similarity(word1, word2):\n \"\"\"\n Returns a similarity score between two words\n \"\"\"\n tok1 = cache.get(word1, nlp(word1))\n tok2 = cache.get(word2, nlp(word2))\n return tok1.similarity(tok2)\n\n\n<mask token>\n\n\ndef get_closest_words(word, choices, n=1):\n \"\"\"\n Returns the n closest matches in the model vocab\n Parameters:\n word word to search\n choices available matches\n n number of results to return\n\n Returns:\n A list of n tuples in the form (word (str), similarity (float))\n \"\"\"\n app.logger.info(f'Finding closest words to \"{word}\"')\n if word in choices:\n return [(word, 1.0)]\n if word in nlp.vocab.strings:\n similarities = [(choice, get_similarity(word, choice)) for choice in\n choices]\n return sorted(similarities, key=lambda x: x[1])[::-1][:n]\n else:\n app.logger.info(f'Not in model vocab: \"{word}\"')\n return [(get_random_word(), 1.0), (word, 0.0)]\n\n\ndef find_matching_quote(genre, top_n=5):\n \"\"\"\n Returns a matching quote and up to 5 of the most similar genres with similarity measures\n Paramters:\n genre genre to match\n\n Returns:\n (str) Quote\n (str) Author\n (list) List of tuples in the form (word (str), simliarity (float))\n \"\"\"\n matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)\n closest = matched_genres[0][0]\n app.logger.info(f'Finding quote for: \"{closest}\"')\n matching_quote = q[q['GENRE'] == closest].sample(1).iloc[0]\n quote = matching_quote.QUOTE\n author = matching_quote.AUTHOR\n return quote, author, matched_genres\n",
"step-2": "<mask token>\nq['GENRE'].replace(to_replace=replace, inplace=True)\n<mask token>\n\n\ndef get_similarity(word1, word2):\n \"\"\"\n Returns a similarity score between two words\n \"\"\"\n tok1 = cache.get(word1, nlp(word1))\n tok2 = cache.get(word2, nlp(word2))\n return tok1.similarity(tok2)\n\n\ndef get_random_word():\n \"\"\"\n Returns a random category label from the data\n \"\"\"\n random_word = q['GENRE'].sample(1).iloc[0]\n return random_word\n\n\ndef get_closest_words(word, choices, n=1):\n \"\"\"\n Returns the n closest matches in the model vocab\n Parameters:\n word word to search\n choices available matches\n n number of results to return\n\n Returns:\n A list of n tuples in the form (word (str), similarity (float))\n \"\"\"\n app.logger.info(f'Finding closest words to \"{word}\"')\n if word in choices:\n return [(word, 1.0)]\n if word in nlp.vocab.strings:\n similarities = [(choice, get_similarity(word, choice)) for choice in\n choices]\n return sorted(similarities, key=lambda x: x[1])[::-1][:n]\n else:\n app.logger.info(f'Not in model vocab: \"{word}\"')\n return [(get_random_word(), 1.0), (word, 0.0)]\n\n\ndef find_matching_quote(genre, top_n=5):\n \"\"\"\n Returns a matching quote and up to 5 of the most similar genres with similarity measures\n Paramters:\n genre genre to match\n\n Returns:\n (str) Quote\n (str) Author\n (list) List of tuples in the form (word (str), simliarity (float))\n \"\"\"\n matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)\n closest = matched_genres[0][0]\n app.logger.info(f'Finding quote for: \"{closest}\"')\n matching_quote = q[q['GENRE'] == closest].sample(1).iloc[0]\n quote = matching_quote.QUOTE\n author = matching_quote.AUTHOR\n return quote, author, matched_genres\n",
"step-3": "<mask token>\nq = pd.read_csv(os.path.join('app', 'data', 'quotes_all.csv'), sep=';',\n skiprows=1, header=0)\nreplace = {'movingon': 'moving', 'fathersday': 'fathers', 'memorialday':\n 'memorial', 'mothersday': 'mothers', 'newyears': 'year',\n 'saintpatricksday': 'ireland', 'valentinesday': 'valentine'}\nq['GENRE'].replace(to_replace=replace, inplace=True)\n<mask token>\nnlp = spacy.load('en_core_web_md')\ncache = {genre: nlp(genre) for genre in q.GENRE.unique()}\n\n\ndef get_similarity(word1, word2):\n \"\"\"\n Returns a similarity score between two words\n \"\"\"\n tok1 = cache.get(word1, nlp(word1))\n tok2 = cache.get(word2, nlp(word2))\n return tok1.similarity(tok2)\n\n\ndef get_random_word():\n \"\"\"\n Returns a random category label from the data\n \"\"\"\n random_word = q['GENRE'].sample(1).iloc[0]\n return random_word\n\n\ndef get_closest_words(word, choices, n=1):\n \"\"\"\n Returns the n closest matches in the model vocab\n Parameters:\n word word to search\n choices available matches\n n number of results to return\n\n Returns:\n A list of n tuples in the form (word (str), similarity (float))\n \"\"\"\n app.logger.info(f'Finding closest words to \"{word}\"')\n if word in choices:\n return [(word, 1.0)]\n if word in nlp.vocab.strings:\n similarities = [(choice, get_similarity(word, choice)) for choice in\n choices]\n return sorted(similarities, key=lambda x: x[1])[::-1][:n]\n else:\n app.logger.info(f'Not in model vocab: \"{word}\"')\n return [(get_random_word(), 1.0), (word, 0.0)]\n\n\ndef find_matching_quote(genre, top_n=5):\n \"\"\"\n Returns a matching quote and up to 5 of the most similar genres with similarity measures\n Paramters:\n genre genre to match\n\n Returns:\n (str) Quote\n (str) Author\n (list) List of tuples in the form (word (str), simliarity (float))\n \"\"\"\n matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)\n closest = matched_genres[0][0]\n app.logger.info(f'Finding quote for: \"{closest}\"')\n 
matching_quote = q[q['GENRE'] == closest].sample(1).iloc[0]\n quote = matching_quote.QUOTE\n author = matching_quote.AUTHOR\n return quote, author, matched_genres\n",
"step-4": "import os\nfrom app import app\nimport pandas as pd\nq = pd.read_csv(os.path.join('app', 'data', 'quotes_all.csv'), sep=';',\n skiprows=1, header=0)\nreplace = {'movingon': 'moving', 'fathersday': 'fathers', 'memorialday':\n 'memorial', 'mothersday': 'mothers', 'newyears': 'year',\n 'saintpatricksday': 'ireland', 'valentinesday': 'valentine'}\nq['GENRE'].replace(to_replace=replace, inplace=True)\nimport spacy\nnlp = spacy.load('en_core_web_md')\ncache = {genre: nlp(genre) for genre in q.GENRE.unique()}\n\n\ndef get_similarity(word1, word2):\n \"\"\"\n Returns a similarity score between two words\n \"\"\"\n tok1 = cache.get(word1, nlp(word1))\n tok2 = cache.get(word2, nlp(word2))\n return tok1.similarity(tok2)\n\n\ndef get_random_word():\n \"\"\"\n Returns a random category label from the data\n \"\"\"\n random_word = q['GENRE'].sample(1).iloc[0]\n return random_word\n\n\ndef get_closest_words(word, choices, n=1):\n \"\"\"\n Returns the n closest matches in the model vocab\n Parameters:\n word word to search\n choices available matches\n n number of results to return\n\n Returns:\n A list of n tuples in the form (word (str), similarity (float))\n \"\"\"\n app.logger.info(f'Finding closest words to \"{word}\"')\n if word in choices:\n return [(word, 1.0)]\n if word in nlp.vocab.strings:\n similarities = [(choice, get_similarity(word, choice)) for choice in\n choices]\n return sorted(similarities, key=lambda x: x[1])[::-1][:n]\n else:\n app.logger.info(f'Not in model vocab: \"{word}\"')\n return [(get_random_word(), 1.0), (word, 0.0)]\n\n\ndef find_matching_quote(genre, top_n=5):\n \"\"\"\n Returns a matching quote and up to 5 of the most similar genres with similarity measures\n Paramters:\n genre genre to match\n\n Returns:\n (str) Quote\n (str) Author\n (list) List of tuples in the form (word (str), simliarity (float))\n \"\"\"\n matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)\n closest = matched_genres[0][0]\n 
app.logger.info(f'Finding quote for: \"{closest}\"')\n matching_quote = q[q['GENRE'] == closest].sample(1).iloc[0]\n quote = matching_quote.QUOTE\n author = matching_quote.AUTHOR\n return quote, author, matched_genres\n",
"step-5": "## Author: Aleem Juma\n\nimport os\nfrom app import app\nimport pandas as pd\n\n# read in the quotes database\nq = pd.read_csv(os.path.join('app','data','quotes_all.csv'), sep=';', skiprows=1, header=0)\n\n# there are a few quote genres that don't occur in the model vocab\n# replace them with appropriate words so the similarity search works\nreplace = {\n 'movingon':'moving',\n 'fathersday': 'fathers',\n 'memorialday': 'memorial',\n 'mothersday': 'mothers',\n 'newyears': 'year',\n 'saintpatricksday': 'ireland',\n 'valentinesday': 'valentine'\n}\nq['GENRE'].replace(to_replace=replace, inplace=True)\n\nimport spacy\nnlp = spacy.load('en_core_web_md')\n# cache the computed tokens for the genres in the dataset\ncache = {genre:nlp(genre) for genre in q.GENRE.unique()}\n\ndef get_similarity(word1, word2):\n '''\n Returns a similarity score between two words\n '''\n tok1 = cache.get(word1, nlp(word1))\n tok2 = cache.get(word2, nlp(word2))\n return tok1.similarity(tok2)\n\ndef get_random_word():\n '''\n Returns a random category label from the data\n '''\n random_word = q['GENRE'].sample(1).iloc[0]\n return random_word\n\ndef get_closest_words(word, choices, n=1):\n '''\n Returns the n closest matches in the model vocab\n Parameters:\n word word to search\n choices available matches\n n number of results to return\n\n Returns:\n A list of n tuples in the form (word (str), similarity (float))\n '''\n app.logger.info(f'Finding closest words to \"{word}\"')\n if word in choices:\n # if the word is already in the list return the same word with 100% match\n return [(word, 1.0)]\n if word in nlp.vocab.strings:\n # if not in the list, find the closest words\n similarities = [(choice, get_similarity(word, choice)) for choice in choices]\n # sort, reverse, and return the top n (word,similarity) tuples\n return sorted(similarities, key=lambda x: x[1])[::-1][:n]\n else:\n app.logger.info(f'Not in model vocab: \"{word}\"')\n # if the requested label isn't in the model 
vocab, return a random genre\n return [(get_random_word(), 1.0), (word, 0.0)]\n\ndef find_matching_quote(genre, top_n=5):\n '''\n Returns a matching quote and up to 5 of the most similar genres with similarity measures\n Paramters:\n genre genre to match\n\n Returns:\n (str) Quote\n (str) Author\n (list) List of tuples in the form (word (str), simliarity (float))\n '''\n # find closest matches\n matched_genres = get_closest_words(genre, q.GENRE.unique(), top_n)\n # get the best one\n closest = matched_genres[0][0]\n app.logger.info(f'Finding quote for: \"{closest}\"')\n # get a quote from that genre\n matching_quote = q[q['GENRE']==closest].sample(1).iloc[0]\n quote = matching_quote.QUOTE\n author = matching_quote.AUTHOR\n # return the quote and the genres\n return quote, author, matched_genres\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
# Deploy an Azure ML model as a web service on an AKS cluster.
# Fix: Workspace and ComputeTarget were used below but never imported,
# so the script failed with NameError before doing any work.
from azureml.core import Workspace
from azureml.core.compute import AksCompute, ComputeTarget
from azureml.core.model import Model, InferenceConfig
from azureml.core.webservice import AksWebservice

# Deployment configuration (identifying values redacted).
workspace_name = ""
subscription_id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
resource_group = "XXXXXXXXXXXXXXXXX"
workspace_region = "eastus2"
https_cert = "XXXXX"
aks_name = "XXXXXXX"
aks_service_name = 'XXXXXXXXX'

# Create the workspace (exist_ok=True reuses an existing one).
ws = Workspace.create(name=workspace_name,
                      subscription_id=subscription_id,
                      resource_group=resource_group,
                      location=workspace_region,
                      exist_ok=True)

# Provision AKS cluster
prov_config = AksCompute.provisioning_configuration(vm_size="Standard_D14")
prov_config.enable_ssl(leaf_domain_label=https_cert)
# Create the cluster
aks_target = ComputeTarget.create(
    workspace=ws, name=aks_name, provisioning_configuration=prov_config
)

# How the service image is built and which script serves requests.
inference_config = InferenceConfig(runtime="python",
                                   entry_script="aml_app.py",
                                   conda_file="myenv.yml",
                                   extra_docker_file_steps='dockerfile'
                                   )

# Fixed-size service: 3 replicas, 2 cores / 4 GB each, no auth.
aks_python_bot = AksWebservice.deploy_configuration(autoscale_enabled=False,
                                                    num_replicas=3,
                                                    cpu_cores=2,
                                                    memory_gb=4,
                                                    auth_enabled=False)

# NOTE(review): Model.deploy's `models` argument normally takes Model
# objects, not a script filename — confirm ['aml_app.py'] is intended.
aks_service = Model.deploy(ws,
                           models=['aml_app.py'],
                           inference_config=inference_config,
                           deployment_config=aks_python_bot,
                           deployment_target=aks_target,
                           name=aks_service_name)

# Block until deployment completes, then report the final state.
aks_service.wait_for_deployment(show_output=True)
print(aks_service.state)
|
normal
|
{
"blob_id": "2941ecde72325d46b5c3899d4b1a213daff67147",
"index": 2613,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprov_config.enable_ssl(leaf_domain_label=https_cert)\n<mask token>\naks_service.wait_for_deployment(show_output=True)\nprint(aks_service.state)\n",
"step-3": "<mask token>\nworkspace_name = ''\nsubscription_id = 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'\nresource_group = 'XXXXXXXXXXXXXXXXX'\nworkspace_region = 'eastus2'\nhttps_cert = 'XXXXX'\naks_name = 'XXXXXXX'\naks_service_name = 'XXXXXXXXX'\nws = Workspace.create(name=workspace_name, subscription_id=subscription_id,\n resource_group=resource_group, location=workspace_region, exist_ok=True)\nprov_config = AksCompute.provisioning_configuration(vm_size='Standard_D14')\nprov_config.enable_ssl(leaf_domain_label=https_cert)\naks_target = ComputeTarget.create(workspace=ws, name=aks_name,\n provisioning_configuration=prov_config)\ninference_config = InferenceConfig(runtime='python', entry_script=\n 'aml_app.py', conda_file='myenv.yml', extra_docker_file_steps='dockerfile')\naks_python_bot = AksWebservice.deploy_configuration(autoscale_enabled=False,\n num_replicas=3, cpu_cores=2, memory_gb=4, auth_enabled=False)\naks_service = Model.deploy(ws, models=['aml_app.py'], inference_config=\n inference_config, deployment_config=aks_python_bot, deployment_target=\n aks_target, name=aks_service_name)\naks_service.wait_for_deployment(show_output=True)\nprint(aks_service.state)\n",
"step-4": "from azureml.core.compute import AksCompute\nfrom azureml.core.model import Model, InferenceConfig\nfrom azureml.core.webservice import AksWebservice\nworkspace_name = ''\nsubscription_id = 'XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX'\nresource_group = 'XXXXXXXXXXXXXXXXX'\nworkspace_region = 'eastus2'\nhttps_cert = 'XXXXX'\naks_name = 'XXXXXXX'\naks_service_name = 'XXXXXXXXX'\nws = Workspace.create(name=workspace_name, subscription_id=subscription_id,\n resource_group=resource_group, location=workspace_region, exist_ok=True)\nprov_config = AksCompute.provisioning_configuration(vm_size='Standard_D14')\nprov_config.enable_ssl(leaf_domain_label=https_cert)\naks_target = ComputeTarget.create(workspace=ws, name=aks_name,\n provisioning_configuration=prov_config)\ninference_config = InferenceConfig(runtime='python', entry_script=\n 'aml_app.py', conda_file='myenv.yml', extra_docker_file_steps='dockerfile')\naks_python_bot = AksWebservice.deploy_configuration(autoscale_enabled=False,\n num_replicas=3, cpu_cores=2, memory_gb=4, auth_enabled=False)\naks_service = Model.deploy(ws, models=['aml_app.py'], inference_config=\n inference_config, deployment_config=aks_python_bot, deployment_target=\n aks_target, name=aks_service_name)\naks_service.wait_for_deployment(show_output=True)\nprint(aks_service.state)\n",
"step-5": "from azureml.core.compute import AksCompute\nfrom azureml.core.model import Model, InferenceConfig\nfrom azureml.core.webservice import AksWebservice\n\nworkspace_name = \"\"\nsubscription_id = \"XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX\"\nresource_group = \"XXXXXXXXXXXXXXXXX\"\nworkspace_region = \"eastus2\"\nhttps_cert = \"XXXXX\"\naks_name = \"XXXXXXX\"\naks_service_name = 'XXXXXXXXX'\n\nws = Workspace.create(name=workspace_name,\n subscription_id=subscription_id,\n resource_group=resource_group,\n location=workspace_region,\n exist_ok=True)\n\n# Provision AKS cluster\nprov_config = AksCompute.provisioning_configuration(vm_size=\"Standard_D14\")\nprov_config.enable_ssl(leaf_domain_label=https_cert)\n# Create the cluster\naks_target = ComputeTarget.create(\n workspace=ws, name=aks_name, provisioning_configuration=prov_config\n)\n\ninference_config = InferenceConfig(runtime=\"python\",\n entry_script=\"aml_app.py\",\n conda_file=\"myenv.yml\",\n extra_docker_file_steps='dockerfile'\n )\n\n\naks_python_bot = AksWebservice.deploy_configuration(autoscale_enabled=False,\n num_replicas=3,\n cpu_cores=2,\n memory_gb=4,\n auth_enabled=False)\n\naks_service = Model.deploy(ws,\n models=['aml_app.py'],\n inference_config=inference_config,\n deployment_config=aks_python_bot,\n deployment_target=aks_target,\n name=aks_service_name)\n\naks_service.wait_for_deployment(show_output=True)\nprint(aks_service.state)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
in_str = input()
except Exception as e:
print('WRONG FORMAT!')
sys.exit(0)
<|reserved_special_token_0|>
try:
in_exp = eval(in_str)
except Exception as e:
print('WRONG FORMAT!')
sys.exit(0)
<|reserved_special_token_0|>
print(str(res).replace('**', '^'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
x = Symbol('x')
try:
in_str = input()
except Exception as e:
print('WRONG FORMAT!')
sys.exit(0)
in_str = in_str.replace('^', '**')
try:
in_exp = eval(in_str)
except Exception as e:
print('WRONG FORMAT!')
sys.exit(0)
res = diff(in_exp)
print(str(res).replace('**', '^'))
<|reserved_special_token_1|>
from sympy import *
import sys
x = Symbol('x')
try:
in_str = input()
except Exception as e:
print('WRONG FORMAT!')
sys.exit(0)
in_str = in_str.replace('^', '**')
try:
in_exp = eval(in_str)
except Exception as e:
print('WRONG FORMAT!')
sys.exit(0)
res = diff(in_exp)
print(str(res).replace('**', '^'))
<|reserved_special_token_1|>
from sympy import *
import sys

# Read a single-variable expression in x from stdin, differentiate it
# with SymPy, and print the derivative using '^' for exponentiation.
x = Symbol("x")

# EOF / read failure is reported as a format error.
try:
    in_str = input()
except Exception as e:
    print("WRONG FORMAT!") # Wrong Format!
    sys.exit(0)

in_str = in_str.replace("^", "**")  # change '^' into '**' for recognition

# Parse the expression. sympify replaces the original eval(): eval
# executed arbitrary Python from user input, while sympify only builds
# SymPy expressions; invalid input still lands in the except branch.
try:
    in_exp = sympify(in_str)
except Exception as e:
    print("WRONG FORMAT!") # Wrong Format!
    sys.exit(0)

res = diff(in_exp)
print(str(res).replace("**", "^"))
#res = diff(in_exp).subs(x,2)
#print(res)
|
flexible
|
{
"blob_id": "1634ae0e329b4f277fa96a870fbd19626c0ece81",
"index": 6516,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n in_str = input()\nexcept Exception as e:\n print('WRONG FORMAT!')\n sys.exit(0)\n<mask token>\ntry:\n in_exp = eval(in_str)\nexcept Exception as e:\n print('WRONG FORMAT!')\n sys.exit(0)\n<mask token>\nprint(str(res).replace('**', '^'))\n",
"step-3": "<mask token>\nx = Symbol('x')\ntry:\n in_str = input()\nexcept Exception as e:\n print('WRONG FORMAT!')\n sys.exit(0)\nin_str = in_str.replace('^', '**')\ntry:\n in_exp = eval(in_str)\nexcept Exception as e:\n print('WRONG FORMAT!')\n sys.exit(0)\nres = diff(in_exp)\nprint(str(res).replace('**', '^'))\n",
"step-4": "from sympy import *\nimport sys\nx = Symbol('x')\ntry:\n in_str = input()\nexcept Exception as e:\n print('WRONG FORMAT!')\n sys.exit(0)\nin_str = in_str.replace('^', '**')\ntry:\n in_exp = eval(in_str)\nexcept Exception as e:\n print('WRONG FORMAT!')\n sys.exit(0)\nres = diff(in_exp)\nprint(str(res).replace('**', '^'))\n",
"step-5": "from sympy import *\nimport sys\nx = Symbol(\"x\")\n# EOF\ntry:\n in_str = input()\nexcept Exception as e:\n print(\"WRONG FORMAT!\") # Wrong Format!\n sys.exit(0)\n\nin_str = in_str.replace(\"^\", \"**\") #change '^'into'**' for recognition\n\n# wrong expression\ntry:\n in_exp = eval(in_str) # turn str into expression\nexcept Exception as e:\n print(\"WRONG FORMAT!\") # Wrong Format!\n sys.exit(0)\n\nres = diff(in_exp)\nprint(str(res).replace(\"**\", \"^\"))\n#res = diff(in_exp).subs(x,2)\n#print(res)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sea
import sklearn
import glob
import pydub
from pydub import AudioSegment
import time
import librosa
import noisereduce as nr
from scipy.io import wavfile
import IPython
import sounddevice as sd
from pysndfx import AudioEffectsChain
import python_speech_features
import sox
import math
# Alternative test clip kept for reference:
#y,sr=librosa.load(r"C:\Users\pranj\OneDrive\Desktop\Project\72843_lonemonk_approx-800-laughter-only-1.wav")
# Load one IEMOCAP utterance; `my` is the waveform array, `sr` the sample rate.
my,sr=librosa.load(r"C:\Users\pranj\Downloads\IEMOCAP_full_release_withoutVideos\IEMOCAP_full_release\Session1\sentences\wav\Ses01F_impro01\Ses01F_impro01_F000.wav")
# NOTE(review): the whole clip is used as its own noise profile
# (noise_clip=my) and `reduced_noise` is never used afterwards — confirm
# whether the denoised signal was meant to be played instead of `my`.
reduced_noise = nr.reduce_noise(audio_clip=my, noise_clip=my, verbose=True,prop_decrease=0.8)
print(IPython.display.Audio(data=my, rate=sr))
# Play the original waveform and block until playback finishes.
sd.play(my, sr)
status = sd.wait()
|
normal
|
{
"blob_id": "14bf4befdce4270b4514b4e643964182f9c49ff4",
"index": 8434,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(IPython.display.Audio(data=my, rate=sr))\nsd.play(my, sr)\n<mask token>\n",
"step-3": "<mask token>\nmy, sr = librosa.load(\n 'C:\\\\Users\\\\pranj\\\\Downloads\\\\IEMOCAP_full_release_withoutVideos\\\\IEMOCAP_full_release\\\\Session1\\\\sentences\\\\wav\\\\Ses01F_impro01\\\\Ses01F_impro01_F000.wav'\n )\nreduced_noise = nr.reduce_noise(audio_clip=my, noise_clip=my, verbose=True,\n prop_decrease=0.8)\nprint(IPython.display.Audio(data=my, rate=sr))\nsd.play(my, sr)\nstatus = sd.wait()\n",
"step-4": "import sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sea\nimport sklearn\nimport glob\nimport pydub\nfrom pydub import AudioSegment\nimport time\nimport librosa\nimport noisereduce as nr\nfrom scipy.io import wavfile\nimport IPython\nimport sounddevice as sd\nfrom pysndfx import AudioEffectsChain\nimport python_speech_features\nimport sox\nimport math\nmy, sr = librosa.load(\n 'C:\\\\Users\\\\pranj\\\\Downloads\\\\IEMOCAP_full_release_withoutVideos\\\\IEMOCAP_full_release\\\\Session1\\\\sentences\\\\wav\\\\Ses01F_impro01\\\\Ses01F_impro01_F000.wav'\n )\nreduced_noise = nr.reduce_noise(audio_clip=my, noise_clip=my, verbose=True,\n prop_decrease=0.8)\nprint(IPython.display.Audio(data=my, rate=sr))\nsd.play(my, sr)\nstatus = sd.wait()\n",
"step-5": "import sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sea\nimport sklearn \nimport glob\nimport pydub\nfrom pydub import AudioSegment\nimport time\nimport librosa\nimport noisereduce as nr\nfrom scipy.io import wavfile\nimport IPython\nimport sounddevice as sd\nfrom pysndfx import AudioEffectsChain\nimport python_speech_features\nimport sox\nimport math\n\n\n\n#y,sr=librosa.load(r\"C:\\Users\\pranj\\OneDrive\\Desktop\\Project\\72843_lonemonk_approx-800-laughter-only-1.wav\")\nmy,sr=librosa.load(r\"C:\\Users\\pranj\\Downloads\\IEMOCAP_full_release_withoutVideos\\IEMOCAP_full_release\\Session1\\sentences\\wav\\Ses01F_impro01\\Ses01F_impro01_F000.wav\")\nreduced_noise = nr.reduce_noise(audio_clip=my, noise_clip=my, verbose=True,prop_decrease=0.8)\nprint(IPython.display.Audio(data=my, rate=sr))\nsd.play(my, sr)\nstatus = sd.wait()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@app.route('/api/v1/users/<int:user_id>', methods=['GET'])
def get_user(user_id):
try:
user = User.query.filter_by(id=user_id).first()
return jsonify({'user': user.serialize})
except:
abort(404)
@app.route('/api/v1/users', methods=['POST'])
def create_user():
if not request.json or not 'firstName' or not 'lastName' in request.json:
abort(400)
user = User(request.get_json()['firstName'], request.get_json()['lastName']
)
db.session.add(user)
db.session.commit()
return jsonify({'user': user.serialize}), 201
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
<|reserved_special_token_0|>
@app.errorhandler(405)
def not_found(error):
return make_response(jsonify({'error': 'Method Not Allowed'}), 405)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app.config.from_object(app_config[os.getenv('FLASK_ENV', 'production')])
<|reserved_special_token_0|>
@app.route('/api/v1/users/<int:user_id>', methods=['GET'])
def get_user(user_id):
try:
user = User.query.filter_by(id=user_id).first()
return jsonify({'user': user.serialize})
except:
abort(404)
@app.route('/api/v1/users', methods=['POST'])
def create_user():
if not request.json or not 'firstName' or not 'lastName' in request.json:
abort(400)
user = User(request.get_json()['firstName'], request.get_json()['lastName']
)
db.session.add(user)
db.session.commit()
return jsonify({'user': user.serialize}), 201
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.errorhandler(400)
def not_found(error):
return make_response(jsonify({'error': 'Bad Request'}), 400)
@app.errorhandler(405)
def not_found(error):
return make_response(jsonify({'error': 'Method Not Allowed'}), 405)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
app.config.from_object(app_config[os.getenv('FLASK_ENV', 'production')])
db = SQLAlchemy(app)
@app.route('/api/v1/users/<int:user_id>', methods=['GET'])
def get_user(user_id):
try:
user = User.query.filter_by(id=user_id).first()
return jsonify({'user': user.serialize})
except:
abort(404)
@app.route('/api/v1/users', methods=['POST'])
def create_user():
if not request.json or not 'firstName' or not 'lastName' in request.json:
abort(400)
user = User(request.get_json()['firstName'], request.get_json()['lastName']
)
db.session.add(user)
db.session.commit()
return jsonify({'user': user.serialize}), 201
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
@app.errorhandler(400)
def not_found(error):
return make_response(jsonify({'error': 'Bad Request'}), 400)
@app.errorhandler(405)
def not_found(error):
return make_response(jsonify({'error': 'Method Not Allowed'}), 405)
<|reserved_special_token_1|>
import os
from flask import Flask, jsonify, request, abort, make_response
from flask_sqlalchemy import SQLAlchemy
from .models import User
from .config import app_config
app = Flask(__name__)
# Select configuration by FLASK_ENV, defaulting to production settings.
app.config.from_object(app_config[os.getenv('FLASK_ENV', 'production')])
db = SQLAlchemy(app)
@app.route('/api/v1/users/<int:user_id>', methods=['GET'])
def get_user(user_id):
    """Return the user with the given id as JSON, or 404 if it does not exist."""
    user = User.query.filter_by(id=user_id).first()
    # .first() returns None when no row matches; the original bare
    # `except:` relied on the resulting AttributeError and would also
    # have masked any unrelated error as a 404.
    if user is None:
        abort(404)
    return jsonify({'user': user.serialize})
@app.route('/api/v1/users', methods=['POST'])
def create_user():
    """Create a user from JSON {'firstName', 'lastName'}; 400 on bad input, 201 on success."""
    # Fix: the original condition contained `not 'firstName'`, which is
    # always False (a non-empty string is truthy), so a missing firstName
    # was never rejected. Validate both required keys explicitly.
    payload = request.json
    if not payload or 'firstName' not in payload or 'lastName' not in payload:
        abort(400)
    user = User(payload['firstName'], payload['lastName'])
    db.session.add(user)
    db.session.commit()
    return jsonify({'user': user.serialize}), 201
# JSON error bodies instead of Flask's default HTML error pages.
# Fix: all three handlers were named `not_found`, so each definition
# shadowed the previous one in the module namespace; unique names keep
# every handler addressable and debuggable.
@app.errorhandler(404)
def handle_not_found(error):
    """404 as JSON."""
    return make_response(jsonify({'error': 'Not found'}), 404)


@app.errorhandler(400)
def handle_bad_request(error):
    """400 as JSON."""
    return make_response(jsonify({'error': 'Bad Request'}), 400)


@app.errorhandler(405)
def handle_method_not_allowed(error):
    """405 as JSON."""
    return make_response(jsonify({'error': 'Method Not Allowed'}), 405)
|
flexible
|
{
"blob_id": "f4519fa82ffc6bf945c7bb36d3761a708a06f641",
"index": 5933,
"step-1": "<mask token>\n\n\n@app.route('/api/v1/users/<int:user_id>', methods=['GET'])\ndef get_user(user_id):\n try:\n user = User.query.filter_by(id=user_id).first()\n return jsonify({'user': user.serialize})\n except:\n abort(404)\n\n\n@app.route('/api/v1/users', methods=['POST'])\ndef create_user():\n if not request.json or not 'firstName' or not 'lastName' in request.json:\n abort(400)\n user = User(request.get_json()['firstName'], request.get_json()['lastName']\n )\n db.session.add(user)\n db.session.commit()\n return jsonify({'user': user.serialize}), 201\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\n<mask token>\n\n\n@app.errorhandler(405)\ndef not_found(error):\n return make_response(jsonify({'error': 'Method Not Allowed'}), 405)\n",
"step-2": "<mask token>\napp.config.from_object(app_config[os.getenv('FLASK_ENV', 'production')])\n<mask token>\n\n\n@app.route('/api/v1/users/<int:user_id>', methods=['GET'])\ndef get_user(user_id):\n try:\n user = User.query.filter_by(id=user_id).first()\n return jsonify({'user': user.serialize})\n except:\n abort(404)\n\n\n@app.route('/api/v1/users', methods=['POST'])\ndef create_user():\n if not request.json or not 'firstName' or not 'lastName' in request.json:\n abort(400)\n user = User(request.get_json()['firstName'], request.get_json()['lastName']\n )\n db.session.add(user)\n db.session.commit()\n return jsonify({'user': user.serialize}), 201\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\n@app.errorhandler(400)\ndef not_found(error):\n return make_response(jsonify({'error': 'Bad Request'}), 400)\n\n\n@app.errorhandler(405)\ndef not_found(error):\n return make_response(jsonify({'error': 'Method Not Allowed'}), 405)\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config.from_object(app_config[os.getenv('FLASK_ENV', 'production')])\ndb = SQLAlchemy(app)\n\n\n@app.route('/api/v1/users/<int:user_id>', methods=['GET'])\ndef get_user(user_id):\n try:\n user = User.query.filter_by(id=user_id).first()\n return jsonify({'user': user.serialize})\n except:\n abort(404)\n\n\n@app.route('/api/v1/users', methods=['POST'])\ndef create_user():\n if not request.json or not 'firstName' or not 'lastName' in request.json:\n abort(400)\n user = User(request.get_json()['firstName'], request.get_json()['lastName']\n )\n db.session.add(user)\n db.session.commit()\n return jsonify({'user': user.serialize}), 201\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\n@app.errorhandler(400)\ndef not_found(error):\n return make_response(jsonify({'error': 'Bad Request'}), 400)\n\n\n@app.errorhandler(405)\ndef not_found(error):\n return make_response(jsonify({'error': 'Method Not Allowed'}), 405)\n",
"step-4": "import os\nfrom flask import Flask, jsonify, request, abort, make_response\nfrom flask_sqlalchemy import SQLAlchemy\nfrom .models import User\nfrom .config import app_config\napp = Flask(__name__)\napp.config.from_object(app_config[os.getenv('FLASK_ENV', 'production')])\ndb = SQLAlchemy(app)\n\n\n@app.route('/api/v1/users/<int:user_id>', methods=['GET'])\ndef get_user(user_id):\n try:\n user = User.query.filter_by(id=user_id).first()\n return jsonify({'user': user.serialize})\n except:\n abort(404)\n\n\n@app.route('/api/v1/users', methods=['POST'])\ndef create_user():\n if not request.json or not 'firstName' or not 'lastName' in request.json:\n abort(400)\n user = User(request.get_json()['firstName'], request.get_json()['lastName']\n )\n db.session.add(user)\n db.session.commit()\n return jsonify({'user': user.serialize}), 201\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\n@app.errorhandler(400)\ndef not_found(error):\n return make_response(jsonify({'error': 'Bad Request'}), 400)\n\n\n@app.errorhandler(405)\ndef not_found(error):\n return make_response(jsonify({'error': 'Method Not Allowed'}), 405)\n",
"step-5": null,
"step-ids": [
4,
6,
7,
8
]
}
|
[
4,
6,
7,
8
] |
from mathmodule import *
import sys

# Simple interactive calculator: read two integers and an operator,
# then delegate the arithmetic to mathmodule.
print("Welcome to my basic \'Calculator\'")
print("Please choose your best option (+, -, *, /) ")

# Read the first operand; re-prompt until the input parses as an int.
# Fix: the original bare `except:` also swallowed KeyboardInterrupt;
# int() only raises ValueError on bad text, so catch exactly that.
while True:
    try:
        A = int(input("Now Enter your first Value="))
        break
    except ValueError:
        print("Oops!", sys.exc_info()[0], "occurred.")

# Read the operator; re-prompt until it is one of the four supported.
# Fix: the original used raise Exception / except as control flow for a
# simple membership test.
while True:
    mathoparetor = input("Enter your Math oparetor=")
    if mathoparetor in ('+', '-', '*', '/'):
        break
    print("Opp, Enter Math again")

# Read the second operand, same validation as the first.
while True:
    try:
        B = int(input("Now Enter your second Value="))
        break
    except ValueError:
        print("Oops!", sys.exc_info()[0], "occurred.")

# Dispatch to the matching mathmodule helper.
if mathoparetor == '+':
    print('The addition number is', add(A, B))
elif mathoparetor == '-':
    print('The subtraction number is', sub(A, B))
elif mathoparetor == '*':
    print('The multiaplication number is', mull(A, B))
elif mathoparetor == '/':
    print('The division number is', divi(A, B))
|
normal
|
{
"blob_id": "1cca94040cdd8db9d98f587c62eff7c58eae7535",
"index": 6974,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\"Welcome to my basic 'Calculator'\")\nprint('Please choose your best option (+, -, *, /) ')\nwhile True:\n try:\n A = int(input('Now Enter your first Value='))\n break\n except:\n print('Oops!', sys.exc_info()[0], 'occurred.')\nwhile True:\n mathoparetor = input('Enter your Math oparetor=')\n try:\n if mathoparetor in ['+', '-', '*', '/']:\n break\n else:\n raise Exception\n except:\n print('Opp, Enter Math again')\nwhile True:\n try:\n B = int(input('Now Enter your second Value='))\n break\n except:\n print('Oops!', sys.exc_info()[0], 'occurred.')\nif mathoparetor == '+':\n print('The addition number is', add(A, B))\nelif mathoparetor == '-':\n print('The subtraction number is', sub(A, B))\nelif mathoparetor == '*':\n print('The multiaplication number is', mull(A, B))\nelif mathoparetor == '/':\n print('The division number is', divi(A, B))\n",
"step-3": "from mathmodule import *\nimport sys\nprint(\"Welcome to my basic 'Calculator'\")\nprint('Please choose your best option (+, -, *, /) ')\nwhile True:\n try:\n A = int(input('Now Enter your first Value='))\n break\n except:\n print('Oops!', sys.exc_info()[0], 'occurred.')\nwhile True:\n mathoparetor = input('Enter your Math oparetor=')\n try:\n if mathoparetor in ['+', '-', '*', '/']:\n break\n else:\n raise Exception\n except:\n print('Opp, Enter Math again')\nwhile True:\n try:\n B = int(input('Now Enter your second Value='))\n break\n except:\n print('Oops!', sys.exc_info()[0], 'occurred.')\nif mathoparetor == '+':\n print('The addition number is', add(A, B))\nelif mathoparetor == '-':\n print('The subtraction number is', sub(A, B))\nelif mathoparetor == '*':\n print('The multiaplication number is', mull(A, B))\nelif mathoparetor == '/':\n print('The division number is', divi(A, B))\n",
"step-4": "from mathmodule import *\nimport sys\n\nprint(\"Welcome to my basic \\'Calculator\\'\")\n\nprint(\"Please choose your best option (+, -, *, /) \")\n\n# user input part \nwhile True:\n try:\n A = int(input(\"Now Enter your first Value=\"))\n break\n except:\n print(\"Oops!\", sys.exc_info()[0], \"occurred.\")\nwhile True:\n mathoparetor = input(\"Enter your Math oparetor=\")\n try:\n if mathoparetor in ['+','-','*','/']:\n break\n else:\n raise Exception\n except:\n print(\"Opp, Enter Math again\")\n\nwhile True:\n try:\n B = int(input(\"Now Enter your second Value=\"))\n break\n except:\n print(\"Oops!\", sys.exc_info()[0], \"occurred.\")\n\n\n\n# programing for perform\nif mathoparetor == '+':\n print('The addition number is', add(A,B))\n\nelif mathoparetor == '-':\n print('The subtraction number is', sub(A,B))\n\nelif mathoparetor == '*':\n print('The multiaplication number is', mull(A,B))\n\nelif mathoparetor == '/':\n print('The division number is', divi(A,B))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
target = []
with open('IntegerArray.txt', 'r') as f:
target = f.readlines()
for x in range(len(target)):
target[x] = int(target[x])
def f(A):
if len(A) == 1:
return 0
else:
rightStart = len(A) // 2
leftArray = A[0:rightStart]
righArray = A[rightStart:]
B, b = count_and_sort(leftArray)
C, c = count_and_sort(righArray)
D, d = count_and_sort_split(B, C)
return b + c + d
def count_and_sort(A):
if len(A) == 1:
return A, 0
elif len(A) == 2:
if A[0] < A[1]:
return A, 0
else:
temp = A[0]
A[0] = A[1]
A[1] = temp
return A, 1
else:
rightStart = len(A) // 2
leftArray = A[0:rightStart]
righArray = A[rightStart:]
B, b = count_and_sort(leftArray)
C, c = count_and_sort(righArray)
D, d = count_and_sort_split(B, C)
return D, b + c + d
def count_and_sort_split(B, C):
result = []
nums = 0
i = 0
j = 0
while i < len(B) or j < len(C):
if i >= len(B):
result = result + C[j:]
break
elif j >= len(C):
result = result + B[i:]
break
if B[i] < C[j]:
result.append(B[i])
i += 1
elif B[i] > C[j]:
result.append(C[j])
nums = nums + len(B[i:])
j += 1
return result, nums
print(f(target))
|
normal
|
{
"blob_id": "b5611c668a40e1735c92d6d00867885023ad713f",
"index": 248,
"step-1": "<mask token>\n\n\ndef f(A):\n if len(A) == 1:\n return 0\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return b + c + d\n\n\n<mask token>\n\n\ndef count_and_sort_split(B, C):\n result = []\n nums = 0\n i = 0\n j = 0\n while i < len(B) or j < len(C):\n if i >= len(B):\n result = result + C[j:]\n break\n elif j >= len(C):\n result = result + B[i:]\n break\n if B[i] < C[j]:\n result.append(B[i])\n i += 1\n elif B[i] > C[j]:\n result.append(C[j])\n nums = nums + len(B[i:])\n j += 1\n return result, nums\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef f(A):\n if len(A) == 1:\n return 0\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return b + c + d\n\n\ndef count_and_sort(A):\n if len(A) == 1:\n return A, 0\n elif len(A) == 2:\n if A[0] < A[1]:\n return A, 0\n else:\n temp = A[0]\n A[0] = A[1]\n A[1] = temp\n return A, 1\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return D, b + c + d\n\n\ndef count_and_sort_split(B, C):\n result = []\n nums = 0\n i = 0\n j = 0\n while i < len(B) or j < len(C):\n if i >= len(B):\n result = result + C[j:]\n break\n elif j >= len(C):\n result = result + B[i:]\n break\n if B[i] < C[j]:\n result.append(B[i])\n i += 1\n elif B[i] > C[j]:\n result.append(C[j])\n nums = nums + len(B[i:])\n j += 1\n return result, nums\n\n\n<mask token>\n",
"step-3": "<mask token>\nwith open('IntegerArray.txt', 'r') as f:\n target = f.readlines()\nfor x in range(len(target)):\n target[x] = int(target[x])\n\n\ndef f(A):\n if len(A) == 1:\n return 0\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return b + c + d\n\n\ndef count_and_sort(A):\n if len(A) == 1:\n return A, 0\n elif len(A) == 2:\n if A[0] < A[1]:\n return A, 0\n else:\n temp = A[0]\n A[0] = A[1]\n A[1] = temp\n return A, 1\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return D, b + c + d\n\n\ndef count_and_sort_split(B, C):\n result = []\n nums = 0\n i = 0\n j = 0\n while i < len(B) or j < len(C):\n if i >= len(B):\n result = result + C[j:]\n break\n elif j >= len(C):\n result = result + B[i:]\n break\n if B[i] < C[j]:\n result.append(B[i])\n i += 1\n elif B[i] > C[j]:\n result.append(C[j])\n nums = nums + len(B[i:])\n j += 1\n return result, nums\n\n\nprint(f(target))\n",
"step-4": "target = []\nwith open('IntegerArray.txt', 'r') as f:\n target = f.readlines()\nfor x in range(len(target)):\n target[x] = int(target[x])\n\n\ndef f(A):\n if len(A) == 1:\n return 0\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return b + c + d\n\n\ndef count_and_sort(A):\n if len(A) == 1:\n return A, 0\n elif len(A) == 2:\n if A[0] < A[1]:\n return A, 0\n else:\n temp = A[0]\n A[0] = A[1]\n A[1] = temp\n return A, 1\n else:\n rightStart = len(A) // 2\n leftArray = A[0:rightStart]\n righArray = A[rightStart:]\n B, b = count_and_sort(leftArray)\n C, c = count_and_sort(righArray)\n D, d = count_and_sort_split(B, C)\n return D, b + c + d\n\n\ndef count_and_sort_split(B, C):\n result = []\n nums = 0\n i = 0\n j = 0\n while i < len(B) or j < len(C):\n if i >= len(B):\n result = result + C[j:]\n break\n elif j >= len(C):\n result = result + B[i:]\n break\n if B[i] < C[j]:\n result.append(B[i])\n i += 1\n elif B[i] > C[j]:\n result.append(C[j])\n nums = nums + len(B[i:])\n j += 1\n return result, nums\n\n\nprint(f(target))\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class Widget2:
def setup(self, MainWindow, res):
self.widget = QWidget()
self.grid = QGridLayout()
self.results = QLineEdit()
self.results.setText(res)
row = 3
col = 0
self.cb = QComboBox()
self.cb.addItems(['Advance Mode', 'Normal Mode'])
self.grid.addWidget(self.cb, 0, 1, 1, 2)
self.grid.addWidget(self.results, 1, 0, 2, 4)
buttons = ['AC', 'DEL', 'SIN', 'COS', 7, 8, 9, 'MOD', 4, 5, 6,
'TAN', 1, 2, 3, 'LOG', 0, 'SQUARE', 'CBRT', 'x!']
for button in buttons:
if col > 3:
col = 0
row += 1
buttonObject = Button(button, self.results)
self.grid.addWidget(buttonObject.b, row, col, 1, 1)
col += 1
self.widget.setLayout(self.grid)
MainWindow.setCentralWidget(self.widget)
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('Calculator')
self.widget1 = Widget1()
self.widget2 = Widget2()
self.startWidget1('')
def startWidget1(self, res):
global Data
self.widget1.setup(self, res)
Data = self.widget1.results.text()
self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)
self.show()
def startWidget2(self, res):
global Data
self.widget2.setup(self, res)
Data = self.widget2.results.text()
self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)
self.show()
def selectionchange1(self, i):
global Data
res = Data
self.startWidget2(res)
def selectionchange2(self, i):
global Data
res = Data
self.startWidget1(res)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Widget1:
<|reserved_special_token_0|>
class Widget2:
def setup(self, MainWindow, res):
self.widget = QWidget()
self.grid = QGridLayout()
self.results = QLineEdit()
self.results.setText(res)
row = 3
col = 0
self.cb = QComboBox()
self.cb.addItems(['Advance Mode', 'Normal Mode'])
self.grid.addWidget(self.cb, 0, 1, 1, 2)
self.grid.addWidget(self.results, 1, 0, 2, 4)
buttons = ['AC', 'DEL', 'SIN', 'COS', 7, 8, 9, 'MOD', 4, 5, 6,
'TAN', 1, 2, 3, 'LOG', 0, 'SQUARE', 'CBRT', 'x!']
for button in buttons:
if col > 3:
col = 0
row += 1
buttonObject = Button(button, self.results)
self.grid.addWidget(buttonObject.b, row, col, 1, 1)
col += 1
self.widget.setLayout(self.grid)
MainWindow.setCentralWidget(self.widget)
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('Calculator')
self.widget1 = Widget1()
self.widget2 = Widget2()
self.startWidget1('')
def startWidget1(self, res):
global Data
self.widget1.setup(self, res)
Data = self.widget1.results.text()
self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)
self.show()
def startWidget2(self, res):
global Data
self.widget2.setup(self, res)
Data = self.widget2.results.text()
self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)
self.show()
def selectionchange1(self, i):
global Data
res = Data
self.startWidget2(res)
def selectionchange2(self, i):
global Data
res = Data
self.startWidget1(res)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Button:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Widget1:
def setup(self, MainWindow, res):
self.widget = QWidget()
self.grid = QGridLayout()
self.results = QLineEdit()
self.results.setText(res)
row = 3
col = 0
self.cb = QComboBox()
self.cb.addItems(['Basic Mode', 'Advanced Mode'])
self.grid.addWidget(self.cb, 0, 1, 1, 2)
self.grid.addWidget(self.results, 1, 0, 2, 4)
buttons = ['AC', 'DEL', '√', '/', 7, 8, 9, '*', 4, 5, 6, '-', 1, 2,
3, '+', 0, '.', '=']
for button in buttons:
if col > 3:
col = 0
row += 1
buttonObject = Button(button, self.results)
if button == 0:
self.grid.addWidget(buttonObject.b, row, col, 1, 2)
col += 1
else:
self.grid.addWidget(buttonObject.b, row, col, 1, 1)
col += 1
self.widget.setLayout(self.grid)
MainWindow.setCentralWidget(self.widget)
class Widget2:
def setup(self, MainWindow, res):
self.widget = QWidget()
self.grid = QGridLayout()
self.results = QLineEdit()
self.results.setText(res)
row = 3
col = 0
self.cb = QComboBox()
self.cb.addItems(['Advance Mode', 'Normal Mode'])
self.grid.addWidget(self.cb, 0, 1, 1, 2)
self.grid.addWidget(self.results, 1, 0, 2, 4)
buttons = ['AC', 'DEL', 'SIN', 'COS', 7, 8, 9, 'MOD', 4, 5, 6,
'TAN', 1, 2, 3, 'LOG', 0, 'SQUARE', 'CBRT', 'x!']
for button in buttons:
if col > 3:
col = 0
row += 1
buttonObject = Button(button, self.results)
self.grid.addWidget(buttonObject.b, row, col, 1, 1)
col += 1
self.widget.setLayout(self.grid)
MainWindow.setCentralWidget(self.widget)
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('Calculator')
self.widget1 = Widget1()
self.widget2 = Widget2()
self.startWidget1('')
def startWidget1(self, res):
global Data
self.widget1.setup(self, res)
Data = self.widget1.results.text()
self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)
self.show()
def startWidget2(self, res):
global Data
self.widget2.setup(self, res)
Data = self.widget2.results.text()
self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)
self.show()
def selectionchange1(self, i):
global Data
res = Data
self.startWidget2(res)
def selectionchange2(self, i):
global Data
res = Data
self.startWidget1(res)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Button:
def __init__(self, text, results):
self.b = QPushButton(str(text))
self.text = text
self.results = results
self.b.clicked.connect(lambda : self.handleInput(self.text))
def handleInput(self, v):
global Data
try:
if self.results.text() == 'INVALID!':
self.results.setText('')
if self.results.text() != '':
if self.results.text()[-1] in ['*', '+', '-', '/'] and v in [
'-', '*', '+', '/', '√', 'CBRT', 'SIN', 'COS', 'LOG',
'MOD', 'TAN', 'MOD']:
return
elif v == 'CBRT':
self.results.setText(str(round(float(eval(self.results.
text())) ** (1 / 3), 4)))
elif v == 'MOD':
if '.' in self.results.text():
self.results.setText(str(abs(float(self.results.
text()))))
else:
self.results.setText(str(abs(int(self.results.text())))
)
elif v == 'LOG':
self.results.setText(str(math.log10(abs(float(eval(self
.results.text()))))))
elif v == 'SQUARE':
if '.' in self.results.text():
self.results.setText(str(float(self.results.text()) **
2))
else:
self.results.setText(str(int(self.results.text()) ** 2)
)
elif v == 'SIN':
self.results.setText(str(math.sin(float(eval(self.
results.text())))))
elif v == 'COS':
self.results.setText(str(math.cos(float(eval(self.
results.text())))))
elif v == 'TAN':
self.results.setText(str(math.tan(float(eval(self.
results.text())))))
elif v == 'x!':
if '.' in str(eval(self.results.text())):
self.results.setText('INVALID!')
else:
self.results.setText(str(math.factorial(abs(int(
eval(self.results.text()))))))
elif self.results.text()[-1] == '/' and v == 0:
return
elif v == '=':
if self.results.text()[-1] in ['*', '-', '.', '+', '/']:
return
res = eval(self.results.text())
self.results.setText(str(res))
elif v == 'AC':
self.results.setText('')
elif v == 'DEL':
self.results.setText(self.results.text()[:-1])
elif v == '√' and self.results.text() != '':
self.results.setText(str(float(self.results.text()) ** 0.5)
)
elif v == '√' and self.results.text() == '':
return
else:
current_value = self.results.text()
new_value = current_value + str(v)
self.results.setText(new_value)
elif type(v) == int:
current_value = self.results.text()
new_value = current_value + str(v)
self.results.setText(new_value)
except:
self.results.setText('INVALID!')
Data = self.results.text()
class Widget1:
def setup(self, MainWindow, res):
self.widget = QWidget()
self.grid = QGridLayout()
self.results = QLineEdit()
self.results.setText(res)
row = 3
col = 0
self.cb = QComboBox()
self.cb.addItems(['Basic Mode', 'Advanced Mode'])
self.grid.addWidget(self.cb, 0, 1, 1, 2)
self.grid.addWidget(self.results, 1, 0, 2, 4)
buttons = ['AC', 'DEL', '√', '/', 7, 8, 9, '*', 4, 5, 6, '-', 1, 2,
3, '+', 0, '.', '=']
for button in buttons:
if col > 3:
col = 0
row += 1
buttonObject = Button(button, self.results)
if button == 0:
self.grid.addWidget(buttonObject.b, row, col, 1, 2)
col += 1
else:
self.grid.addWidget(buttonObject.b, row, col, 1, 1)
col += 1
self.widget.setLayout(self.grid)
MainWindow.setCentralWidget(self.widget)
class Widget2:
def setup(self, MainWindow, res):
self.widget = QWidget()
self.grid = QGridLayout()
self.results = QLineEdit()
self.results.setText(res)
row = 3
col = 0
self.cb = QComboBox()
self.cb.addItems(['Advance Mode', 'Normal Mode'])
self.grid.addWidget(self.cb, 0, 1, 1, 2)
self.grid.addWidget(self.results, 1, 0, 2, 4)
buttons = ['AC', 'DEL', 'SIN', 'COS', 7, 8, 9, 'MOD', 4, 5, 6,
'TAN', 1, 2, 3, 'LOG', 0, 'SQUARE', 'CBRT', 'x!']
for button in buttons:
if col > 3:
col = 0
row += 1
buttonObject = Button(button, self.results)
self.grid.addWidget(buttonObject.b, row, col, 1, 1)
col += 1
self.widget.setLayout(self.grid)
MainWindow.setCentralWidget(self.widget)
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
self.setWindowTitle('Calculator')
self.widget1 = Widget1()
self.widget2 = Widget2()
self.startWidget1('')
def startWidget1(self, res):
global Data
self.widget1.setup(self, res)
Data = self.widget1.results.text()
self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)
self.show()
def startWidget2(self, res):
global Data
self.widget2.setup(self, res)
Data = self.widget2.results.text()
self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)
self.show()
def selectionchange1(self, i):
global Data
res = Data
self.startWidget2(res)
def selectionchange2(self, i):
global Data
res = Data
self.startWidget1(res)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from PyQt5.QtWidgets import *
import sys
import math
Data = ''
class Button:
def __init__(self, text, results):
self.b = QPushButton(str(text))
self.text = text
self.results = results
self.b.clicked.connect(lambda: self.handleInput(
self.text)) # Important because we need to pass only function name with arguments here that is why we use lambda here
def handleInput(self, v):
global Data
try:
if self.results.text() == 'INVALID!':
self.results.setText("")
if self.results.text() != '':
if self.results.text()[-1] in ['*', '+', '-', '/'] and v in ['-', '*', '+', '/', '√', 'CBRT', "SIN",
"COS", "LOG", "MOD", "TAN", "MOD"]:
return
elif v == 'CBRT':
self.results.setText(str(round(float(eval(self.results.text())) ** (1 / 3), 4), ))
elif v == 'MOD':
if '.' in self.results.text():
self.results.setText(str(abs(float(self.results.text()))))
else:
self.results.setText(str(abs(int(self.results.text()))))
elif v == 'LOG':
self.results.setText(str(math.log10(abs(float(eval(self.results.text()))))))
elif v == 'SQUARE':
if '.' in self.results.text():
self.results.setText(str(float(self.results.text()) ** 2))
else:
self.results.setText(str(int(self.results.text()) ** 2))
elif v == "SIN":
self.results.setText(str(math.sin(float(eval(self.results.text())))))
elif v == "COS":
self.results.setText(str(math.cos(float(eval(self.results.text())))))
elif v == "TAN":
self.results.setText(str(math.tan(float(eval(self.results.text())))))
elif v == 'x!':
if '.' in str(eval(self.results.text())):
self.results.setText("INVALID!")
else:
self.results.setText(str(math.factorial(abs(int(eval(self.results.text()))))))
elif self.results.text()[-1] == '/' and v == 0:
return
elif v == "=":
if self.results.text()[-1] in ['*', '-', '.', '+', '/']:
return
res = eval(self.results.text())
self.results.setText(str(res))
elif v == "AC":
self.results.setText("")
elif v == "DEL":
self.results.setText(self.results.text()[:-1])
elif v == "√" and self.results.text() != '':
self.results.setText(str(float(self.results.text()) ** 0.5))
elif v == "√" and self.results.text() == '':
return
else:
current_value = self.results.text()
new_value = current_value + str(v)
self.results.setText(new_value)
else:
if type(v) == int:
current_value = self.results.text()
new_value = current_value + str(v)
self.results.setText(new_value)
except:
self.results.setText("INVALID!")
Data = self.results.text()
class Widget1():
def setup(self, MainWindow, res):
self.widget = QWidget()
self.grid = QGridLayout()
self.results = QLineEdit()
self.results.setText(res)
row = 3
col = 0
self.cb = QComboBox()
self.cb.addItems(["Basic Mode", "Advanced Mode"])
self.grid.addWidget(self.cb, 0, 1, 1, 2)
self.grid.addWidget(self.results, 1, 0, 2, 4)
buttons = ["AC", "DEL", "√", "/",
7, 8, 9, "*",
4, 5, 6, "-",
1, 2, 3, "+",
0, ".", "="]
for button in buttons:
if col > 3:
col = 0
row += 1
buttonObject = Button(button, self.results)
if button == 0:
self.grid.addWidget(buttonObject.b, row, col, 1, 2)
col += 1
else:
self.grid.addWidget(buttonObject.b, row, col, 1, 1)
col += 1
self.widget.setLayout(self.grid)
MainWindow.setCentralWidget(self.widget)
class Widget2():
def setup(self, MainWindow, res):
self.widget = QWidget()
self.grid = QGridLayout()
self.results = QLineEdit()
self.results.setText(res)
row = 3
col = 0
self.cb = QComboBox()
self.cb.addItems(["Advance Mode", "Normal Mode"])
self.grid.addWidget(self.cb, 0, 1, 1, 2)
self.grid.addWidget(self.results, 1, 0, 2, 4)
buttons = ["AC", "DEL", "SIN", "COS",
7, 8, 9, "MOD",
4, 5, 6, "TAN",
1, 2, 3, "LOG",
0, "SQUARE", "CBRT", 'x!']
for button in buttons:
if col > 3:
col = 0
row += 1
buttonObject = Button(button, self.results)
self.grid.addWidget(buttonObject.b, row, col, 1, 1)
col += 1
self.widget.setLayout(self.grid)
MainWindow.setCentralWidget(self.widget)
class MainWindow(QMainWindow):
    """Top-level calculator window; toggles between the two keypad layouts."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Calculator")
        self.widget1 = Widget1()
        self.widget2 = Widget2()
        self.startWidget1("")

    def _activate(self, widget, handler, res):
        # Shared wiring for both layouts: rebuild the central widget,
        # remember the current display text in the module-level Data,
        # and hook the mode-selector combo box to the given handler.
        global Data
        widget.setup(self, res)
        Data = widget.results.text()
        widget.cb.currentIndexChanged.connect(handler)
        self.show()

    def startWidget1(self, res):
        """Show the basic layout, seeding its display with *res*."""
        self._activate(self.widget1, self.selectionchange1, res)

    def startWidget2(self, res):
        """Show the advanced layout, seeding its display with *res*."""
        self._activate(self.widget2, self.selectionchange2, res)

    def selectionchange1(self, i):
        # Mode selector changed on the basic layout: carry the display over.
        self.startWidget2(Data)

    def selectionchange2(self, i):
        # Mode selector changed on the advanced layout: carry the display over.
        self.startWidget1(Data)
if __name__ == "__main__":
    # Entry point: build the Qt application, show the main window, and hand
    # control to the Qt event loop until the window is closed.
    app = QApplication(sys.argv)
    w = MainWindow()
    sys.exit(app.exec_())
|
flexible
|
{
"blob_id": "b08cface601ee07125090f3ae03a3120974688f2",
"index": 8765,
"step-1": "<mask token>\n\n\nclass Widget2:\n\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n row = 3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems(['Advance Mode', 'Normal Mode'])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = ['AC', 'DEL', 'SIN', 'COS', 7, 8, 9, 'MOD', 4, 5, 6,\n 'TAN', 1, 2, 3, 'LOG', 0, 'SQUARE', 'CBRT', 'x!']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n col += 1\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setWindowTitle('Calculator')\n self.widget1 = Widget1()\n self.widget2 = Widget2()\n self.startWidget1('')\n\n def startWidget1(self, res):\n global Data\n self.widget1.setup(self, res)\n Data = self.widget1.results.text()\n self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)\n self.show()\n\n def startWidget2(self, res):\n global Data\n self.widget2.setup(self, res)\n Data = self.widget2.results.text()\n self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)\n self.show()\n\n def selectionchange1(self, i):\n global Data\n res = Data\n self.startWidget2(res)\n\n def selectionchange2(self, i):\n global Data\n res = Data\n self.startWidget1(res)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Widget1:\n <mask token>\n\n\nclass Widget2:\n\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n row = 3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems(['Advance Mode', 'Normal Mode'])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = ['AC', 'DEL', 'SIN', 'COS', 7, 8, 9, 'MOD', 4, 5, 6,\n 'TAN', 1, 2, 3, 'LOG', 0, 'SQUARE', 'CBRT', 'x!']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n col += 1\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setWindowTitle('Calculator')\n self.widget1 = Widget1()\n self.widget2 = Widget2()\n self.startWidget1('')\n\n def startWidget1(self, res):\n global Data\n self.widget1.setup(self, res)\n Data = self.widget1.results.text()\n self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)\n self.show()\n\n def startWidget2(self, res):\n global Data\n self.widget2.setup(self, res)\n Data = self.widget2.results.text()\n self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)\n self.show()\n\n def selectionchange1(self, i):\n global Data\n res = Data\n self.startWidget2(res)\n\n def selectionchange2(self, i):\n global Data\n res = Data\n self.startWidget1(res)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Button:\n <mask token>\n <mask token>\n\n\nclass Widget1:\n\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n row = 3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems(['Basic Mode', 'Advanced Mode'])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = ['AC', 'DEL', '√', '/', 7, 8, 9, '*', 4, 5, 6, '-', 1, 2,\n 3, '+', 0, '.', '=']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n if button == 0:\n self.grid.addWidget(buttonObject.b, row, col, 1, 2)\n col += 1\n else:\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n col += 1\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass Widget2:\n\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n row = 3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems(['Advance Mode', 'Normal Mode'])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = ['AC', 'DEL', 'SIN', 'COS', 7, 8, 9, 'MOD', 4, 5, 6,\n 'TAN', 1, 2, 3, 'LOG', 0, 'SQUARE', 'CBRT', 'x!']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n col += 1\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setWindowTitle('Calculator')\n self.widget1 = Widget1()\n self.widget2 = Widget2()\n self.startWidget1('')\n\n def startWidget1(self, res):\n global Data\n self.widget1.setup(self, res)\n Data = self.widget1.results.text()\n self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)\n self.show()\n\n def 
startWidget2(self, res):\n global Data\n self.widget2.setup(self, res)\n Data = self.widget2.results.text()\n self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)\n self.show()\n\n def selectionchange1(self, i):\n global Data\n res = Data\n self.startWidget2(res)\n\n def selectionchange2(self, i):\n global Data\n res = Data\n self.startWidget1(res)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Button:\n\n def __init__(self, text, results):\n self.b = QPushButton(str(text))\n self.text = text\n self.results = results\n self.b.clicked.connect(lambda : self.handleInput(self.text))\n\n def handleInput(self, v):\n global Data\n try:\n if self.results.text() == 'INVALID!':\n self.results.setText('')\n if self.results.text() != '':\n if self.results.text()[-1] in ['*', '+', '-', '/'] and v in [\n '-', '*', '+', '/', '√', 'CBRT', 'SIN', 'COS', 'LOG',\n 'MOD', 'TAN', 'MOD']:\n return\n elif v == 'CBRT':\n self.results.setText(str(round(float(eval(self.results.\n text())) ** (1 / 3), 4)))\n elif v == 'MOD':\n if '.' in self.results.text():\n self.results.setText(str(abs(float(self.results.\n text()))))\n else:\n self.results.setText(str(abs(int(self.results.text())))\n )\n elif v == 'LOG':\n self.results.setText(str(math.log10(abs(float(eval(self\n .results.text()))))))\n elif v == 'SQUARE':\n if '.' in self.results.text():\n self.results.setText(str(float(self.results.text()) **\n 2))\n else:\n self.results.setText(str(int(self.results.text()) ** 2)\n )\n elif v == 'SIN':\n self.results.setText(str(math.sin(float(eval(self.\n results.text())))))\n elif v == 'COS':\n self.results.setText(str(math.cos(float(eval(self.\n results.text())))))\n elif v == 'TAN':\n self.results.setText(str(math.tan(float(eval(self.\n results.text())))))\n elif v == 'x!':\n if '.' 
in str(eval(self.results.text())):\n self.results.setText('INVALID!')\n else:\n self.results.setText(str(math.factorial(abs(int(\n eval(self.results.text()))))))\n elif self.results.text()[-1] == '/' and v == 0:\n return\n elif v == '=':\n if self.results.text()[-1] in ['*', '-', '.', '+', '/']:\n return\n res = eval(self.results.text())\n self.results.setText(str(res))\n elif v == 'AC':\n self.results.setText('')\n elif v == 'DEL':\n self.results.setText(self.results.text()[:-1])\n elif v == '√' and self.results.text() != '':\n self.results.setText(str(float(self.results.text()) ** 0.5)\n )\n elif v == '√' and self.results.text() == '':\n return\n else:\n current_value = self.results.text()\n new_value = current_value + str(v)\n self.results.setText(new_value)\n elif type(v) == int:\n current_value = self.results.text()\n new_value = current_value + str(v)\n self.results.setText(new_value)\n except:\n self.results.setText('INVALID!')\n Data = self.results.text()\n\n\nclass Widget1:\n\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n row = 3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems(['Basic Mode', 'Advanced Mode'])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = ['AC', 'DEL', '√', '/', 7, 8, 9, '*', 4, 5, 6, '-', 1, 2,\n 3, '+', 0, '.', '=']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n if button == 0:\n self.grid.addWidget(buttonObject.b, row, col, 1, 2)\n col += 1\n else:\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n col += 1\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass Widget2:\n\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n row = 3\n col = 0\n self.cb = QComboBox()\n 
self.cb.addItems(['Advance Mode', 'Normal Mode'])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = ['AC', 'DEL', 'SIN', 'COS', 7, 8, 9, 'MOD', 4, 5, 6,\n 'TAN', 1, 2, 3, 'LOG', 0, 'SQUARE', 'CBRT', 'x!']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n col += 1\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setWindowTitle('Calculator')\n self.widget1 = Widget1()\n self.widget2 = Widget2()\n self.startWidget1('')\n\n def startWidget1(self, res):\n global Data\n self.widget1.setup(self, res)\n Data = self.widget1.results.text()\n self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)\n self.show()\n\n def startWidget2(self, res):\n global Data\n self.widget2.setup(self, res)\n Data = self.widget2.results.text()\n self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)\n self.show()\n\n def selectionchange1(self, i):\n global Data\n res = Data\n self.startWidget2(res)\n\n def selectionchange2(self, i):\n global Data\n res = Data\n self.startWidget1(res)\n\n\n<mask token>\n",
"step-5": "from PyQt5.QtWidgets import *\nimport sys\nimport math\n\nData = ''\n\n\nclass Button:\n def __init__(self, text, results):\n self.b = QPushButton(str(text))\n self.text = text\n self.results = results\n self.b.clicked.connect(lambda: self.handleInput(\n self.text)) # Important because we need to pass only function name with arguments here that is why we use lambda here\n\n def handleInput(self, v):\n global Data\n try:\n if self.results.text() == 'INVALID!':\n self.results.setText(\"\")\n if self.results.text() != '':\n if self.results.text()[-1] in ['*', '+', '-', '/'] and v in ['-', '*', '+', '/', '√', 'CBRT', \"SIN\",\n \"COS\", \"LOG\", \"MOD\", \"TAN\", \"MOD\"]:\n return\n elif v == 'CBRT':\n self.results.setText(str(round(float(eval(self.results.text())) ** (1 / 3), 4), ))\n elif v == 'MOD':\n if '.' in self.results.text():\n self.results.setText(str(abs(float(self.results.text()))))\n else:\n self.results.setText(str(abs(int(self.results.text()))))\n elif v == 'LOG':\n self.results.setText(str(math.log10(abs(float(eval(self.results.text()))))))\n elif v == 'SQUARE':\n if '.' in self.results.text():\n self.results.setText(str(float(self.results.text()) ** 2))\n else:\n self.results.setText(str(int(self.results.text()) ** 2))\n elif v == \"SIN\":\n self.results.setText(str(math.sin(float(eval(self.results.text())))))\n elif v == \"COS\":\n self.results.setText(str(math.cos(float(eval(self.results.text())))))\n elif v == \"TAN\":\n self.results.setText(str(math.tan(float(eval(self.results.text())))))\n elif v == 'x!':\n if '.' 
in str(eval(self.results.text())):\n self.results.setText(\"INVALID!\")\n else:\n self.results.setText(str(math.factorial(abs(int(eval(self.results.text()))))))\n elif self.results.text()[-1] == '/' and v == 0:\n return\n elif v == \"=\":\n if self.results.text()[-1] in ['*', '-', '.', '+', '/']:\n return\n res = eval(self.results.text())\n self.results.setText(str(res))\n elif v == \"AC\":\n self.results.setText(\"\")\n elif v == \"DEL\":\n self.results.setText(self.results.text()[:-1])\n elif v == \"√\" and self.results.text() != '':\n self.results.setText(str(float(self.results.text()) ** 0.5))\n elif v == \"√\" and self.results.text() == '':\n return\n else:\n current_value = self.results.text()\n new_value = current_value + str(v)\n self.results.setText(new_value)\n else:\n if type(v) == int:\n current_value = self.results.text()\n new_value = current_value + str(v)\n self.results.setText(new_value)\n except:\n self.results.setText(\"INVALID!\")\n Data = self.results.text()\n\n\nclass Widget1():\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n\n row = 3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems([\"Basic Mode\", \"Advanced Mode\"])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = [\"AC\", \"DEL\", \"√\", \"/\",\n 7, 8, 9, \"*\",\n 4, 5, 6, \"-\",\n 1, 2, 3, \"+\",\n 0, \".\", \"=\"]\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n\n buttonObject = Button(button, self.results)\n\n if button == 0:\n self.grid.addWidget(buttonObject.b, row, col, 1, 2)\n col += 1\n else:\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n\n col += 1\n\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass Widget2():\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n\n row = 
3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems([\"Advance Mode\", \"Normal Mode\"])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = [\"AC\", \"DEL\", \"SIN\", \"COS\",\n 7, 8, 9, \"MOD\",\n 4, 5, 6, \"TAN\",\n 1, 2, 3, \"LOG\",\n 0, \"SQUARE\", \"CBRT\", 'x!']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n\n col += 1\n\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Calculator\")\n self.widget1 = Widget1()\n self.widget2 = Widget2()\n self.startWidget1(\"\")\n\n def startWidget1(self, res):\n global Data\n self.widget1.setup(self, res)\n Data = self.widget1.results.text()\n self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)\n self.show()\n\n def startWidget2(self, res):\n global Data\n self.widget2.setup(self, res)\n Data = self.widget2.results.text()\n self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)\n self.show()\n\n def selectionchange1(self, i):\n global Data\n res = Data\n self.startWidget2(res)\n\n def selectionchange2(self, i):\n global Data\n res = Data\n self.startWidget1(res)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n w = MainWindow()\n sys.exit(app.exec_())\n\n\n\n\n",
"step-ids": [
8,
9,
11,
13,
17
]
}
|
[
8,
9,
11,
13,
17
] |
<|reserved_special_token_0|>
def fibonaci(n):
if n <= 1:
return n
F = np.empty(shape=n + 1)
F[0] = 0
F[1] = 1
for i in range(2, len(F)):
F[i] = F[i - 1] + F[i - 2]
return F[n]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fibonaci(n):
if n <= 1:
return n
F = np.empty(shape=n + 1)
F[0] = 0
F[1] = 1
for i in range(2, len(F)):
F[i] = F[i - 1] + F[i - 2]
return F[n]
<|reserved_special_token_0|>
print(int(fibonaci(n)))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fibonaci(n):
if n <= 1:
return n
F = np.empty(shape=n + 1)
F[0] = 0
F[1] = 1
for i in range(2, len(F)):
F[i] = F[i - 1] + F[i - 2]
return F[n]
n = int(input())
print(int(fibonaci(n)))
<|reserved_special_token_1|>
import numpy as np
def fibonaci(n):
if n <= 1:
return n
F = np.empty(shape=n + 1)
F[0] = 0
F[1] = 1
for i in range(2, len(F)):
F[i] = F[i - 1] + F[i - 2]
return F[n]
n = int(input())
print(int(fibonaci(n)))
<|reserved_special_token_1|>
# Uses python3
import numpy as np
def fibonaci(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    The original implementation filled a float64 NumPy array, which loses
    integer precision for n >= 79 (values exceed 2**53) and needs O(n)
    memory. Plain Python integers are exact for any n and only the last
    two values are kept.

    Args:
        n: non-negative index into the Fibonacci sequence.

    Returns:
        F(n) as an exact int (for n <= 1, returns n itself).
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(2, n + 1):
        prev, curr = curr, prev + curr
    return curr
n = int(input())  # read the target Fibonacci index from stdin
print(int(fibonaci(n)))  # int() normalizes the result in case it is returned as a float
|
flexible
|
{
"blob_id": "67516551b595c02e70a0ba4005df8a97ba71b17e",
"index": 1419,
"step-1": "<mask token>\n\n\ndef fibonaci(n):\n if n <= 1:\n return n\n F = np.empty(shape=n + 1)\n F[0] = 0\n F[1] = 1\n for i in range(2, len(F)):\n F[i] = F[i - 1] + F[i - 2]\n return F[n]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fibonaci(n):\n if n <= 1:\n return n\n F = np.empty(shape=n + 1)\n F[0] = 0\n F[1] = 1\n for i in range(2, len(F)):\n F[i] = F[i - 1] + F[i - 2]\n return F[n]\n\n\n<mask token>\nprint(int(fibonaci(n)))\n",
"step-3": "<mask token>\n\n\ndef fibonaci(n):\n if n <= 1:\n return n\n F = np.empty(shape=n + 1)\n F[0] = 0\n F[1] = 1\n for i in range(2, len(F)):\n F[i] = F[i - 1] + F[i - 2]\n return F[n]\n\n\nn = int(input())\nprint(int(fibonaci(n)))\n",
"step-4": "import numpy as np\n\n\ndef fibonaci(n):\n if n <= 1:\n return n\n F = np.empty(shape=n + 1)\n F[0] = 0\n F[1] = 1\n for i in range(2, len(F)):\n F[i] = F[i - 1] + F[i - 2]\n return F[n]\n\n\nn = int(input())\nprint(int(fibonaci(n)))\n",
"step-5": "# Uses python3\nimport numpy as np\n\n\ndef fibonaci(n):\n if n <= 1:\n return n\n\n F = np.empty(shape=(n + 1))\n F[0] = 0\n F[1] = 1\n for i in range(2, len(F)):\n F[i] = F[i - 1] + F[i - 2]\n\n return F[n]\n\n\nn = int(input())\nprint(int(fibonaci(n)))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required=True, help='Path to the image')
args = vars(ap.parse_args())
image = cv2.imread(args['image'])
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(image, (5, 5), 0)
cv2.imshow('Image', image)
T = mahotas.thresholding.otsu(blurred)
print("[INFO] Otsu's threshold {}".format(T))
thresh = image.copy()
thresh[thresh > T] = 255
thresh[thresh < 255] = 0
thresh = cv2.bitwise_not(thresh)
cv2.imshow('Otsu', thresh)
T = mahotas.thresholding.rc(blurred)
print('[INFO] Riddler-Calvard: {}'.format(T))
thresh = image.copy()
thresh[thresh > T] = 255
thresh[thresh < 255] = 0
thresh = cv2.bitwise_not(thresh)
cv2.imshow('Riddler-Calvard', thresh)
cv2.waitKey(0)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required=True, help='Path to the image')
args = vars(ap.parse_args())
image = cv2.imread(args['image'])
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(image, (5, 5), 0)
cv2.imshow('Image', image)
T = mahotas.thresholding.otsu(blurred)
print("[INFO] Otsu's threshold {}".format(T))
thresh = image.copy()
thresh[thresh > T] = 255
thresh[thresh < 255] = 0
thresh = cv2.bitwise_not(thresh)
cv2.imshow('Otsu', thresh)
T = mahotas.thresholding.rc(blurred)
print('[INFO] Riddler-Calvard: {}'.format(T))
thresh = image.copy()
thresh[thresh > T] = 255
thresh[thresh < 255] = 0
thresh = cv2.bitwise_not(thresh)
cv2.imshow('Riddler-Calvard', thresh)
cv2.waitKey(0)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import numpy as np
import argparse
import mahotas
import cv2
from numpy.matrixlib.defmatrix import matrix
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-i', '--image', required=True, help='Path to the image')
args = vars(ap.parse_args())
image = cv2.imread(args['image'])
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(image, (5, 5), 0)
cv2.imshow('Image', image)
T = mahotas.thresholding.otsu(blurred)
print("[INFO] Otsu's threshold {}".format(T))
thresh = image.copy()
thresh[thresh > T] = 255
thresh[thresh < 255] = 0
thresh = cv2.bitwise_not(thresh)
cv2.imshow('Otsu', thresh)
T = mahotas.thresholding.rc(blurred)
print('[INFO] Riddler-Calvard: {}'.format(T))
thresh = image.copy()
thresh[thresh > T] = 255
thresh[thresh < 255] = 0
thresh = cv2.bitwise_not(thresh)
cv2.imshow('Riddler-Calvard', thresh)
cv2.waitKey(0)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
"""
Otsu method for automatic estimation of $T$ threshold value
- assumes two maxima of grayscale histogram & searches for optimal separation
Parameters
Usage
Example
$ python <scriptname>.py --image ../img/<filename>.png
## Explain
"""
import numpy as np
import argparse
import mahotas
import cv2
from numpy.matrixlib.defmatrix import matrix
def _binarize(image, T):
    """Return an inverted binary mask of *image* for threshold *T*.

    Pixels strictly above T become 255, the rest 0, and the result is then
    bit-inverted (so foreground ends up white in the displayed windows).
    """
    thresh = image.copy()
    thresh[thresh > T] = 255
    thresh[thresh < 255] = 0
    return cv2.bitwise_not(thresh)


def main():
    """Compare Otsu and Riddler-Calvard automatic threshold estimation.

    Loads the image given by -i/--image, converts it to grayscale, blurs it
    to suppress noise, then shows the grayscale original plus both binarized
    results in OpenCV windows. Blocks until a key is pressed.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    args = vars(ap.parse_args())

    image = cv2.imread(args["image"])
    # Preprocessing: grayscale + 5x5 Gaussian blur before threshold estimation.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(image, (5, 5), 0)
    cv2.imshow("Image", image)

    # Otsu
    T = mahotas.thresholding.otsu(blurred)
    print("[INFO] Otsu's threshold {}".format(T))
    cv2.imshow("Otsu", _binarize(image, T))

    # Riddler-Calvard
    T = mahotas.thresholding.rc(blurred)
    print("[INFO] Riddler-Calvard: {}".format(T))
    cv2.imshow("Riddler-Calvard", _binarize(image, T))

    cv2.waitKey(0)
if __name__=="__main__":
    # Run the demo only when executed as a script, not when imported.
    main()
|
flexible
|
{
"blob_id": "0547751af7bbac42351476dde591d13d40fb37eb",
"index": 7811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument('-i', '--image', required=True, help='Path to the image')\n args = vars(ap.parse_args())\n image = cv2.imread(args['image'])\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(image, (5, 5), 0)\n cv2.imshow('Image', image)\n T = mahotas.thresholding.otsu(blurred)\n print(\"[INFO] Otsu's threshold {}\".format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow('Otsu', thresh)\n T = mahotas.thresholding.rc(blurred)\n print('[INFO] Riddler-Calvard: {}'.format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow('Riddler-Calvard', thresh)\n cv2.waitKey(0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument('-i', '--image', required=True, help='Path to the image')\n args = vars(ap.parse_args())\n image = cv2.imread(args['image'])\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(image, (5, 5), 0)\n cv2.imshow('Image', image)\n T = mahotas.thresholding.otsu(blurred)\n print(\"[INFO] Otsu's threshold {}\".format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow('Otsu', thresh)\n T = mahotas.thresholding.rc(blurred)\n print('[INFO] Riddler-Calvard: {}'.format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow('Riddler-Calvard', thresh)\n cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport numpy as np\nimport argparse\nimport mahotas\nimport cv2\nfrom numpy.matrixlib.defmatrix import matrix\n\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument('-i', '--image', required=True, help='Path to the image')\n args = vars(ap.parse_args())\n image = cv2.imread(args['image'])\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(image, (5, 5), 0)\n cv2.imshow('Image', image)\n T = mahotas.thresholding.otsu(blurred)\n print(\"[INFO] Otsu's threshold {}\".format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow('Otsu', thresh)\n T = mahotas.thresholding.rc(blurred)\n print('[INFO] Riddler-Calvard: {}'.format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow('Riddler-Calvard', thresh)\n cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\"\"\"\nOtsu method for automatic estimation of $T$ threshold value\n - assumes two maxima of grayscale histogram & searches for optimal separation\n\nParameters\n\nUsage\n\nExample\n $ python <scriptname>.py --image ../img/<filename>.png\n\n## Explain\n\n\"\"\"\nimport numpy as np\nimport argparse\nimport mahotas\nimport cv2\nfrom numpy.matrixlib.defmatrix import matrix\n\ndef main():\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-i\", \"--image\", required=True, help=\"Path to the image\")\n args = vars(ap.parse_args())\n\n image = cv2.imread(args[\"image\"])\n #preprocessing\n image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(image, (5,5), 0)\n cv2.imshow(\"Image\", image)\n\n # Otsu\n T = mahotas.thresholding.otsu(blurred)\n print(\"[INFO] Otsu's threshold {}\".format(T))\n\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow(\"Otsu\", thresh)\n\n # Riddler-Calvard\n T = mahotas.thresholding.rc(blurred)\n print(\"[INFO] Riddler-Calvard: {}\".format(T))\n thresh = image.copy()\n thresh[thresh > T] = 255\n thresh[thresh < 255] = 0\n thresh = cv2.bitwise_not(thresh)\n cv2.imshow(\"Riddler-Calvard\", thresh)\n\n cv2.waitKey(0)\n\nif __name__==\"__main__\":\n main()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from collections import defaultdict
from past.builtins import basestring
from pycolocstats.core.config import REF_COLL_GSUITES_PATH
__metaclass__ = type
class RefTrackCollectionRegistry(object):
    """Registry of prebuilt reference-track GSuite collections on disk.

    The registry is populated once, at construction time, by walking
    REF_COLL_GSUITES_PATH. The last three path components of each file are
    read as <trackIndex>/<genome>/<collection>.gsuite -- this layout is
    inferred from the path split below; confirm against the data directory.
    """

    PREBUILT = '__prebuilt__'

    def __init__(self):
        self._genome2TrackIndexReg = defaultdict(set)
        self._trackIndex2CollectionReg = defaultdict(set)
        self._allCollections = set()

        if not os.path.exists(REF_COLL_GSUITES_PATH):
            return

        for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):
            for fn in files:
                fullPath = os.path.join(root, fn)
                trackIndex, genome, trackCollection = fullPath.split(os.sep)[-3:]
                # The genome -> track-index mapping is recorded for every
                # file, GSuite or not.
                self._genome2TrackIndexReg[genome].add(trackIndex)
                if trackCollection.endswith('.gsuite'):
                    collName = trackCollection[:-len('.gsuite')]
                    self._trackIndex2CollectionReg[trackIndex].add(collName)
                    self._allCollections.add(collName)

    def getTrackCollectionList(self, genome):
        """Return sorted '<trackIndex>: <collection>' strings for *genome* ([] if unknown)."""
        if genome not in self._genome2TrackIndexReg:
            return []
        return ['{}: {}'.format(trackIndex, collection)
                for trackIndex in sorted(self._genome2TrackIndexReg[genome])
                for collection in sorted(self._trackIndex2CollectionReg[trackIndex])]

    # Temporary solution. Should be refactored to not make use of
    # setReferenceTrackFileNames() in Method classes.
    @classmethod
    def getTrackCollSpecFromCollStr(cls, collStr):
        """Turn a '<trackIndex>: <collection>' string into a spec list."""
        spec = [cls.PREBUILT]
        if collStr:
            spec.extend(collStr.split(': '))
        return spec

    def isPartOfTrackCollSpec(self, trackFile):
        """True if *trackFile* is a valid single component of a collection spec."""
        if not isinstance(trackFile, basestring):
            return False
        return (trackFile == self.PREBUILT
                or trackFile in self._trackIndex2CollectionReg
                or trackFile in self._allCollections)

    def isTrackCollSpec(self, trackFiles):
        """True if *trackFiles* is a complete spec made of known components."""
        if not all(isinstance(trackFile, basestring) for trackFile in trackFiles):
            return False
        if len(trackFiles) == 1:
            return trackFiles[0] == self.PREBUILT
        if len(trackFiles) == 3:
            return (trackFiles[0] == self.PREBUILT
                    and trackFiles[1] in self._trackIndex2CollectionReg
                    and trackFiles[2] in self._allCollections)
        return False

    @staticmethod
    def getTrackIndexAndCollFromTrackCollSpec(trackFiles):
        """Extract (trackIndex, collection) from a 3-part spec, or ('', '')."""
        return (trackFiles[1], trackFiles[2]) if len(trackFiles) == 3 else ('', '')
refTrackCollRegistry = RefTrackCollectionRegistry()
|
normal
|
{
"blob_id": "9c2cc5b993f020b8a1c96ea4cd5c2fb2da44a251",
"index": 1534,
"step-1": "<mask token>\n\n\nclass RefTrackCollectionRegistry(object):\n <mask token>\n\n def __init__(self):\n self._genome2TrackIndexReg = defaultdict(set)\n self._trackIndex2CollectionReg = defaultdict(set)\n self._allCollections = set()\n if not os.path.exists(REF_COLL_GSUITES_PATH):\n return\n for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):\n for fn in files:\n trackIndex, genome, trackCollection = os.path.join(root, fn\n ).split(os.sep)[-3:]\n self._genome2TrackIndexReg[genome].add(trackIndex)\n if not trackCollection.endswith('.gsuite'):\n continue\n trackCollection = trackCollection[:-7]\n self._trackIndex2CollectionReg[trackIndex].add(trackCollection)\n self._allCollections.add(trackCollection)\n\n def getTrackCollectionList(self, genome):\n if genome not in self._genome2TrackIndexReg:\n return []\n collStrList = []\n for trackIndex in sorted(self._genome2TrackIndexReg[genome]):\n for trackCollection in sorted(self._trackIndex2CollectionReg[\n trackIndex]):\n collStrList.append('{}: {}'.format(trackIndex, trackCollection)\n )\n return collStrList\n\n @classmethod\n def getTrackCollSpecFromCollStr(cls, collStr):\n if collStr:\n return [cls.PREBUILT] + collStr.split(': ')\n else:\n return [cls.PREBUILT]\n\n def isPartOfTrackCollSpec(self, trackFile):\n return isinstance(trackFile, basestring) and (trackFile == self.\n PREBUILT or trackFile in self._trackIndex2CollectionReg or \n trackFile in self._allCollections)\n <mask token>\n\n @staticmethod\n def getTrackIndexAndCollFromTrackCollSpec(trackFiles):\n if len(trackFiles) == 3:\n return trackFiles[1], trackFiles[2]\n else:\n return '', ''\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RefTrackCollectionRegistry(object):\n <mask token>\n\n def __init__(self):\n self._genome2TrackIndexReg = defaultdict(set)\n self._trackIndex2CollectionReg = defaultdict(set)\n self._allCollections = set()\n if not os.path.exists(REF_COLL_GSUITES_PATH):\n return\n for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):\n for fn in files:\n trackIndex, genome, trackCollection = os.path.join(root, fn\n ).split(os.sep)[-3:]\n self._genome2TrackIndexReg[genome].add(trackIndex)\n if not trackCollection.endswith('.gsuite'):\n continue\n trackCollection = trackCollection[:-7]\n self._trackIndex2CollectionReg[trackIndex].add(trackCollection)\n self._allCollections.add(trackCollection)\n\n def getTrackCollectionList(self, genome):\n if genome not in self._genome2TrackIndexReg:\n return []\n collStrList = []\n for trackIndex in sorted(self._genome2TrackIndexReg[genome]):\n for trackCollection in sorted(self._trackIndex2CollectionReg[\n trackIndex]):\n collStrList.append('{}: {}'.format(trackIndex, trackCollection)\n )\n return collStrList\n\n @classmethod\n def getTrackCollSpecFromCollStr(cls, collStr):\n if collStr:\n return [cls.PREBUILT] + collStr.split(': ')\n else:\n return [cls.PREBUILT]\n\n def isPartOfTrackCollSpec(self, trackFile):\n return isinstance(trackFile, basestring) and (trackFile == self.\n PREBUILT or trackFile in self._trackIndex2CollectionReg or \n trackFile in self._allCollections)\n\n def isTrackCollSpec(self, trackFiles):\n if not all(isinstance(trackFile, basestring) for trackFile in\n trackFiles):\n return False\n return len(trackFiles) == 1 and trackFiles[0] == self.PREBUILT or len(\n trackFiles) == 3 and trackFiles[0] == self.PREBUILT and trackFiles[\n 1] in self._trackIndex2CollectionReg and trackFiles[2\n ] in self._allCollections\n\n @staticmethod\n def getTrackIndexAndCollFromTrackCollSpec(trackFiles):\n if len(trackFiles) == 3:\n return trackFiles[1], trackFiles[2]\n else:\n return '', ''\n\n\n<mask 
token>\n",
"step-3": "<mask token>\n\n\nclass RefTrackCollectionRegistry(object):\n PREBUILT = '__prebuilt__'\n\n def __init__(self):\n self._genome2TrackIndexReg = defaultdict(set)\n self._trackIndex2CollectionReg = defaultdict(set)\n self._allCollections = set()\n if not os.path.exists(REF_COLL_GSUITES_PATH):\n return\n for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):\n for fn in files:\n trackIndex, genome, trackCollection = os.path.join(root, fn\n ).split(os.sep)[-3:]\n self._genome2TrackIndexReg[genome].add(trackIndex)\n if not trackCollection.endswith('.gsuite'):\n continue\n trackCollection = trackCollection[:-7]\n self._trackIndex2CollectionReg[trackIndex].add(trackCollection)\n self._allCollections.add(trackCollection)\n\n def getTrackCollectionList(self, genome):\n if genome not in self._genome2TrackIndexReg:\n return []\n collStrList = []\n for trackIndex in sorted(self._genome2TrackIndexReg[genome]):\n for trackCollection in sorted(self._trackIndex2CollectionReg[\n trackIndex]):\n collStrList.append('{}: {}'.format(trackIndex, trackCollection)\n )\n return collStrList\n\n @classmethod\n def getTrackCollSpecFromCollStr(cls, collStr):\n if collStr:\n return [cls.PREBUILT] + collStr.split(': ')\n else:\n return [cls.PREBUILT]\n\n def isPartOfTrackCollSpec(self, trackFile):\n return isinstance(trackFile, basestring) and (trackFile == self.\n PREBUILT or trackFile in self._trackIndex2CollectionReg or \n trackFile in self._allCollections)\n\n def isTrackCollSpec(self, trackFiles):\n if not all(isinstance(trackFile, basestring) for trackFile in\n trackFiles):\n return False\n return len(trackFiles) == 1 and trackFiles[0] == self.PREBUILT or len(\n trackFiles) == 3 and trackFiles[0] == self.PREBUILT and trackFiles[\n 1] in self._trackIndex2CollectionReg and trackFiles[2\n ] in self._allCollections\n\n @staticmethod\n def getTrackIndexAndCollFromTrackCollSpec(trackFiles):\n if len(trackFiles) == 3:\n return trackFiles[1], trackFiles[2]\n else:\n return '', 
''\n\n\n<mask token>\n",
"step-4": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport os\nfrom collections import defaultdict\nfrom past.builtins import basestring\nfrom pycolocstats.core.config import REF_COLL_GSUITES_PATH\n__metaclass__ = type\n\n\nclass RefTrackCollectionRegistry(object):\n PREBUILT = '__prebuilt__'\n\n def __init__(self):\n self._genome2TrackIndexReg = defaultdict(set)\n self._trackIndex2CollectionReg = defaultdict(set)\n self._allCollections = set()\n if not os.path.exists(REF_COLL_GSUITES_PATH):\n return\n for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):\n for fn in files:\n trackIndex, genome, trackCollection = os.path.join(root, fn\n ).split(os.sep)[-3:]\n self._genome2TrackIndexReg[genome].add(trackIndex)\n if not trackCollection.endswith('.gsuite'):\n continue\n trackCollection = trackCollection[:-7]\n self._trackIndex2CollectionReg[trackIndex].add(trackCollection)\n self._allCollections.add(trackCollection)\n\n def getTrackCollectionList(self, genome):\n if genome not in self._genome2TrackIndexReg:\n return []\n collStrList = []\n for trackIndex in sorted(self._genome2TrackIndexReg[genome]):\n for trackCollection in sorted(self._trackIndex2CollectionReg[\n trackIndex]):\n collStrList.append('{}: {}'.format(trackIndex, trackCollection)\n )\n return collStrList\n\n @classmethod\n def getTrackCollSpecFromCollStr(cls, collStr):\n if collStr:\n return [cls.PREBUILT] + collStr.split(': ')\n else:\n return [cls.PREBUILT]\n\n def isPartOfTrackCollSpec(self, trackFile):\n return isinstance(trackFile, basestring) and (trackFile == self.\n PREBUILT or trackFile in self._trackIndex2CollectionReg or \n trackFile in self._allCollections)\n\n def isTrackCollSpec(self, trackFiles):\n if not all(isinstance(trackFile, basestring) for trackFile in\n trackFiles):\n return False\n return len(trackFiles) == 1 and trackFiles[0] == self.PREBUILT or len(\n trackFiles) == 3 and trackFiles[0] == self.PREBUILT and trackFiles[\n 1] in 
self._trackIndex2CollectionReg and trackFiles[2\n ] in self._allCollections\n\n @staticmethod\n def getTrackIndexAndCollFromTrackCollSpec(trackFiles):\n if len(trackFiles) == 3:\n return trackFiles[1], trackFiles[2]\n else:\n return '', ''\n\n\nrefTrackCollRegistry = RefTrackCollectionRegistry()\n",
"step-5": "from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\n\nfrom collections import defaultdict\nfrom past.builtins import basestring\nfrom pycolocstats.core.config import REF_COLL_GSUITES_PATH\n\n__metaclass__ = type\n\n\nclass RefTrackCollectionRegistry(object):\n PREBUILT = '__prebuilt__'\n\n def __init__(self):\n self._genome2TrackIndexReg = defaultdict(set)\n self._trackIndex2CollectionReg = defaultdict(set)\n self._allCollections = set()\n\n if not os.path.exists(REF_COLL_GSUITES_PATH):\n return\n\n for root, dirs, files in os.walk(REF_COLL_GSUITES_PATH):\n for fn in files:\n trackIndex, genome, trackCollection = os.path.join(root, fn).split(os.sep)[-3:]\n self._genome2TrackIndexReg[genome].add(trackIndex)\n if not trackCollection.endswith('.gsuite'):\n continue\n trackCollection = trackCollection[:-7]\n self._trackIndex2CollectionReg[trackIndex].add(trackCollection)\n self._allCollections.add(trackCollection)\n\n def getTrackCollectionList(self, genome):\n if genome not in self._genome2TrackIndexReg:\n return []\n\n collStrList = []\n for trackIndex in sorted(self._genome2TrackIndexReg[genome]):\n for trackCollection in sorted(self._trackIndex2CollectionReg[trackIndex]):\n collStrList.append('{}: {}'.format(trackIndex, trackCollection))\n return collStrList\n\n # Temporary solution. 
Should be refactored to not make use of setReferenceTrackFileNames()\n # in Method classes.\n\n @classmethod\n def getTrackCollSpecFromCollStr(cls, collStr):\n if collStr:\n return [cls.PREBUILT] + collStr.split(': ')\n else:\n return [cls.PREBUILT]\n\n def isPartOfTrackCollSpec(self, trackFile):\n return isinstance(trackFile, basestring) and \\\n (trackFile == self.PREBUILT or\n trackFile in self._trackIndex2CollectionReg or\n trackFile in self._allCollections)\n\n def isTrackCollSpec(self, trackFiles):\n if not all(isinstance(trackFile, basestring) for trackFile in trackFiles):\n return False\n return (len(trackFiles) == 1 and\n trackFiles[0] == self.PREBUILT) or \\\n (len(trackFiles) == 3 and\n trackFiles[0] == self.PREBUILT and\n trackFiles[1] in self._trackIndex2CollectionReg and\n trackFiles[2] in self._allCollections)\n\n @staticmethod\n def getTrackIndexAndCollFromTrackCollSpec(trackFiles):\n if len(trackFiles) == 3:\n return trackFiles[1], trackFiles[2]\n else:\n return '', ''\n\n\nrefTrackCollRegistry = RefTrackCollectionRegistry()\n",
"step-ids": [
6,
7,
8,
10,
11
]
}
|
[
6,
7,
8,
10,
11
] |
from pyplasm import *
import random as r
def gen_windows(plan_grid, n, m, window_model):
    """Place window geometry on every built cell of the n x m footprint."""
    placed = []
    for row in range(n):
        for col in range(m):
            if not plan_grid[row][col]:
                continue
            cell_windows = gen_cube_windows(plan_grid, window_model)(row, col, n, m)
            placed.append(T([1, 2])([col, row])(cell_windows))
    return STRUCT(placed)
def gen_cube_windows(plan_grid, window_model):
    """Return a function that places windows on the exposed faces of one cell.

    The returned ``gen_cube0(i, j, n, m)`` builds an HPC with a copy of
    *window_model* on each side of cell (i, j) that borders the outside of
    the footprint (grid edge, or a neighbouring cell that is not built).
    """
    w = window_model

    def gen_cube0(i, j, n, m):
        # Fresh accumulator per call: keeping this list in the enclosing
        # scope (as before) made repeated calls to gen_cube0 re-emit the
        # windows of every previously processed cell.
        hpcs = [CUBE(0.00001)]  # degenerate seed so STRUCT never gets an empty list

        if j + 1 == m or not plan_grid[i][j + 1]:
            hpcs.append(T([1, 2])([1, .5])(MAP([S2, S1, S3])(w)))

        if j - 1 < 0 or not plan_grid[i][j - 1]:
            hpcs.append(T(2)(.5)(MAP([S2, S1, S3])(w)))

        if i + 1 == n or not plan_grid[i + 1][j]:
            hpcs.append(T([1, 2])([.5, 1])(w))

        if i - 1 < 0 or not plan_grid[i - 1][j]:
            hpcs.append(T(1)(.5)(w))

        return STRUCT(hpcs)

    return gen_cube0
def gen_body(plan_grid, n, m):
    """Build the house body: one unit cube per built (True) cell of plan_grid."""
    unit = CUBE(1)
    cubes = []
    for row in range(n):
        for col in range(m):
            if plan_grid[row][col]:
                cubes.append(T([1, 2])([col, row])(unit))
    return STRUCT(cubes)
def gen_house(
        box,
        plan_grid,
        door_model,
        window_model,
        roof_model):
    """Assemble a wood-textured house scaled to fit inside *box*.

    :param box: [x, y, z] target size of the finished house.
    :param plan_grid: 2D boolean footprint; True cells are built.
    :param door_model: currently unused (reserved for a future door HPC).
    :param window_model: window HPC, modelled in box (large) coordinates.
    :param roof_model: roof HPC, modelled in grid (cell) coordinates.
    """
    n = len(plan_grid)
    m = len(plan_grid[0])

    body = STRUCT([
        gen_body(plan_grid, n, m),
        T(3)(1),
        roof_model])

    # Materialize the scale factors as a list: under Python 3, map() returns
    # a one-shot iterator, and l2s_scale is consumed twice below (once to
    # build s2l_scale, once to scale the window model).
    l2s_scale = [dim / target for dim, target in zip(SIZE([1, 2, 3])(body), box)]
    s2l_scale = [1 / elem for elem in l2s_scale]

    # Shrink the window into grid coordinates, place one per exposed face,
    # then scale the whole assembly back up to the requested box.
    scaled_win = S([1, 2, 3])(l2s_scale)(window_model)
    windows = gen_windows(plan_grid, n, m, scaled_win)
    house = STRUCT([body, windows])

    return TEXTURE(['wood.jpg', True, True, 300, 300, r.random() * 3.1415, .1, .1, 0, 0])(
        S([1, 2, 3])(s2l_scale)(house))
def l_shaped_house(box):
    """An L-shaped house (2x3 footprint with a two-cell notch cut away).

    :param box: [x, y, z] target size passed through to gen_house.
    """
    # Footprint: True cells are built; the first row's left two cells
    # are left empty, forming the L.
    grid = [
        [False, False, True],
        [True, True, True]]

    # Roof over the L: ridge vertices sit at z == .5, eaves at z == 0
    # (cell lists below refer to 1-based vertex numbers).
    roof = MKPOL([
        [
            [ 2, 0, 0],
            [2.5, 0, .5],
            [ 3, 0, 0],
            [ 3, 2, 0],
            [ 0, 2, 0],
            [ 0, 1.5, .5],
            [ 0, 1, 0],
            [ 2, 1, 0],
            [2.5, 1.5, .5]
        ],
        [
            [3,2,1],
            [9,2,3,4],
            [5,6,9,4],
            [7,6,5],
            [7,8,9,6],
            [9,8,1,2]
        ],
        [1]])

    # Window prototype: 1.5 x .2 x 2 slab, centred on the origin in x,
    # raised 1.2 units.
    window = T([1,2,3])([-.75, -.1, 1.2])(CUBOID([1.5, .2, 2]))
    return gen_house(box, grid, None, window, roof)
def q_shaped_house(box):
    """A house on a 3x3 footprint with two cells missing in the last row.

    :param box: [x, y, z] target size passed through to gen_house.
    """
    grid = [
        [True, True, True],
        [True, True, True],
        [True, False, False]]
    # Roof: vertices 8-11 form a flat inner plateau at z == .5, with
    # slopes down to the footprint outline at z == 0 (cell lists refer
    # to the 1-based vertex numbers in the comments below).
    roof = MKPOL([
        [
            [0,0,0], #1
            [3,0,0], #2
            [3,2,0], #3
            [1,2,0], #4
            [1,3,0], #5
            [.5,3,.5], #6
            [0,3,0], #7
            [.5,.5,.5], #8
            [2.5,.5,.5], #9
            [2.5,1.5,.5], #10
            [.5,1.5,.5] #11
        ],
        [
            [1,8,6,7],
            [1,2,9,8],
            [2,3,10,9],
            [10,3,4,11],
            [4,5,6,11],
            [6,5,7],
            [8,9,10,11]
        ],
        [1]])

    # Window prototype: 1.5 x .2 x 2 slab, centred on the origin in x,
    # raised 1.2 units.
    window = T([1,2,3])([-.75, -.1, 1.2])(CUBOID([1.5, .2, 2]))
    return gen_house(box, grid, None, window, roof)
def rectangular_house(box):
    """A 3x2 rectangular house with a simple gable (ridge) roof."""
    footprint = [
        [True, True],
        [True, True],
        [True, True]]
    # Ridge runs along y at x == 1, height 1; eaves at z == 0.
    verts = [
        [0, 0, 0],   # 1
        [1, 0, 1],   # 2 front ridge
        [2, 0, 0],   # 3
        [2, 3, 0],   # 4
        [1, 3, 1],   # 5 back ridge
        [0, 3, 0]]   # 6
    cells = [
        [1, 2, 5, 6],  # left slope
        [2, 3, 4, 5],  # right slope
        [1, 3, 2],     # front gable
        [5, 4, 6]]     # back gable
    roof = MKPOL([verts, cells, [1]])
    window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
    return gen_house(box, footprint, None, window, roof)
def squared_house(box):
    """A 3x3 square house with a pyramid (hip) roof."""
    footprint = [
        [True, True, True],
        [True, True, True],
        [True, True, True]]
    # Pyramid: four base corners at z == 0 and an apex above the centre.
    verts = [
        [0, 0, 0],      # 1
        [3, 0, 0],      # 2
        [3, 3, 0],      # 3
        [0, 3, 0],      # 4
        [1.5, 1.5, 1]]  # 5 apex
    cells = [
        [5, 1, 2],
        [5, 2, 3],
        [5, 3, 4],
        [5, 4, 1]]
    roof = MKPOL([verts, cells, [1]])
    window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))
    return gen_house(box, footprint, None, window, roof)
if __name__=='__main__':
    # Demo: render the pyramid-roofed square house in a 15 x 15 x 8 box.
    VIEW(squared_house([15, 15, 8]))
|
normal
|
{
"blob_id": "cb48a1601798f72f9cf3759d3c13969bc824a0f6",
"index": 707,
"step-1": "<mask token>\n\n\ndef gen_windows(plan_grid, n, m, window_model):\n return STRUCT([T([1, 2])([j, i])(gen_cube_windows(plan_grid,\n window_model)(i, j, n, m)) for i in range(n) for j in range(m) if\n plan_grid[i][j]])\n\n\n<mask token>\n\n\ndef gen_body(plan_grid, n, m):\n c = CUBE(1)\n return STRUCT([T([1, 2])([j, i])(c) for i in range(n) for j in range(m) if\n plan_grid[i][j]])\n\n\n<mask token>\n\n\ndef q_shaped_house(box):\n grid = [[True, True, True], [True, True, True], [True, False, False]]\n roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 2, 0], [1, 2, 0], [1, 3, 0], [\n 0.5, 3, 0.5], [0, 3, 0], [0.5, 0.5, 0.5], [2.5, 0.5, 0.5], [2.5, \n 1.5, 0.5], [0.5, 1.5, 0.5]], [[1, 8, 6, 7], [1, 2, 9, 8], [2, 3, 10,\n 9], [10, 3, 4, 11], [4, 5, 6, 11], [6, 5, 7], [8, 9, 10, 11]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef rectangular_house(box):\n grid = [[True, True], [True, True], [True, True]]\n roof = MKPOL([[[0, 0, 0], [1, 0, 1], [2, 0, 0], [2, 3, 0], [1, 3, 1], [\n 0, 3, 0]], [[1, 2, 5, 6], [2, 3, 4, 5], [1, 3, 2], [5, 4, 6]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef squared_house(box):\n grid = [[True, True, True], [True, True, True], [True, True, True]]\n roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 3, 0], [0, 3, 0], [1.5, 1.5, 1\n ]], [[5, 1, 2], [5, 2, 3], [5, 3, 4], [5, 4, 1]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gen_windows(plan_grid, n, m, window_model):\n return STRUCT([T([1, 2])([j, i])(gen_cube_windows(plan_grid,\n window_model)(i, j, n, m)) for i in range(n) for j in range(m) if\n plan_grid[i][j]])\n\n\n<mask token>\n\n\ndef gen_body(plan_grid, n, m):\n c = CUBE(1)\n return STRUCT([T([1, 2])([j, i])(c) for i in range(n) for j in range(m) if\n plan_grid[i][j]])\n\n\ndef gen_house(box, plan_grid, door_model, window_model, roof_model):\n n = len(plan_grid)\n m = len(plan_grid[0])\n body = STRUCT([gen_body(plan_grid, n, m), T(3)(1), roof_model])\n l2s_scale = map(lambda x, y: x / y, SIZE([1, 2, 3])(body), box)\n s2l_scale = [(1 / elem) for elem in l2s_scale]\n scaled_win = S([1, 2, 3])(l2s_scale)(window_model)\n windows = gen_windows(plan_grid, n, m, scaled_win)\n house = STRUCT([body, windows])\n return TEXTURE(['wood.jpg', True, True, 300, 300, r.random() * 3.1415, \n 0.1, 0.1, 0, 0])(S([1, 2, 3])(s2l_scale)(house))\n\n\ndef l_shaped_house(box):\n grid = [[False, False, True], [True, True, True]]\n roof = MKPOL([[[2, 0, 0], [2.5, 0, 0.5], [3, 0, 0], [3, 2, 0], [0, 2, 0\n ], [0, 1.5, 0.5], [0, 1, 0], [2, 1, 0], [2.5, 1.5, 0.5]], [[3, 2, 1\n ], [9, 2, 3, 4], [5, 6, 9, 4], [7, 6, 5], [7, 8, 9, 6], [9, 8, 1, 2\n ]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef q_shaped_house(box):\n grid = [[True, True, True], [True, True, True], [True, False, False]]\n roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 2, 0], [1, 2, 0], [1, 3, 0], [\n 0.5, 3, 0.5], [0, 3, 0], [0.5, 0.5, 0.5], [2.5, 0.5, 0.5], [2.5, \n 1.5, 0.5], [0.5, 1.5, 0.5]], [[1, 8, 6, 7], [1, 2, 9, 8], [2, 3, 10,\n 9], [10, 3, 4, 11], [4, 5, 6, 11], [6, 5, 7], [8, 9, 10, 11]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef rectangular_house(box):\n grid = [[True, True], [True, True], [True, True]]\n roof = MKPOL([[[0, 0, 0], 
[1, 0, 1], [2, 0, 0], [2, 3, 0], [1, 3, 1], [\n 0, 3, 0]], [[1, 2, 5, 6], [2, 3, 4, 5], [1, 3, 2], [5, 4, 6]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef squared_house(box):\n grid = [[True, True, True], [True, True, True], [True, True, True]]\n roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 3, 0], [0, 3, 0], [1.5, 1.5, 1\n ]], [[5, 1, 2], [5, 2, 3], [5, 3, 4], [5, 4, 1]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gen_windows(plan_grid, n, m, window_model):\n return STRUCT([T([1, 2])([j, i])(gen_cube_windows(plan_grid,\n window_model)(i, j, n, m)) for i in range(n) for j in range(m) if\n plan_grid[i][j]])\n\n\ndef gen_cube_windows(plan_grid, window_model):\n w = window_model\n hpcs = [CUBE(1e-05)]\n\n def gen_cube0(i, j, n, m):\n if j + 1 == m or not plan_grid[i][j + 1]:\n hpcs.append(T([1, 2])([1, 0.5])(MAP([S2, S1, S3])(w)))\n if j - 1 < 0 or not plan_grid[i][j - 1]:\n hpcs.append(T(2)(0.5)(MAP([S2, S1, S3])(w)))\n if i + 1 == n or not plan_grid[i + 1][j]:\n hpcs.append(T([1, 2])([0.5, 1])(w))\n if i - 1 < 0 or not plan_grid[i - 1][j]:\n hpcs.append(T(1)(0.5)(w))\n return STRUCT(hpcs)\n return gen_cube0\n\n\ndef gen_body(plan_grid, n, m):\n c = CUBE(1)\n return STRUCT([T([1, 2])([j, i])(c) for i in range(n) for j in range(m) if\n plan_grid[i][j]])\n\n\ndef gen_house(box, plan_grid, door_model, window_model, roof_model):\n n = len(plan_grid)\n m = len(plan_grid[0])\n body = STRUCT([gen_body(plan_grid, n, m), T(3)(1), roof_model])\n l2s_scale = map(lambda x, y: x / y, SIZE([1, 2, 3])(body), box)\n s2l_scale = [(1 / elem) for elem in l2s_scale]\n scaled_win = S([1, 2, 3])(l2s_scale)(window_model)\n windows = gen_windows(plan_grid, n, m, scaled_win)\n house = STRUCT([body, windows])\n return TEXTURE(['wood.jpg', True, True, 300, 300, r.random() * 3.1415, \n 0.1, 0.1, 0, 0])(S([1, 2, 3])(s2l_scale)(house))\n\n\ndef l_shaped_house(box):\n grid = [[False, False, True], [True, True, True]]\n roof = MKPOL([[[2, 0, 0], [2.5, 0, 0.5], [3, 0, 0], [3, 2, 0], [0, 2, 0\n ], [0, 1.5, 0.5], [0, 1, 0], [2, 1, 0], [2.5, 1.5, 0.5]], [[3, 2, 1\n ], [9, 2, 3, 4], [5, 6, 9, 4], [7, 6, 5], [7, 8, 9, 6], [9, 8, 1, 2\n ]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef q_shaped_house(box):\n grid = [[True, True, True], [True, True, True], [True, False, False]]\n roof = MKPOL([[[0, 
0, 0], [3, 0, 0], [3, 2, 0], [1, 2, 0], [1, 3, 0], [\n 0.5, 3, 0.5], [0, 3, 0], [0.5, 0.5, 0.5], [2.5, 0.5, 0.5], [2.5, \n 1.5, 0.5], [0.5, 1.5, 0.5]], [[1, 8, 6, 7], [1, 2, 9, 8], [2, 3, 10,\n 9], [10, 3, 4, 11], [4, 5, 6, 11], [6, 5, 7], [8, 9, 10, 11]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef rectangular_house(box):\n grid = [[True, True], [True, True], [True, True]]\n roof = MKPOL([[[0, 0, 0], [1, 0, 1], [2, 0, 0], [2, 3, 0], [1, 3, 1], [\n 0, 3, 0]], [[1, 2, 5, 6], [2, 3, 4, 5], [1, 3, 2], [5, 4, 6]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef squared_house(box):\n grid = [[True, True, True], [True, True, True], [True, True, True]]\n roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 3, 0], [0, 3, 0], [1.5, 1.5, 1\n ]], [[5, 1, 2], [5, 2, 3], [5, 3, 4], [5, 4, 1]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef gen_windows(plan_grid, n, m, window_model):\n return STRUCT([T([1, 2])([j, i])(gen_cube_windows(plan_grid,\n window_model)(i, j, n, m)) for i in range(n) for j in range(m) if\n plan_grid[i][j]])\n\n\ndef gen_cube_windows(plan_grid, window_model):\n w = window_model\n hpcs = [CUBE(1e-05)]\n\n def gen_cube0(i, j, n, m):\n if j + 1 == m or not plan_grid[i][j + 1]:\n hpcs.append(T([1, 2])([1, 0.5])(MAP([S2, S1, S3])(w)))\n if j - 1 < 0 or not plan_grid[i][j - 1]:\n hpcs.append(T(2)(0.5)(MAP([S2, S1, S3])(w)))\n if i + 1 == n or not plan_grid[i + 1][j]:\n hpcs.append(T([1, 2])([0.5, 1])(w))\n if i - 1 < 0 or not plan_grid[i - 1][j]:\n hpcs.append(T(1)(0.5)(w))\n return STRUCT(hpcs)\n return gen_cube0\n\n\ndef gen_body(plan_grid, n, m):\n c = CUBE(1)\n return STRUCT([T([1, 2])([j, i])(c) for i in range(n) for j in range(m) if\n plan_grid[i][j]])\n\n\ndef gen_house(box, plan_grid, door_model, window_model, roof_model):\n n = len(plan_grid)\n m = len(plan_grid[0])\n body = STRUCT([gen_body(plan_grid, n, m), T(3)(1), roof_model])\n l2s_scale = map(lambda x, y: x / y, SIZE([1, 2, 3])(body), box)\n s2l_scale = [(1 / elem) for elem in l2s_scale]\n scaled_win = S([1, 2, 3])(l2s_scale)(window_model)\n windows = gen_windows(plan_grid, n, m, scaled_win)\n house = STRUCT([body, windows])\n return TEXTURE(['wood.jpg', True, True, 300, 300, r.random() * 3.1415, \n 0.1, 0.1, 0, 0])(S([1, 2, 3])(s2l_scale)(house))\n\n\ndef l_shaped_house(box):\n grid = [[False, False, True], [True, True, True]]\n roof = MKPOL([[[2, 0, 0], [2.5, 0, 0.5], [3, 0, 0], [3, 2, 0], [0, 2, 0\n ], [0, 1.5, 0.5], [0, 1, 0], [2, 1, 0], [2.5, 1.5, 0.5]], [[3, 2, 1\n ], [9, 2, 3, 4], [5, 6, 9, 4], [7, 6, 5], [7, 8, 9, 6], [9, 8, 1, 2\n ]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef q_shaped_house(box):\n grid = [[True, True, True], [True, True, True], [True, False, False]]\n roof = MKPOL([[[0, 
0, 0], [3, 0, 0], [3, 2, 0], [1, 2, 0], [1, 3, 0], [\n 0.5, 3, 0.5], [0, 3, 0], [0.5, 0.5, 0.5], [2.5, 0.5, 0.5], [2.5, \n 1.5, 0.5], [0.5, 1.5, 0.5]], [[1, 8, 6, 7], [1, 2, 9, 8], [2, 3, 10,\n 9], [10, 3, 4, 11], [4, 5, 6, 11], [6, 5, 7], [8, 9, 10, 11]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef rectangular_house(box):\n grid = [[True, True], [True, True], [True, True]]\n roof = MKPOL([[[0, 0, 0], [1, 0, 1], [2, 0, 0], [2, 3, 0], [1, 3, 1], [\n 0, 3, 0]], [[1, 2, 5, 6], [2, 3, 4, 5], [1, 3, 2], [5, 4, 6]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef squared_house(box):\n grid = [[True, True, True], [True, True, True], [True, True, True]]\n roof = MKPOL([[[0, 0, 0], [3, 0, 0], [3, 3, 0], [0, 3, 0], [1.5, 1.5, 1\n ]], [[5, 1, 2], [5, 2, 3], [5, 3, 4], [5, 4, 1]], [1]])\n window = T([1, 2, 3])([-0.75, -0.1, 1.2])(CUBOID([1.5, 0.2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\nif __name__ == '__main__':\n VIEW(squared_house([15, 15, 8]))\n",
"step-5": "from pyplasm import *\nimport random as r\n\ndef gen_windows(plan_grid, n, m, window_model):\n return STRUCT([\n T([1,2])([j,i])(\n gen_cube_windows(plan_grid, window_model)(i, j, n, m))\n for i in range(n) \n for j in range(m) \n if plan_grid[i][j]])\n\ndef gen_cube_windows(plan_grid, window_model):\n w = window_model\n hpcs = [CUBE(0.00001)]\n \n def gen_cube0(i, j, n, m):\n if j+1 == m or not plan_grid[i][j+1]:\n hpcs.append(T([1, 2])([1, .5])(MAP([S2, S1, S3])(w)))\n \n if j-1 < 0 or not plan_grid[i][j-1]:\n hpcs.append(T(2)(.5)(MAP([S2, S1, S3])(w)))\n \n if i+1 == n or not plan_grid[i+1][j]:\n hpcs.append(T([1, 2])([.5, 1])(w))\n \n if i-1 < 0 or not plan_grid[i-1][j]:\n hpcs.append(T(1)(.5)(w))\n \n return STRUCT(hpcs)\n \n return gen_cube0\n \n\ndef gen_body(plan_grid, n, m):\n c = CUBE(1)\n return STRUCT([\n T([1,2])([j,i])(c)\n for i in range(n) \n for j in range(m) \n if plan_grid[i][j]])\n\n\ndef gen_house(\n box,\n plan_grid,\n door_model,\n window_model,\n roof_model):\n \n n = len(plan_grid)\n m = len(plan_grid[0])\n \n body = STRUCT([\n gen_body(plan_grid, n, m),\n T(3)(1),\n roof_model])\n \n l2s_scale = map(lambda x,y: x/y, SIZE([1,2,3])(body), box)\n s2l_scale = [1/elem for elem in l2s_scale]\n \n scaled_win = S([1,2,3])(l2s_scale)(window_model)\n \n windows = gen_windows(plan_grid, n, m, scaled_win)\n \n house = STRUCT([body, windows])\n \n return TEXTURE(['wood.jpg',True, True, 300,300, r.random()*3.1415, .1,.1, 0,0])(\n S([1,2,3])(s2l_scale)(house))\n\n\ndef l_shaped_house(box):\n \n grid = [\n [False, False, True],\n [True, True, True]]\n \n roof = MKPOL([\n [\n [ 2, 0, 0],\n [2.5, 0, .5],\n [ 3, 0, 0],\n [ 3, 2, 0],\n [ 0, 2, 0],\n [ 0, 1.5, .5],\n [ 0, 1, 0],\n [ 2, 1, 0],\n [2.5, 1.5, .5]\n ],\n [\n [3,2,1],\n [9,2,3,4],\n [5,6,9,4],\n [7,6,5],\n [7,8,9,6],\n [9,8,1,2]\n ],\n [1]])\n \n window = T([1,2,3])([-.75, -.1, 1.2])(CUBOID([1.5, .2, 2]))\n return gen_house(box, grid, None, window, roof)\n \ndef q_shaped_house(box):\n\n 
grid = [\n [True, True, True],\n [True, True, True],\n [True, False, False]]\n roof = MKPOL([\n [\n [0,0,0], #1\n [3,0,0], #2\n [3,2,0], #3\n [1,2,0], #4\n [1,3,0], #5\n [.5,3,.5], #6\n [0,3,0], #7\n [.5,.5,.5], #8\n [2.5,.5,.5], #9\n [2.5,1.5,.5], #10\n [.5,1.5,.5] #11\n ],\n [\n [1,8,6,7],\n [1,2,9,8],\n [2,3,10,9],\n [10,3,4,11],\n [4,5,6,11],\n [6,5,7],\n [8,9,10,11]\n ],\n [1]])\n \n window = T([1,2,3])([-.75, -.1, 1.2])(CUBOID([1.5, .2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef rectangular_house(box):\n\n grid = [\n [True, True],\n [True, True],\n [True, True]]\n roof = MKPOL([\n [\n [0,0,0], #1\n [1,0,1], #2\n [2,0,0], #3\n [2,3,0], #4\n [1,3,1], #5\n [0,3,0] #6\n ],\n [\n [1,2,5,6],\n [2,3,4,5],\n [1,3,2],\n [5,4,6]\n ],\n [1]])\n \n window = T([1,2,3])([-.75, -.1, 1.2])(CUBOID([1.5, .2, 2]))\n return gen_house(box, grid, None, window, roof)\n\n\ndef squared_house(box):\n \n grid = [\n [True, True, True],\n [True, True, True],\n [True, True, True]]\n roof = MKPOL([\n [\n [0,0,0], #1\n [3,0,0], #2\n [3,3,0], #3\n [0,3,0], #4\n [1.5,1.5,1] #5\n ],\n [\n [5,1,2],\n [5,2,3],\n [5,3,4],\n [5,4,1]\n ],\n [1]])\n \n window = T([1,2,3])([-.75, -.1, 1.2])(CUBOID([1.5, .2, 2]))\n return gen_house(box, grid, None, window, roof)\n \n\nif __name__=='__main__':\n VIEW(squared_house([15, 15, 8]))\n\n",
"step-ids": [
5,
7,
8,
9,
11
]
}
|
[
5,
7,
8,
9,
11
] |
'''
Given an expression with numbers, brackets and operators, only the brackets
matter for this task. Brackets come in three types: "{}", "()" and "[]".
Every opened bracket must be closed by a bracket of the same type, and
bracket scopes must not intersect. Decide whether the expression is
correct; ignore the operators and operands.
Input: An expression with different types of brackets.
Output: A boolean: whether the expression is correct.
Example:
checkio("((5+3)*2+1)") == True
checkio("{[(3+1)+2]+}") == True
checkio("(3+{1-1)}") == False
checkio("[1+1]+(2*2)-{3/3}") == True
checkio("(({[(((1)-2)+3)-3]/3}-3)") == False
'''
def checkio(data):
    """Return True if every bracket in *data* is correctly matched.

    Only "()", "[]" and "{}" are considered; each opening bracket must be
    closed by a bracket of the same type and scopes must nest properly.
    All other characters are ignored. The previous body was a placeholder
    (``return True or False``) that always returned True.
    """
    closers = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in data:
        if ch in '([{':
            stack.append(ch)
        elif ch in closers:
            # A closer must match the most recently opened bracket.
            if not stack or stack.pop() != closers[ch]:
                return False
    # Correct only if every opened bracket was also closed.
    return not stack
|
normal
|
{
"blob_id": "f69b4d022ebed5a0b660f55704bbe762d5d765d5",
"index": 1332,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef checkio(data):\n return True or False\n",
"step-3": "'''\nGiven an expression with numbers, brackets and operators. But in this task only brackets are important. Brackets can be one of three types -- \"{}\" \"()\" \"[]\". Brackets are determine the scope or restricted some expression. So each if was opened, then must be closed with the same type. The scopes of brackets must not intersected. You should to make a decision correct an expression or not. Don't care about operators and operands.\nInput: An expression with different of types brackets.\nOutput: A boolean. Correct an expression or not.\nExample:\n?\n1\n2\n3\n4\n5\ncheckio(\"((5+3)*2+1)\") == True\ncheckio(\"{[(3+1)+2]+}\") == True\ncheckio(\"(3+{1-1)}\") == False\ncheckio(\"[1+1]+(2*2)-{3/3}\") == True\ncheckio(\"(({[(((1)-2)+3)-3]/3}-3)\") == False\n\n'''\ndef checkio(data):\n #replace this for solution\n return True or False",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class Game:
<|reserved_special_token_0|>
def __init__(self, grid_size):
self.grid_size = grid_size
self.start_game(grid_size)
plt.title("Nate's Lame Game")
def start_game(self, grid_size):
self.score = 0
self.goal_pos = 0, 0
self.wall_pos = grid_size // 2, np.arange(5)
self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)
self.player_pos = 9, 9
self.board[self.player_pos] = 0.5
def show_board(self):
plt.imshow(self.board)
def update_board(self, new_pos, show_plt=False):
if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:
self.score += 100
self.board[self.player_pos] = 1
self.board[new_pos] = 0.5
self.player_pos = new_pos
if show_plt:
self.show_board()
if self.check_end():
print('Game over yo')
self.start_game(self.grid_size)
return True
return False
def get_actions(self):
x, y = self.player_pos
actions = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]
v_dim = self.board.shape[0]
valid = []
for a in actions:
if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1
] > -1 and self.board[a] != 10:
valid.append(a)
return valid
def check_end(self):
if self.player_pos == self.goal_pos:
print('game is finished')
self.score = 0
return True
else:
return False
def example(self):
"""
Illustrates how to play the game.
"""
while self.check_end() == False:
plt.pause(0.25)
end = self.update_board(random.choice(self.get_actions()), True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Game:
    """
    A simple grid game rendered with matplotlib's pyplot (plt): the player
    (cell value 0.5) moves on a board with open cells (1), wall cells (10)
    and a goal cell (0). Initializes with a grid_size; see the "example"
    method for how a game is played.
    """

    def __init__(self, grid_size):
        # Keep the size so the board can be rebuilt when a game ends.
        self.grid_size = grid_size
        self.start_game(grid_size)
        plt.title("Nate's Lame Game")

    def start_game(self, grid_size):
        """Reset score, goal, wall, board and player position."""
        self.score = 0
        self.goal_pos = 0, 0
        # Wall: a 5-cell segment along the middle row, columns 0-4.
        self.wall_pos = grid_size // 2, np.arange(5)
        self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)
        # NOTE(review): start position is hard-coded to (9, 9) — assumes
        # grid_size > 9; confirm intended behaviour for other sizes.
        self.player_pos = 9, 9
        self.board[self.player_pos] = 0.5

    def show_board(self):
        """Render the current board with matplotlib."""
        plt.imshow(self.board)

    def update_board(self, new_pos, show_plt=False):
        """Move the player to new_pos; return True if the game ended.

        Awards 100 points when new_pos is orthogonally adjacent to the
        goal (Manhattan distance 1). Reaching the goal itself ends the
        round and restarts a fresh game on the same grid size.
        """
        if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:
            self.score += 100
        # Clear the old player cell, paint the new one.
        self.board[self.player_pos] = 1
        self.board[new_pos] = 0.5
        self.player_pos = new_pos
        if show_plt:
            self.show_board()
        if self.check_end():
            print('Game over yo')
            self.start_game(self.grid_size)
            return True
        return False

    def get_actions(self):
        """Return the in-bounds, non-wall cells orthogonally adjacent to the player."""
        x, y = self.player_pos
        actions = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]
        v_dim = self.board.shape[0]
        valid = []
        for a in actions:
            # Keep moves on the (square) board and off wall cells (value 10).
            if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1
                ] > -1 and self.board[a] != 10:
                valid.append(a)
        return valid

    def check_end(self):
        """Return True (and zero the score) if the player sits on the goal."""
        if self.player_pos == self.goal_pos:
            print('game is finished')
            self.score = 0
            return True
        else:
            return False

    def example(self):
        """
        Illustrates how to play the game: take random valid moves,
        redrawing each step, until the goal is reached.
        """
        while self.check_end() == False:
            plt.pause(0.25)
            end = self.update_board(random.choice(self.get_actions()), True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def draw_board(grid_size, hole_pos, wall_pos):
    """Create a grid_size x grid_size board of ones, then mark the wall
    cells with 10 and the goal (hole) cell with 0."""
    canvas = np.ones((grid_size, grid_size))
    canvas[wall_pos] = 10
    canvas[hole_pos] = 0
    return canvas
class Game:
    """
    A class which implements the Gobble game. Initializes with a grid_size
    and path_radius. There is an "example" method to illustrate how the
    game is played.
    """

    def __init__(self, grid_size):
        # Keep the size so the board can be rebuilt when a round ends.
        self.grid_size = grid_size
        self.start_game(grid_size)
        plt.title("Nate's Lame Game")

    def start_game(self, grid_size):
        """Reset score, goal, wall, board and player position for a round."""
        self.score = 0
        self.goal_pos = 0, 0
        # Wall occupies columns 0-4 of the middle row.
        self.wall_pos = grid_size // 2, np.arange(5)
        self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)
        # NOTE(review): start cell is hard-coded to (9, 9); this assumes
        # grid_size > 9 -- confirm callers always pass a large enough grid.
        self.player_pos = 9, 9
        self.board[self.player_pos] = 0.5

    def show_board(self):
        """Render the current board with matplotlib."""
        plt.imshow(self.board)

    def update_board(self, new_pos, show_plt=False):
        """Move the player to ``new_pos``; +100 score when the move lands
        at Manhattan distance 1 from the goal. Returns True when the round
        ended (the game restarts in place), else False."""
        if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:
            self.score += 100
        self.board[self.player_pos] = 1  # clear the old player cell
        self.board[new_pos] = 0.5  # mark the new player cell
        self.player_pos = new_pos
        if show_plt:
            self.show_board()
        if self.check_end():
            print('Game over yo')
            self.start_game(self.grid_size)
            return True
        return False

    def get_actions(self):
        """Return the adjacent cells that are on the board and not walls."""
        x, y = self.player_pos
        actions = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]
        v_dim = self.board.shape[0]
        valid = []
        for a in actions:
            # In bounds and not a wall cell (walls are stored as 10).
            if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1
                ] > -1 and self.board[a] != 10:
                valid.append(a)
        return valid

    def check_end(self):
        """True once the player is on the goal cell (also resets score)."""
        if self.player_pos == self.goal_pos:
            print('game is finished')
            self.score = 0
            return True
        else:
            return False

    def example(self):
        """
        Illustrates how to play the game.
        """
        while self.check_end() == False:
            plt.pause(0.25)
            end = self.update_board(random.choice(self.get_actions()), True)
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import numpy as np
import random
plt.ion()  # interactive mode: figures update without blocking the script
def draw_board(grid_size, hole_pos, wall_pos):
    """Return a grid_size x grid_size float board.

    Cells default to 1; ``wall_pos`` cells are set to 10 (impassable) and
    ``hole_pos`` (the goal) to 0. Positions use numpy indexing, so
    ``wall_pos`` may address several cells at once.
    """
    board = np.ones((grid_size, grid_size))
    board[wall_pos] = 10
    board[hole_pos] = 0
    return board
class Game:
    """Implementation of the Gobble game on a square grid.

    Construct with a ``grid_size``; call :meth:`example` to watch a random
    walker play one round.
    """

    def __init__(self, grid_size):
        self.grid_size = grid_size
        self.start_game(grid_size)
        plt.title("Nate's Lame Game")

    def start_game(self, grid_size):
        """Reset score, goal/wall layout, board and player position."""
        self.score = 0
        self.goal_pos = (0, 0)
        self.wall_pos = (grid_size // 2, np.arange(5))
        self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)
        self.player_pos = (9, 9)
        self.board[self.player_pos] = 0.5

    def show_board(self):
        """Render the current board via matplotlib."""
        plt.imshow(self.board)

    def update_board(self, new_pos, show_plt=False):
        """Move the player to ``new_pos``; return True when the round ends."""
        goal_distance = np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos)))
        if goal_distance == 1:
            self.score += 100
        self.board[self.player_pos] = 1
        self.board[new_pos] = 0.5
        self.player_pos = new_pos
        if show_plt:
            self.show_board()
        if not self.check_end():
            return False
        print('Game over yo')
        self.start_game(self.grid_size)
        return True

    def get_actions(self):
        """Return the in-bounds, non-wall cells adjacent to the player."""
        row, col = self.player_pos
        dim = self.board.shape[0]
        neighbours = ((row + 1, col), (row, col + 1),
                      (row - 1, col), (row, col - 1))
        return [cell for cell in neighbours
                if 0 <= cell[0] < dim and 0 <= cell[1] < dim
                and self.board[cell] != 10]

    def check_end(self):
        """True (and score reset) once the player stands on the goal."""
        if self.player_pos != self.goal_pos:
            return False
        print('game is finished')
        self.score = 0
        return True

    def example(self):
        """Play one round by repeatedly taking a random legal move."""
        while not self.check_end():
            plt.pause(0.25)
            end = self.update_board(random.choice(self.get_actions()), True)
<|reserved_special_token_1|>
import matplotlib.pyplot as plt
import numpy as np
import random
plt.ion()  # interactive mode: figures update without blocking the script
def draw_board(grid_size, hole_pos, wall_pos):
    """Build the playing field: ones everywhere, 10 on wall cells, and 0
    at the goal (hole) cell."""
    field = np.ones((grid_size, grid_size))
    field[wall_pos] = 10
    field[hole_pos] = 0
    return field
class Game:
    """
    A class which implements the Gobble game. Initializes with a grid_size
    and path_radius. There is an "example" method to illustrate how the
    game is played.
    """

    def __init__(self, grid_size):
        # Keep the size so the board can be rebuilt when a round ends.
        self.grid_size = grid_size
        self.start_game(grid_size)
        plt.title("Nate's Lame Game")

    def start_game(self, grid_size):
        """Reset score, goal, wall, board and player position for a round."""
        self.score = 0
        self.goal_pos = (0, 0)
        # Wall occupies columns 0-4 of the middle row.
        self.wall_pos = (grid_size // 2, np.arange(5))
        self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)
        # NOTE(review): start cell is hard-coded; assumes grid_size > 9.
        self.player_pos = (9, 9)
        self.board[self.player_pos] = 0.5

    def show_board(self):
        """Render the current board with matplotlib."""
        plt.imshow(self.board)

    def update_board(self, new_pos, show_plt=False):
        """Move the player to ``new_pos``; award 100 points when the move
        lands at Manhattan distance 1 from the goal. Returns True when the
        round ended (the game restarts in place), else False."""
        if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:
            self.score += 100
        self.board[self.player_pos] = 1  # clear the old player cell
        self.board[new_pos] = 0.5  # mark the new player cell
        self.player_pos = new_pos
        if show_plt:
            self.show_board()
        if self.check_end():
            print('Game over yo')
            self.start_game(self.grid_size)
            return True
        return False

    def get_actions(self):
        """Return the neighbouring cells that are on the board and not walls."""
        x, y = self.player_pos
        v_dim = self.board.shape[0]
        moves = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]
        return [a for a in moves
                if -1 < a[0] < v_dim and -1 < a[1] < v_dim
                and self.board[a] != 10]

    def check_end(self):
        """True once the player stands on the goal (also resets the score)."""
        if self.player_pos == self.goal_pos:
            print('game is finished')
            self.score = 0
            return True
        return False

    def example(self):
        """
        Illustrates how to play the game.
        """
        while not self.check_end():
            plt.pause(0.25)
            self.update_board(random.choice(self.get_actions()), True)
|
flexible
|
{
"blob_id": "a74f2050a057f579a8a8b77ac04ef09073cdb6cf",
"index": 6057,
"step-1": "<mask token>\n\n\nclass Game:\n <mask token>\n\n def __init__(self, grid_size):\n self.grid_size = grid_size\n self.start_game(grid_size)\n plt.title(\"Nate's Lame Game\")\n\n def start_game(self, grid_size):\n self.score = 0\n self.goal_pos = 0, 0\n self.wall_pos = grid_size // 2, np.arange(5)\n self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)\n self.player_pos = 9, 9\n self.board[self.player_pos] = 0.5\n\n def show_board(self):\n plt.imshow(self.board)\n\n def update_board(self, new_pos, show_plt=False):\n if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:\n self.score += 100\n self.board[self.player_pos] = 1\n self.board[new_pos] = 0.5\n self.player_pos = new_pos\n if show_plt:\n self.show_board()\n if self.check_end():\n print('Game over yo')\n self.start_game(self.grid_size)\n return True\n return False\n\n def get_actions(self):\n x, y = self.player_pos\n actions = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]\n v_dim = self.board.shape[0]\n valid = []\n for a in actions:\n if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1\n ] > -1 and self.board[a] != 10:\n valid.append(a)\n return valid\n\n def check_end(self):\n if self.player_pos == self.goal_pos:\n print('game is finished')\n self.score = 0\n return True\n else:\n return False\n\n def example(self):\n \"\"\"\n Illustrates how to play the game.\n \"\"\"\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)\n",
"step-2": "<mask token>\n\n\nclass Game:\n \"\"\"\n A class which implements the Gobble game. Initializes with a grid_size\n and path_radius. There is an \"example\" method to illustrate how the\n game is played.\n \"\"\"\n\n def __init__(self, grid_size):\n self.grid_size = grid_size\n self.start_game(grid_size)\n plt.title(\"Nate's Lame Game\")\n\n def start_game(self, grid_size):\n self.score = 0\n self.goal_pos = 0, 0\n self.wall_pos = grid_size // 2, np.arange(5)\n self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)\n self.player_pos = 9, 9\n self.board[self.player_pos] = 0.5\n\n def show_board(self):\n plt.imshow(self.board)\n\n def update_board(self, new_pos, show_plt=False):\n if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:\n self.score += 100\n self.board[self.player_pos] = 1\n self.board[new_pos] = 0.5\n self.player_pos = new_pos\n if show_plt:\n self.show_board()\n if self.check_end():\n print('Game over yo')\n self.start_game(self.grid_size)\n return True\n return False\n\n def get_actions(self):\n x, y = self.player_pos\n actions = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]\n v_dim = self.board.shape[0]\n valid = []\n for a in actions:\n if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1\n ] > -1 and self.board[a] != 10:\n valid.append(a)\n return valid\n\n def check_end(self):\n if self.player_pos == self.goal_pos:\n print('game is finished')\n self.score = 0\n return True\n else:\n return False\n\n def example(self):\n \"\"\"\n Illustrates how to play the game.\n \"\"\"\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)\n",
"step-3": "<mask token>\n\n\ndef draw_board(grid_size, hole_pos, wall_pos):\n board = np.ones((grid_size, grid_size))\n board[wall_pos] = 10\n board[hole_pos] = 0\n return board\n\n\nclass Game:\n \"\"\"\n A class which implements the Gobble game. Initializes with a grid_size\n and path_radius. There is an \"example\" method to illustrate how the\n game is played.\n \"\"\"\n\n def __init__(self, grid_size):\n self.grid_size = grid_size\n self.start_game(grid_size)\n plt.title(\"Nate's Lame Game\")\n\n def start_game(self, grid_size):\n self.score = 0\n self.goal_pos = 0, 0\n self.wall_pos = grid_size // 2, np.arange(5)\n self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)\n self.player_pos = 9, 9\n self.board[self.player_pos] = 0.5\n\n def show_board(self):\n plt.imshow(self.board)\n\n def update_board(self, new_pos, show_plt=False):\n if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:\n self.score += 100\n self.board[self.player_pos] = 1\n self.board[new_pos] = 0.5\n self.player_pos = new_pos\n if show_plt:\n self.show_board()\n if self.check_end():\n print('Game over yo')\n self.start_game(self.grid_size)\n return True\n return False\n\n def get_actions(self):\n x, y = self.player_pos\n actions = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]\n v_dim = self.board.shape[0]\n valid = []\n for a in actions:\n if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1\n ] > -1 and self.board[a] != 10:\n valid.append(a)\n return valid\n\n def check_end(self):\n if self.player_pos == self.goal_pos:\n print('game is finished')\n self.score = 0\n return True\n else:\n return False\n\n def example(self):\n \"\"\"\n Illustrates how to play the game.\n \"\"\"\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nimport random\nplt.ion()\n\n\ndef draw_board(grid_size, hole_pos, wall_pos):\n board = np.ones((grid_size, grid_size))\n board[wall_pos] = 10\n board[hole_pos] = 0\n return board\n\n\nclass Game:\n \"\"\"\n A class which implements the Gobble game. Initializes with a grid_size\n and path_radius. There is an \"example\" method to illustrate how the\n game is played.\n \"\"\"\n\n def __init__(self, grid_size):\n self.grid_size = grid_size\n self.start_game(grid_size)\n plt.title(\"Nate's Lame Game\")\n\n def start_game(self, grid_size):\n self.score = 0\n self.goal_pos = 0, 0\n self.wall_pos = grid_size // 2, np.arange(5)\n self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)\n self.player_pos = 9, 9\n self.board[self.player_pos] = 0.5\n\n def show_board(self):\n plt.imshow(self.board)\n\n def update_board(self, new_pos, show_plt=False):\n if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:\n self.score += 100\n self.board[self.player_pos] = 1\n self.board[new_pos] = 0.5\n self.player_pos = new_pos\n if show_plt:\n self.show_board()\n if self.check_end():\n print('Game over yo')\n self.start_game(self.grid_size)\n return True\n return False\n\n def get_actions(self):\n x, y = self.player_pos\n actions = [(x + 1, y), (x, y + 1), (x - 1, y), (x, y - 1)]\n v_dim = self.board.shape[0]\n valid = []\n for a in actions:\n if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1\n ] > -1 and self.board[a] != 10:\n valid.append(a)\n return valid\n\n def check_end(self):\n if self.player_pos == self.goal_pos:\n print('game is finished')\n self.score = 0\n return True\n else:\n return False\n\n def example(self):\n \"\"\"\n Illustrates how to play the game.\n \"\"\"\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)\n",
"step-5": "import matplotlib.pyplot as plt\nimport numpy as np\nimport random\n\nplt.ion()\n\ndef draw_board(grid_size, hole_pos,wall_pos):\n board = np.ones((grid_size,grid_size))\n board[wall_pos] = 10\n board[hole_pos] = 0\n return board\n\nclass Game():\n \"\"\"\n A class which implements the Gobble game. Initializes with a grid_size\n and path_radius. There is an \"example\" method to illustrate how the\n game is played.\n \"\"\"\n def __init__(self, grid_size):\n self.grid_size = grid_size\n #self.player_pos = (np.random.randint(grid_size),np.random.randint(grid_size))\n self.start_game(grid_size)\n #self.show_board()\n plt.title(\"Nate's Lame Game\")\n\n def start_game(self, grid_size):\n self.score = 0\n self.goal_pos = (0,0)\n self.wall_pos = (grid_size//2,np.arange(5))\n self.board = draw_board(grid_size, self.goal_pos, self.wall_pos)\n self.player_pos = (9,9)\n self.board[self.player_pos] = .5\n \n # self.board[self.player_pos] = .5\n\n def show_board(self):\n plt.imshow(self.board)\n\n def update_board(self, new_pos, show_plt=False):\n # if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) < np.sum(np.abs(np.array(self.player_pos) - np.array(self.goal_pos))):\n # self.score += 1\n # else:\n # self.score -= 1\n if np.sum(np.abs(np.array(new_pos) - np.array(self.goal_pos))) == 1:\n self.score += 100\n\n self.board[self.player_pos] = 1\n self.board[new_pos] = .5\n self.player_pos = new_pos\n\n if show_plt:\n self.show_board()\n if self.check_end():\n print('Game over yo')\n self.start_game(self.grid_size)\n return True\n\n\n return False\n\n def get_actions(self):\n x,y = self.player_pos\n actions = [(x+1,y), (x,y+1),\n (x-1,y), (x,y-1)]\n\n v_dim = self.board.shape[0]\n valid = []\n for a in actions:\n if a[0] < v_dim and a[1] < v_dim and a[0] > -1 and a[1] > -1 and self.board[a] != 10:\n valid.append(a)\n\n return valid\n\n\n def check_end(self):\n if self.player_pos == self.goal_pos:\n print('game is finished')\n self.score = 0\n return True\n 
else:\n return False\n\n def example(self):\n \"\"\"\n Illustrates how to play the game.\n \"\"\"\n while self.check_end() == False:\n plt.pause(0.25)\n end = self.update_board(random.choice(self.get_actions()), True)\n\n",
"step-ids": [
8,
9,
10,
12,
13
]
}
|
[
8,
9,
10,
12,
13
] |
<|reserved_special_token_0|>
class ApiException(Exception):
    """API-level error carrying an HTTP-style status code and optional payload."""

    def __init__(self, message, code=400, data=None):
        super().__init__(message)
        self.msg = message
        self.code = code
        self.data = data

    def __str__(self):
        return self.msg
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ApiException(Exception):
    """API error that can serialize itself into a response dict."""

    def __init__(self, message, code=400, data=None):
        super().__init__(message)
        self.code = code
        self.msg = message
        self.data = data

    def __str__(self):
        return self.msg

    def to_dict(self):
        """Merge the extra payload (if any) with msg and code."""
        payload = dict(self.data or ())
        payload.update(msg=self.msg, code=self.code)
        return payload
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ApiException(Exception):
    """API error carrying a message, an HTTP-style code and optional data."""

    def __init__(self, message, code=400, data=None):
        Exception.__init__(self, message)
        self.code = code
        self.msg = message
        self.data = data

    def __str__(self):
        return self.msg

    def to_dict(self):
        """Serialize to a response dict: extra payload plus msg and code."""
        # ``self.data or ()`` maps None to an empty iterable before dict().
        res = dict(self.data or ())
        res['msg'] = self.msg
        res['code'] = self.code
        return res
def error_handle(msg='', data=None):
    """Log an error through the shared service logger, then raise it
    as an :class:`ApiException` with the same message."""
    service_logger.error(data={'msg': msg, 'data': data})
    raise ApiException(msg)
<|reserved_special_token_1|>
from service import service_logger
from service.TaskService import TaskService
class ApiException(Exception):
    """Application-level API error with message, status code and payload."""

    def __init__(self, message, code=400, data=None):
        Exception.__init__(self, message)
        self.code = code  # HTTP-style status code, default 400
        self.msg = message
        self.data = data  # optional extra payload merged into to_dict()

    def __str__(self):
        return self.msg

    def to_dict(self):
        """Return a JSON-serializable dict: payload plus msg and code."""
        res = dict(self.data or ())
        res['msg'] = self.msg
        res['code'] = self.code
        return res
def error_handle(msg='', data=None):
    """Log an error through the shared service logger, then raise it
    as an :class:`ApiException` with the same message."""
    service_logger.error(data={'msg': msg, 'data': data})
    raise ApiException(msg)
<|reserved_special_token_1|>
# _*_ coding: utf-8 _*_
from service import service_logger
from service.TaskService import TaskService
class ApiException(Exception):
    """Application-level API error.

    :param message: human-readable error message (also used as ``str(exc)``)
    :param code: HTTP-style status code, default 400
    :param data: optional dict payload merged into :meth:`to_dict`
    """

    def __init__(self, message, code=400, data=None):
        # Cooperative-init style instead of the legacy
        # ``Exception.__init__(self, message)`` call.
        super().__init__(message)
        self.code = code
        self.msg = message
        self.data = data

    def __str__(self):
        return self.msg

    def to_dict(self):
        """Return a JSON-serializable dict: payload plus msg and code."""
        res = dict(self.data or ())
        res['msg'] = self.msg
        res['code'] = self.code
        return res
def error_handle(msg='', data=None):
    """Log the error through the shared service logger and raise ApiException.

    NOTE(review): assumes ``service_logger.error`` accepts a ``data=`` keyword
    argument -- confirm against the logger implementation.
    """
    service_logger.error(data={"msg": msg, "data": data})
    raise ApiException(msg)
|
flexible
|
{
"blob_id": "0ac14b023c51bfd1cf99bd2d991baa30a671e066",
"index": 9994,
"step-1": "<mask token>\n\n\nclass ApiException(Exception):\n\n def __init__(self, message, code=400, data=None):\n Exception.__init__(self, message)\n self.code = code\n self.msg = message\n self.data = data\n\n def __str__(self):\n return self.msg\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ApiException(Exception):\n\n def __init__(self, message, code=400, data=None):\n Exception.__init__(self, message)\n self.code = code\n self.msg = message\n self.data = data\n\n def __str__(self):\n return self.msg\n\n def to_dict(self):\n res = dict(self.data or ())\n res['msg'] = self.msg\n res['code'] = self.code\n return res\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ApiException(Exception):\n\n def __init__(self, message, code=400, data=None):\n Exception.__init__(self, message)\n self.code = code\n self.msg = message\n self.data = data\n\n def __str__(self):\n return self.msg\n\n def to_dict(self):\n res = dict(self.data or ())\n res['msg'] = self.msg\n res['code'] = self.code\n return res\n\n\ndef error_handle(msg='', data=None):\n service_logger.error(data={'msg': msg, 'data': data})\n raise ApiException(msg)\n",
"step-4": "from service import service_logger\nfrom service.TaskService import TaskService\n\n\nclass ApiException(Exception):\n\n def __init__(self, message, code=400, data=None):\n Exception.__init__(self, message)\n self.code = code\n self.msg = message\n self.data = data\n\n def __str__(self):\n return self.msg\n\n def to_dict(self):\n res = dict(self.data or ())\n res['msg'] = self.msg\n res['code'] = self.code\n return res\n\n\ndef error_handle(msg='', data=None):\n service_logger.error(data={'msg': msg, 'data': data})\n raise ApiException(msg)\n",
"step-5": "# _*_ coding: utf-8 _*_\r\nfrom service import service_logger\r\nfrom service.TaskService import TaskService\r\n\r\nclass ApiException(Exception):\r\n\r\n def __init__(self, message, code=400, data=None):\r\n Exception.__init__(self, message)\r\n\r\n self.code = code\r\n self.msg = message\r\n self.data = data\r\n\r\n def __str__(self):\r\n return self.msg\r\n\r\n def to_dict(self):\r\n res = dict(self.data or ())\r\n res['msg'] = self.msg\r\n res['code'] = self.code\r\n\r\n return res\r\n\r\n\r\ndef error_handle(msg='', data=None):\r\n service_logger.error(data={\"msg\": msg, \"data\": data})\r\n raise ApiException(msg)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class EncyclopediaDao:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    @staticmethod
    def get_faq_content(query: str, page: str) ->list:
        """
        Fetch FAQ search results for the given query from Baidu Zhidao.
        :param query: search phrase
        :param page: result-page offset (sent as the 'pn' parameter)
        :return: list of dicts with create_date, title, abstract and url
        """
        url = 'https://zhidao.baidu.com/search?'
        # The site expects GBK-encoded requests/responses.
        parm = {'lm': '0', 'rn': '5', 'pn': page, 'fr': 'search', 'ie':
            'gbk', 'word': query}
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join([(key + '=' +
            parm[key]) for key in parm]), timeout=3, is_cookie=True,
            charset='gbk')
        bs = BeautifulSoup(page_content, 'html.parser')
        # Each search hit is rendered as a <dl class="dl"> element.
        content_list = bs.body.find_all('dl', {'class': 'dl'})
        data = []
        for item in content_list:
            entry = {'create_date': item.find('dd', {'class':
                'dd explain f-light'}).span.text, 'title': item.a.text,
                'abstract': item.find('dd', {'class': 'dd answer'}).text,
                'url': item.a.get('href')}
            data.append(entry)
        return data
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EncyclopediaDao:
<|reserved_special_token_0|>
    @staticmethod
    def get_key_title(key: str) ->list:
        """
        Fetch Wikipedia title suggestions (opensearch) for the given keyword.
        :param key: search keyword
        :return: list of dicts with index, title and url
        """
        url = 'https://zh.wikipedia.org/w/api.php?'
        parm = {'action': 'opensearch', 'search': key, 'format': 'json',
            'formatversion': '2'}
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join([(key + '=' +
            parm[key]) for key in parm]), timeout=3)
        # opensearch returns [query, [titles], ...]; element 1 holds titles.
        content_list = json.loads(page_content)[1]
        data = []
        prefix = 'https://zh.wikipedia.org/wiki/'
        for index, item in enumerate(content_list):
            entry = {'index': index, 'title': item, 'url': prefix + item}
            data.append(entry)
        return data
    @staticmethod
    def get_faq_content(query: str, page: str) ->list:
        """
        Fetch FAQ search results for the given query from Baidu Zhidao.
        :param query: search phrase
        :param page: result-page offset (sent as the 'pn' parameter)
        :return: list of dicts with create_date, title, abstract and url
        """
        url = 'https://zhidao.baidu.com/search?'
        # The site expects GBK-encoded requests/responses.
        parm = {'lm': '0', 'rn': '5', 'pn': page, 'fr': 'search', 'ie':
            'gbk', 'word': query}
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join([(key + '=' +
            parm[key]) for key in parm]), timeout=3, is_cookie=True,
            charset='gbk')
        bs = BeautifulSoup(page_content, 'html.parser')
        # Each search hit is rendered as a <dl class="dl"> element.
        content_list = bs.body.find_all('dl', {'class': 'dl'})
        data = []
        for item in content_list:
            entry = {'create_date': item.find('dd', {'class':
                'dd explain f-light'}).span.text, 'title': item.a.text,
                'abstract': item.find('dd', {'class': 'dd answer'}).text,
                'url': item.a.get('href')}
            data.append(entry)
        return data
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class EncyclopediaDao:
    """Data-access helpers for encyclopedia (Chinese Wikipedia) and FAQ
    (Baidu Zhidao) retrieval; all results are plain lists of dicts."""

    @staticmethod
    def get_key_content(key: str) ->list:
        """
        Fetch Wikipedia full-text search results for the given keyword.
        :param key: search keyword
        :return: list of dicts (id, index, create_date, create_time,
            title, abstract, url)
        """
        url = 'https://zh.wikipedia.org/w/api.php?'
        parm = {'action': 'query', 'list': 'search', 'srsearch': key,
            'format': 'json', 'formatversion': '2'}
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join([(key + '=' +
            parm[key]) for key in parm]), timeout=3)
        content_list = json.loads(page_content)['query']['search']
        data = []
        prefix = 'https://zh.wikipedia.org/wiki/'
        for index, item in enumerate(content_list):
            # Timestamps look like '2020-01-01T00:00:00Z'.
            date, time = item['timestamp'].rstrip('Z').split('T')
            # NOTE(review): the regex below is a character class, so it strips
            # every occurrence of the listed characters (s, p, a, n, '<', ',',
            # '"', '/', ...) from the snippet rather than removing the <span>
            # highlight tags -- likely a bug; the intent appears to be
            # '</?span[^>]*>'.
            entry = {'id': item['pageid'], 'index': index, 'create_date':
                date, 'create_time': time, 'title': item['title'],
                'abstract': re.sub('[<span class="searchmatch">,</span>]',
                '', item['snippet']), 'url': prefix + item['title']}
            data.append(entry)
        return data

    @staticmethod
    def get_key_title(key: str) ->list:
        """
        Fetch Wikipedia title suggestions (opensearch) for the given keyword.
        :param key: search keyword
        :return: list of dicts with index, title and url
        """
        url = 'https://zh.wikipedia.org/w/api.php?'
        parm = {'action': 'opensearch', 'search': key, 'format': 'json',
            'formatversion': '2'}
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join([(key + '=' +
            parm[key]) for key in parm]), timeout=3)
        # opensearch returns [query, [titles], ...]; element 1 holds titles.
        content_list = json.loads(page_content)[1]
        data = []
        prefix = 'https://zh.wikipedia.org/wiki/'
        for index, item in enumerate(content_list):
            entry = {'index': index, 'title': item, 'url': prefix + item}
            data.append(entry)
        return data

    @staticmethod
    def get_faq_content(query: str, page: str) ->list:
        """
        Fetch FAQ search results for the given query from Baidu Zhidao.
        :param query: search phrase
        :param page: result-page offset (sent as the 'pn' parameter)
        :return: list of dicts with create_date, title, abstract and url
        """
        url = 'https://zhidao.baidu.com/search?'
        # The site expects GBK-encoded requests/responses.
        parm = {'lm': '0', 'rn': '5', 'pn': page, 'fr': 'search', 'ie':
            'gbk', 'word': query}
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join([(key + '=' +
            parm[key]) for key in parm]), timeout=3, is_cookie=True,
            charset='gbk')
        bs = BeautifulSoup(page_content, 'html.parser')
        # Each search hit is rendered as a <dl class="dl"> element.
        content_list = bs.body.find_all('dl', {'class': 'dl'})
        data = []
        for item in content_list:
            entry = {'create_date': item.find('dd', {'class':
                'dd explain f-light'}).span.text, 'title': item.a.text,
                'abstract': item.find('dd', {'class': 'dd answer'}).text,
                'url': item.a.get('href')}
            data.append(entry)
        return data
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import json
import re
from bs4 import BeautifulSoup
from src.util.reptile import *
class EncyclopediaDao:
    """Data-access layer for encyclopedia and FAQ retrieval.

    Wraps the Chinese-Wikipedia MediaWiki API (full-text and title search)
    and Baidu Zhidao FAQ search, normalizing results to lists of dicts.
    """

    @staticmethod
    def get_key_content(key: str) -> list:
        """
        Fetch Wikipedia full-text search results for a keyword.
        :param key: search keyword
        :return: list of dicts (id, index, create_date, create_time,
            title, abstract, url)
        """
        url = 'https://zh.wikipedia.org/w/api.php?'
        parm = {'action': 'query', 'list': 'search', 'srsearch': key,
            'format': 'json', 'formatversion': '2'}
        reptile = Reptile()
        # Build the query string; loop variable renamed so it no longer
        # shadows the ``key`` parameter.
        page_content = reptile.get_page_content(url + '&'.join([(k + '=' +
            parm[k]) for k in parm]), timeout=3)
        content_list = json.loads(page_content)['query']['search']
        data = []
        prefix = 'https://zh.wikipedia.org/wiki/'
        for index, item in enumerate(content_list):
            # Timestamps look like '2020-01-01T00:00:00Z'.
            date, time = item['timestamp'].rstrip('Z').split('T')
            entry = {'id': item['pageid'], 'index': index, 'create_date':
                date, 'create_time': time, 'title': item['title'],
                # Bug fix: the previous pattern
                # '[<span class="searchmatch">,</span>]' was a regex character
                # class that deleted individual characters (s, p, a, n, '<',
                # ',', ...) from the snippet. Strip the highlight tags instead.
                'abstract': re.sub(r'</?span[^>]*>', '', item['snippet']),
                'url': prefix + item['title']}
            data.append(entry)
        return data

    @staticmethod
    def get_key_title(key: str) -> list:
        """
        Fetch Wikipedia title suggestions (opensearch) for a keyword.
        :param key: search keyword
        :return: list of dicts (index, title, url)
        """
        url = 'https://zh.wikipedia.org/w/api.php?'
        parm = {'action': 'opensearch', 'search': key, 'format': 'json',
            'formatversion': '2'}
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join([(k + '=' +
            parm[k]) for k in parm]), timeout=3)
        # opensearch responses are [query, [titles], ...]; element 1 holds titles.
        content_list = json.loads(page_content)[1]
        prefix = 'https://zh.wikipedia.org/wiki/'
        return [{'index': index, 'title': item, 'url': prefix + item}
            for index, item in enumerate(content_list)]

    @staticmethod
    def get_faq_content(query: str, page: str) -> list:
        """
        Fetch FAQ search results for a query from Baidu Zhidao.
        :param query: search phrase
        :param page: result-page offset (the 'pn' query parameter)
        :return: list of dicts (create_date, title, abstract, url)
        """
        url = 'https://zhidao.baidu.com/search?'
        # The site expects GBK-encoded requests and responses.
        parm = {'lm': '0', 'rn': '5', 'pn': page, 'fr': 'search', 'ie':
            'gbk', 'word': query}
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join([(k + '=' +
            parm[k]) for k in parm]), timeout=3, is_cookie=True,
            charset='gbk')
        bs = BeautifulSoup(page_content, 'html.parser')
        # Each search hit is a <dl class="dl"> element.
        content_list = bs.body.find_all('dl', {'class': 'dl'})
        data = []
        for item in content_list:
            entry = {'create_date': item.find('dd', {'class':
                'dd explain f-light'}).span.text, 'title': item.a.text,
                'abstract': item.find('dd', {'class': 'dd answer'}).text,
                'url': item.a.get('href')}
            data.append(entry)
        return data
<|reserved_special_token_1|>
# coding=utf-8
"""
author: wlc
function: 百科检索数据层
"""
# 引入外部库
import json
import re
from bs4 import BeautifulSoup
# 引入内部库
from src.util.reptile import *
class EncyclopediaDao:
    """Data-access layer for encyclopedia and FAQ retrieval.

    Wraps the Chinese-Wikipedia MediaWiki API (full-text and title search)
    and Baidu Zhidao FAQ search, normalizing results to lists of dicts.
    """

    @staticmethod
    def get_key_content(key: str) -> list:
        """
        Fetch Wikipedia full-text search results for a keyword.
        :param key: search keyword
        :return: list of dicts (id, index, create_date, create_time,
            title, abstract, url)
        """
        # 1. Request parameters
        url = 'https://zh.wikipedia.org/w/api.php?'
        parm = {
            'action': 'query',
            'list': 'search',
            'srsearch': key,
            'format': 'json',
            'formatversion': '2'
        }
        # 2. Fetch the page content (loop variable renamed so it does not
        #    shadow the ``key`` parameter).
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join([k + '=' + parm[k] for k in parm]), timeout=3)
        content_list = json.loads(page_content)['query']['search']
        # 3. Normalize the results
        data = []
        prefix = 'https://zh.wikipedia.org/wiki/'
        for index, item in enumerate(content_list):
            date, time = item['timestamp'].rstrip('Z').split('T')
            entry = {
                'id': item['pageid'],
                'index': index,
                'create_date': date,
                'create_time': time,
                'title': item['title'],
                # Bug fix: the previous pattern was a regex character class
                # that deleted individual characters from the snippet; strip
                # the <span class="searchmatch"> highlight tags instead.
                'abstract': re.sub(r'</?span[^>]*>', '', item['snippet']),
                'url': prefix + item['title'],
            }
            data.append(entry)
        return data

    @staticmethod
    def get_key_title(key: str) -> list:
        """
        Fetch Wikipedia title suggestions (opensearch) for a keyword.
        :param key: search keyword
        :return: list of dicts (index, title, url)
        """
        # 1. Request parameters
        url = 'https://zh.wikipedia.org/w/api.php?'
        parm = {
            'action': 'opensearch',
            'search': key,
            'format': 'json',
            'formatversion': '2'
        }
        # 2. Fetch the page content
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join([k + '=' + parm[k] for k in parm]), timeout=3)
        # opensearch responses are [query, [titles], ...]; element 1 holds titles.
        content_list = json.loads(page_content)[1]
        # 3. Normalize the results
        data = []
        prefix = 'https://zh.wikipedia.org/wiki/'
        for index, item in enumerate(content_list):
            entry = {
                'index': index,
                'title': item,
                'url': prefix + item,
            }
            data.append(entry)
        return data

    @staticmethod
    def get_faq_content(query: str, page: str) -> list:
        """
        Fetch FAQ search results for a query from Baidu Zhidao.
        :param query: search phrase
        :param page: result-page offset (the 'pn' query parameter)
        :return: list of dicts (create_date, title, abstract, url)
        """
        # 1. Request parameters (the site expects GBK encoding)
        url = 'https://zhidao.baidu.com/search?'
        parm = {
            'lm': '0',
            'rn': '5',
            'pn': page,
            'fr': 'search',
            'ie': 'gbk',
            'word': query
        }
        # 2. Fetch the page content
        reptile = Reptile()
        page_content = reptile.get_page_content(url + '&'.join([k + '=' + parm[k] for k in parm]), timeout=3, is_cookie=True, charset='gbk')
        bs = BeautifulSoup(page_content, "html.parser")
        # Each search hit is a <dl class="dl"> element.
        content_list = bs.body.find_all("dl", {'class': 'dl'})
        # 3. Normalize the results
        data = []
        for item in content_list:
            entry = {
                'create_date': item.find("dd", {'class': 'dd explain f-light'}).span.text,
                'title': item.a.text,
                'abstract': item.find("dd", {'class': 'dd answer'}).text,
                'url': item.a.get('href')
            }
            data.append(entry)
        return data
|
flexible
|
{
"blob_id": "a7f348b258e1d6b02a79c60e4fe54b6d53801f70",
"index": 3877,
"step-1": "<mask token>\n\n\nclass EncyclopediaDao:\n <mask token>\n <mask token>\n\n @staticmethod\n def get_faq_content(query: str, page: str) ->list:\n \"\"\"\n\t\t获取指定query的faq检索内容\n\t\t:param query:\n\t\t:param page:\n\t\t:return:\n\t\t\"\"\"\n url = 'https://zhidao.baidu.com/search?'\n parm = {'lm': '0', 'rn': '5', 'pn': page, 'fr': 'search', 'ie':\n 'gbk', 'word': query}\n reptile = Reptile()\n page_content = reptile.get_page_content(url + '&'.join([(key + '=' +\n parm[key]) for key in parm]), timeout=3, is_cookie=True,\n charset='gbk')\n bs = BeautifulSoup(page_content, 'html.parser')\n content_list = bs.body.find_all('dl', {'class': 'dl'})\n data = []\n for item in content_list:\n entry = {'create_date': item.find('dd', {'class':\n 'dd explain f-light'}).span.text, 'title': item.a.text,\n 'abstract': item.find('dd', {'class': 'dd answer'}).text,\n 'url': item.a.get('href')}\n data.append(entry)\n return data\n",
"step-2": "<mask token>\n\n\nclass EncyclopediaDao:\n <mask token>\n\n @staticmethod\n def get_key_title(key: str) ->list:\n \"\"\"\n\t\t获取指定关键字的百科内容检索标题\n\t\t:param key:\n\t\t:return:\n\t\t\"\"\"\n url = 'https://zh.wikipedia.org/w/api.php?'\n parm = {'action': 'opensearch', 'search': key, 'format': 'json',\n 'formatversion': '2'}\n reptile = Reptile()\n page_content = reptile.get_page_content(url + '&'.join([(key + '=' +\n parm[key]) for key in parm]), timeout=3)\n content_list = json.loads(page_content)[1]\n data = []\n prefix = 'https://zh.wikipedia.org/wiki/'\n for index, item in enumerate(content_list):\n entry = {'index': index, 'title': item, 'url': prefix + item}\n data.append(entry)\n return data\n\n @staticmethod\n def get_faq_content(query: str, page: str) ->list:\n \"\"\"\n\t\t获取指定query的faq检索内容\n\t\t:param query:\n\t\t:param page:\n\t\t:return:\n\t\t\"\"\"\n url = 'https://zhidao.baidu.com/search?'\n parm = {'lm': '0', 'rn': '5', 'pn': page, 'fr': 'search', 'ie':\n 'gbk', 'word': query}\n reptile = Reptile()\n page_content = reptile.get_page_content(url + '&'.join([(key + '=' +\n parm[key]) for key in parm]), timeout=3, is_cookie=True,\n charset='gbk')\n bs = BeautifulSoup(page_content, 'html.parser')\n content_list = bs.body.find_all('dl', {'class': 'dl'})\n data = []\n for item in content_list:\n entry = {'create_date': item.find('dd', {'class':\n 'dd explain f-light'}).span.text, 'title': item.a.text,\n 'abstract': item.find('dd', {'class': 'dd answer'}).text,\n 'url': item.a.get('href')}\n data.append(entry)\n return data\n",
"step-3": "<mask token>\n\n\nclass EncyclopediaDao:\n\n @staticmethod\n def get_key_content(key: str) ->list:\n \"\"\"\n\t\t获取指定关键字的百科内容检索内容\n\t\t:param key:\n\t\t:return:\n\t\t\"\"\"\n url = 'https://zh.wikipedia.org/w/api.php?'\n parm = {'action': 'query', 'list': 'search', 'srsearch': key,\n 'format': 'json', 'formatversion': '2'}\n reptile = Reptile()\n page_content = reptile.get_page_content(url + '&'.join([(key + '=' +\n parm[key]) for key in parm]), timeout=3)\n content_list = json.loads(page_content)['query']['search']\n data = []\n prefix = 'https://zh.wikipedia.org/wiki/'\n for index, item in enumerate(content_list):\n date, time = item['timestamp'].rstrip('Z').split('T')\n entry = {'id': item['pageid'], 'index': index, 'create_date':\n date, 'create_time': time, 'title': item['title'],\n 'abstract': re.sub('[<span class=\"searchmatch\">,</span>]',\n '', item['snippet']), 'url': prefix + item['title']}\n data.append(entry)\n return data\n\n @staticmethod\n def get_key_title(key: str) ->list:\n \"\"\"\n\t\t获取指定关键字的百科内容检索标题\n\t\t:param key:\n\t\t:return:\n\t\t\"\"\"\n url = 'https://zh.wikipedia.org/w/api.php?'\n parm = {'action': 'opensearch', 'search': key, 'format': 'json',\n 'formatversion': '2'}\n reptile = Reptile()\n page_content = reptile.get_page_content(url + '&'.join([(key + '=' +\n parm[key]) for key in parm]), timeout=3)\n content_list = json.loads(page_content)[1]\n data = []\n prefix = 'https://zh.wikipedia.org/wiki/'\n for index, item in enumerate(content_list):\n entry = {'index': index, 'title': item, 'url': prefix + item}\n data.append(entry)\n return data\n\n @staticmethod\n def get_faq_content(query: str, page: str) ->list:\n \"\"\"\n\t\t获取指定query的faq检索内容\n\t\t:param query:\n\t\t:param page:\n\t\t:return:\n\t\t\"\"\"\n url = 'https://zhidao.baidu.com/search?'\n parm = {'lm': '0', 'rn': '5', 'pn': page, 'fr': 'search', 'ie':\n 'gbk', 'word': query}\n reptile = Reptile()\n page_content = reptile.get_page_content(url + '&'.join([(key + '=' 
+\n parm[key]) for key in parm]), timeout=3, is_cookie=True,\n charset='gbk')\n bs = BeautifulSoup(page_content, 'html.parser')\n content_list = bs.body.find_all('dl', {'class': 'dl'})\n data = []\n for item in content_list:\n entry = {'create_date': item.find('dd', {'class':\n 'dd explain f-light'}).span.text, 'title': item.a.text,\n 'abstract': item.find('dd', {'class': 'dd answer'}).text,\n 'url': item.a.get('href')}\n data.append(entry)\n return data\n",
"step-4": "<mask token>\nimport json\nimport re\nfrom bs4 import BeautifulSoup\nfrom src.util.reptile import *\n\n\nclass EncyclopediaDao:\n\n @staticmethod\n def get_key_content(key: str) ->list:\n \"\"\"\n\t\t获取指定关键字的百科内容检索内容\n\t\t:param key:\n\t\t:return:\n\t\t\"\"\"\n url = 'https://zh.wikipedia.org/w/api.php?'\n parm = {'action': 'query', 'list': 'search', 'srsearch': key,\n 'format': 'json', 'formatversion': '2'}\n reptile = Reptile()\n page_content = reptile.get_page_content(url + '&'.join([(key + '=' +\n parm[key]) for key in parm]), timeout=3)\n content_list = json.loads(page_content)['query']['search']\n data = []\n prefix = 'https://zh.wikipedia.org/wiki/'\n for index, item in enumerate(content_list):\n date, time = item['timestamp'].rstrip('Z').split('T')\n entry = {'id': item['pageid'], 'index': index, 'create_date':\n date, 'create_time': time, 'title': item['title'],\n 'abstract': re.sub('[<span class=\"searchmatch\">,</span>]',\n '', item['snippet']), 'url': prefix + item['title']}\n data.append(entry)\n return data\n\n @staticmethod\n def get_key_title(key: str) ->list:\n \"\"\"\n\t\t获取指定关键字的百科内容检索标题\n\t\t:param key:\n\t\t:return:\n\t\t\"\"\"\n url = 'https://zh.wikipedia.org/w/api.php?'\n parm = {'action': 'opensearch', 'search': key, 'format': 'json',\n 'formatversion': '2'}\n reptile = Reptile()\n page_content = reptile.get_page_content(url + '&'.join([(key + '=' +\n parm[key]) for key in parm]), timeout=3)\n content_list = json.loads(page_content)[1]\n data = []\n prefix = 'https://zh.wikipedia.org/wiki/'\n for index, item in enumerate(content_list):\n entry = {'index': index, 'title': item, 'url': prefix + item}\n data.append(entry)\n return data\n\n @staticmethod\n def get_faq_content(query: str, page: str) ->list:\n \"\"\"\n\t\t获取指定query的faq检索内容\n\t\t:param query:\n\t\t:param page:\n\t\t:return:\n\t\t\"\"\"\n url = 'https://zhidao.baidu.com/search?'\n parm = {'lm': '0', 'rn': '5', 'pn': page, 'fr': 'search', 'ie':\n 'gbk', 'word': query}\n 
reptile = Reptile()\n page_content = reptile.get_page_content(url + '&'.join([(key + '=' +\n parm[key]) for key in parm]), timeout=3, is_cookie=True,\n charset='gbk')\n bs = BeautifulSoup(page_content, 'html.parser')\n content_list = bs.body.find_all('dl', {'class': 'dl'})\n data = []\n for item in content_list:\n entry = {'create_date': item.find('dd', {'class':\n 'dd explain f-light'}).span.text, 'title': item.a.text,\n 'abstract': item.find('dd', {'class': 'dd answer'}).text,\n 'url': item.a.get('href')}\n data.append(entry)\n return data\n",
"step-5": "# coding=utf-8\n\n\"\"\"\nauthor: wlc\nfunction: 百科检索数据层\n\"\"\"\n\n# 引入外部库\nimport json\nimport re\nfrom bs4 import BeautifulSoup\n\n# 引入内部库\nfrom src.util.reptile import *\n\n\nclass EncyclopediaDao:\n\t@staticmethod\n\tdef get_key_content (key: str) -> list:\n\t\t\"\"\"\n\t\t获取指定关键字的百科内容检索内容\n\t\t:param key:\n\t\t:return:\n\t\t\"\"\"\n\t\t# 1.参数设置\n\t\turl = 'https://zh.wikipedia.org/w/api.php?'\n\t\tparm = {\n\t\t\t'action': 'query',\n\t\t\t'list': 'search',\n\t\t\t'srsearch': key,\n\t\t\t'format': 'json',\n\t\t\t'formatversion': '2'\n\t\t}\n\n\t\t# 2.百科内容获取\n\t\treptile = Reptile()\n\t\tpage_content = reptile.get_page_content(url + '&'.join([key + '=' + parm[key] for key in parm]), timeout=3)\n\t\tcontent_list = json.loads(page_content)['query']['search']\n\n\t\t# 3.百科内容格式化\n\t\tdata = []\n\t\tprefix = 'https://zh.wikipedia.org/wiki/'\n\t\tfor index, item in enumerate(content_list):\n\t\t\tdate, time = item['timestamp'].rstrip('Z').split('T')\n\t\t\tentry = {\n\t\t\t\t'id': item['pageid'],\n\t\t\t\t'index': index,\n\t\t\t\t'create_date': date,\n\t\t\t\t'create_time': time,\n\t\t\t\t'title': item['title'],\n\t\t\t\t'abstract': re.sub('[<span class=\\\"searchmatch\\\">,</span>]', '', item['snippet']),\n\t\t\t\t'url': prefix + item['title'],\n\t\t\t}\n\t\t\tdata.append(entry)\n\n\t\treturn data\n\n\t@staticmethod\n\tdef get_key_title(key: str) -> list:\n\t\t\"\"\"\n\t\t获取指定关键字的百科内容检索标题\n\t\t:param key:\n\t\t:return:\n\t\t\"\"\"\n\t\t# 1.参数设置\n\t\turl = 'https://zh.wikipedia.org/w/api.php?'\n\t\tparm = {\n\t\t\t'action': 'opensearch',\n\t\t\t'search': key,\n\t\t\t'format': 'json',\n\t\t\t'formatversion': '2'\n\t\t}\n\n\t\t# 2.百科内容获取\n\t\treptile = Reptile()\n\t\tpage_content = reptile.get_page_content(url + '&'.join([key + '=' + parm[key] for key in parm]), timeout=3)\n\t\tcontent_list = json.loads(page_content)[1]\n\n\t\t# 3.百科内容格式化\n\t\tdata = []\n\t\tprefix = 'https://zh.wikipedia.org/wiki/'\n\t\tfor index, item in 
enumerate(content_list):\n\t\t\tentry = {\n\t\t\t\t'index': index,\n\t\t\t\t'title': item,\n\t\t\t\t'url': prefix + item,\n\t\t\t}\n\t\t\tdata.append(entry)\n\n\t\treturn data\n\n\t@staticmethod\n\tdef get_faq_content(query: str, page: str) -> list:\n\t\t\"\"\"\n\t\t获取指定query的faq检索内容\n\t\t:param query:\n\t\t:param page:\n\t\t:return:\n\t\t\"\"\"\n\t\t# 1.参数设置\n\t\turl = 'https://zhidao.baidu.com/search?'\n\t\tparm = {\n\t\t\t'lm': '0',\n\t\t\t'rn': '5',\n\t\t\t'pn': page,\n\t\t\t'fr': 'search',\n\t\t\t'ie': 'gbk',\n\t\t\t'word': query\n\t\t}\n\n\t\t# 2.百科内容获取\n\t\treptile = Reptile()\n\t\tpage_content = reptile.get_page_content(url + '&'.join([key + '=' + parm[key] for key in parm]), timeout=3, is_cookie=True, charset='gbk')\n\t\tbs = BeautifulSoup(page_content, \"html.parser\")\n\t\tcontent_list = bs.body.find_all(\"dl\", {'class': 'dl'})\n\n\t\t# 3.百科内容格式化\n\t\tdata = []\n\t\tfor item in content_list:\n\t\t\tentry = {\n\t\t\t\t'create_date': item.find(\"dd\", {'class': 'dd explain f-light'}).span.text,\n\t\t\t\t'title': item.a.text,\n\t\t\t\t'abstract': item.find(\"dd\", {'class': 'dd answer'}).text,\n\t\t\t\t'url': item.a.get('href')\n\t\t\t}\n\t\t\tdata.append(entry)\n\n\t\treturn data\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from flask import render_template, flash, redirect, url_for, request
from flask_login import current_user, login_user, logout_user, login_required
from werkzeug.urls import url_parse
from app import db
# from app.main.forms import [list forms here]
from app.models import User
from app.main import bp
@bp.route('/')
@bp.route('/index')
@login_required
def index():
    """Render the landing page; both '/' and '/index' map here.

    ``@login_required`` redirects anonymous users to the login view
    before this function runs.
    """
    return render_template('index.html')
|
normal
|
{
"blob_id": "495d606304e07a097033366d1a7e1d856a4cf61f",
"index": 1935,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@bp.route('/')\n@bp.route('/index')\n@login_required\ndef index():\n return render_template('index.html')\n",
"step-3": "from flask import render_template, flash, redirect, url_for, request\nfrom flask_login import current_user, login_user, logout_user, login_required\nfrom werkzeug.urls import url_parse\nfrom app import db\nfrom app.models import User\nfrom app.main import bp\n\n\n@bp.route('/')\n@bp.route('/index')\n@login_required\ndef index():\n return render_template('index.html')\n",
"step-4": "from flask import render_template, flash, redirect, url_for, request\nfrom flask_login import current_user, login_user, logout_user, login_required\nfrom werkzeug.urls import url_parse\nfrom app import db\n# from app.main.forms import [list forms here]\nfrom app.models import User\nfrom app.main import bp\n\n@bp.route('/')\n@bp.route('/index')\n@login_required\ndef index():\n\treturn render_template('index.html')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.db import models
class Category(models.Model):
    """A book category; each ``Books`` row references exactly one Category."""

    # Human-readable label; uniqueness enforced at the DB level.
    name = models.CharField(max_length=50, unique=True)
    # Set once when the row is inserted (auto_now_add), never updated.
    created_at = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = 'Categoria'  # Portuguese label shown in the admin
class Books(models.Model):
    """A book title, belonging to exactly one :class:`Category`."""

    name = models.CharField(max_length=100)
    # Set once when the row is inserted, never updated.
    created_at = models.DateTimeField(auto_now_add=True)
    # NOTE(review): related_name='category' means the reverse accessor is
    # ``some_category.category.all()`` (this category's books) -- an odd
    # name, but existing callers may rely on it, so it is kept as-is.
    category = models.ForeignKey(
        Category, on_delete=models.CASCADE, related_name='category')

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = 'Livro'  # Portuguese label shown in the admin
class Student(models.Model):
    """A registered student linked to a single :class:`Books` record."""

    name = models.CharField(max_length=70)
    # presumably a formatted Brazilian CPF ("000.000.000-00" is 14 chars) -- TODO confirm
    cpf = models.CharField(max_length=14)
    birth_date = models.DateField()
    city = models.CharField(max_length=50)
    # Set once when the row is inserted (auto_now_add).
    registration_date = models.DateTimeField(auto_now_add=True)
    email = models.EmailField(max_length=50)
    tel = models.CharField(max_length=15)
    # NOTE(review): related_name='book' gives ``some_book.book.all()`` as the
    # reverse accessor (students holding that book); kept as-is for callers.
    book= models.ForeignKey(
        Books, on_delete=models.CASCADE, related_name='book')

    class Meta:
        verbose_name = 'Estudante'  # Portuguese label shown in the admin
        ordering = ['-id']          # newest students first by default

    def __str__(self):
        return self.name
|
normal
|
{
"blob_id": "0584ff5cb252fba0fe1fc350a5fb023ab5cbb02b",
"index": 6750,
"step-1": "<mask token>\n\n\nclass Student(models.Model):\n name = models.CharField(max_length=70)\n cpf = models.CharField(max_length=14)\n birth_date = models.DateField()\n city = models.CharField(max_length=50)\n registration_date = models.DateTimeField(auto_now_add=True)\n email = models.EmailField(max_length=50)\n tel = models.CharField(max_length=15)\n book = models.ForeignKey(Books, on_delete=models.CASCADE, related_name=\n 'book')\n\n\n class Meta:\n verbose_name = 'Estudante'\n ordering = ['-id']\n\n def __str__(self):\n return self.name\n",
"step-2": "<mask token>\n\n\nclass Category(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = 'Categoria'\n\n\nclass Books(models.Model):\n name = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n category = models.ForeignKey(Category, on_delete=models.CASCADE,\n related_name='category')\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Livro'\n\n\nclass Student(models.Model):\n name = models.CharField(max_length=70)\n cpf = models.CharField(max_length=14)\n birth_date = models.DateField()\n city = models.CharField(max_length=50)\n registration_date = models.DateTimeField(auto_now_add=True)\n email = models.EmailField(max_length=50)\n tel = models.CharField(max_length=15)\n book = models.ForeignKey(Books, on_delete=models.CASCADE, related_name=\n 'book')\n\n\n class Meta:\n verbose_name = 'Estudante'\n ordering = ['-id']\n\n def __str__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass Category(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Categoria'\n\n\nclass Books(models.Model):\n name = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n category = models.ForeignKey(Category, on_delete=models.CASCADE,\n related_name='category')\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Livro'\n\n\nclass Student(models.Model):\n name = models.CharField(max_length=70)\n cpf = models.CharField(max_length=14)\n birth_date = models.DateField()\n city = models.CharField(max_length=50)\n registration_date = models.DateTimeField(auto_now_add=True)\n email = models.EmailField(max_length=50)\n tel = models.CharField(max_length=15)\n book = models.ForeignKey(Books, on_delete=models.CASCADE, related_name=\n 'book')\n\n\n class Meta:\n verbose_name = 'Estudante'\n ordering = ['-id']\n\n def __str__(self):\n return self.name\n",
"step-4": "<mask token>\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=50, unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Categoria'\n\n\nclass Books(models.Model):\n name = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n category = models.ForeignKey(Category, on_delete=models.CASCADE,\n related_name='category')\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n verbose_name = 'Livro'\n\n\nclass Student(models.Model):\n name = models.CharField(max_length=70)\n cpf = models.CharField(max_length=14)\n birth_date = models.DateField()\n city = models.CharField(max_length=50)\n registration_date = models.DateTimeField(auto_now_add=True)\n email = models.EmailField(max_length=50)\n tel = models.CharField(max_length=15)\n book = models.ForeignKey(Books, on_delete=models.CASCADE, related_name=\n 'book')\n\n\n class Meta:\n verbose_name = 'Estudante'\n ordering = ['-id']\n\n def __str__(self):\n return self.name\n",
"step-5": "from django.db import models\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=50, unique=True)\n created_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Categoria'\n\n\nclass Books(models.Model):\n name = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n category = models.ForeignKey(\n Category, on_delete=models.CASCADE, related_name='category')\n\n def __str__(self):\n return self.name\n\n class Meta:\n verbose_name = 'Livro'\n\n\nclass Student(models.Model):\n name = models.CharField(max_length=70)\n cpf = models.CharField(max_length=14)\n birth_date = models.DateField()\n city = models.CharField(max_length=50)\n registration_date = models.DateTimeField(auto_now_add=True)\n email = models.EmailField(max_length=50)\n tel = models.CharField(max_length=15)\n book= models.ForeignKey(\n Books, on_delete=models.CASCADE, related_name='book')\n\n class Meta:\n verbose_name = 'Estudante'\n ordering = ['-id']\n\n def __str__(self):\n return self.name\n\n",
"step-ids": [
3,
7,
8,
9,
11
]
}
|
[
3,
7,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
myLabel1.grid(row=0, column=0)
myLabel2.grid(row=1, column=0)
root.mainloop()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
root = Tk()
myLabel1 = Label(root, text='Hello User!')
myLabel2 = Label(root, text='Welcome to medBOT')
myLabel1.grid(row=0, column=0)
myLabel2.grid(row=1, column=0)
root.mainloop()
<|reserved_special_token_1|>
from tkinter import *
root = Tk()
myLabel1 = Label(root, text='Hello User!')
myLabel2 = Label(root, text='Welcome to medBOT')
myLabel1.grid(row=0, column=0)
myLabel2.grid(row=1, column=0)
root.mainloop()
<|reserved_special_token_1|>
from tkinter import *

# medBOT greeting window.  Every on-screen element in tkinter is a widget,
# all of them rooted in a single top-level Tk instance.
root = Tk()

# Static greeting text lives in two Label widgets.
greeting_label = Label(root, text="Hello User!")
welcome_label = Label(root, text="Welcome to medBOT")

# grid() places each widget at a relative row/column position on the window.
greeting_label.grid(row=0, column=0)
welcome_label.grid(row=1, column=0)

# Hand control to the Tk event loop; blocks until the window is closed.
root.mainloop()
|
flexible
|
{
"blob_id": "93fe16e5a97ec2652c4f6b8be844244d9776ea2e",
"index": 4921,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmyLabel1.grid(row=0, column=0)\nmyLabel2.grid(row=1, column=0)\nroot.mainloop()\n",
"step-3": "<mask token>\nroot = Tk()\nmyLabel1 = Label(root, text='Hello User!')\nmyLabel2 = Label(root, text='Welcome to medBOT')\nmyLabel1.grid(row=0, column=0)\nmyLabel2.grid(row=1, column=0)\nroot.mainloop()\n",
"step-4": "from tkinter import *\nroot = Tk()\nmyLabel1 = Label(root, text='Hello User!')\nmyLabel2 = Label(root, text='Welcome to medBOT')\nmyLabel1.grid(row=0, column=0)\nmyLabel2.grid(row=1, column=0)\nroot.mainloop()\n",
"step-5": "from tkinter import *\n\n# Everything in tkinter is a widget\n# We start with the Root Widget\n\nroot = Tk()\n# Creating a Label Widget\nmyLabel1 = Label(root, text=\"Hello User!\")\nmyLabel2 = Label(root, text=\"Welcome to medBOT\")\n\n# Put labels onto the screen\nmyLabel1.grid(row=0, column=0)\nmyLabel2.grid(row=1, column=0)\n# Grid assigns the texts exacts in the position\n# Grid creates a relative position\n\nroot.mainloop()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding=utf-8
import datetime
from django.http import JsonResponse
from django.shortcuts import render, redirect
from models import *
from hashlib import sha1
from user_decorators import user_login
from df_goods.models import GoodsInfo
# Create your views here.
def register(request):
    """Render the account registration page."""
    return render(request, 'df_user/register.html',
                  {'title': '注册', 'top': '0'})
def login(request):
    """Render the login page."""
    return render(request, 'df_user/login.html',
                  {'title': '登录', 'top': '0'})
def register_handle(request):
    """Process the registration form.

    Validates that the two password fields match and that the user name is
    not already taken, stores only the SHA1 digest of the password, then
    redirects to the login page.  Any validation failure redirects back to
    the registration form.

    Fix: the leftover ``print(dict)`` debug statement is removed -- it dumped
    the raw POST data (including the plaintext password) to stdout.
    """
    post = request.POST  # renamed from 'dict', which shadowed the builtin
    uname = post.get('user_name')
    upwd = post.get('pwd')
    upwd2 = post.get('cpwd')
    uemail = post.get('email')

    # Reject mismatched password / confirmation.
    if upwd != upwd2:
        return redirect('/user/register/')

    # Only the SHA1 digest is persisted, never the plain text.
    s1 = sha1()
    s1.update(upwd)
    upwd_sha1 = s1.hexdigest()

    # Refuse duplicate user names.
    # NOTE(review): check-then-save is racy under concurrent registration;
    # a unique constraint on uname would make this airtight.
    if UserInfo.objects.filter(uname=uname).count() != 0:
        return redirect('/user/register/')

    user = UserInfo()
    user.uname = uname
    user.upwd = upwd_sha1
    user.uemail = uemail
    user.save()
    return redirect('/user/login/')
def register_valid(request):
    """AJAX endpoint: report how many existing users already use the name."""
    requested_name = request.GET.get('uname')
    taken_count = UserInfo.objects.filter(uname=requested_name).count()
    return JsonResponse({'valid': taken_count})
def login_handle(request):
    """Process the login form.

    On success the user id/name are stored in the session and the browser is
    redirected back to the page it came from; on failure the login page is
    re-rendered with an error flag set in the context.

    NOTE(review): the raw password is echoed back in the template context
    ('upwd') -- confirm the template never renders it.
    """
    dict=request.POST
    uname=dict.get('username')
    upwd=dict.get('pwd')
    # 'name_jz' is the "remember my user name" checkbox ('1' when ticked).
    uname_jz=dict.get('name_jz','0')

    # Passwords are stored hashed, so only the SHA1 digest is compared.
    s1=sha1()
    s1.update(upwd)
    upwd_sha1=s1.hexdigest()

    context={'title': '登录','uname':uname,'upwd':upwd,'top':'0'}

    users=UserInfo.objects.filter(uname=uname)
    if len(users)==0:
        # unknown user name
        context['name_error']='1'
        return render(request, 'df_user/login.html',context)
    else:
        if users[0].upwd == upwd_sha1:  # login succeeded
            # remember the logged-in user in the session
            request.session['uid'] = users[0].id
            request.session['uname'] = uname

            # redirect back to wherever the user came from (default: home)
            path = request.session.get('url_path', '/')
            response = redirect(path)

            if uname_jz == '1':
                # remember the user name in a cookie for 7 days
                response.set_cookie('uname',uname, expires=datetime.datetime.now()+datetime.timedelta(days=7))
            else:
                # forget any previously remembered user name
                response.set_cookie('uname','',max_age=-1)

            return response
        else:
            # wrong password
            context['pwd_error']='1'
            return render(request,'df_user/login.html',context)
# @user_login
# def info(request):
# if request.session.has_key('uid'):
# return render(request, 'df_user/info.html')
# else:
# return redirect('/user/login/')
@user_login
def info(request):
    """User-centre page: the profile plus recently browsed goods."""
    user = UserInfo.objects.get(pk=request.session['uid'])

    # Recently viewed goods are tracked client-side as a comma-separated
    # id list in the 'goods_ids' cookie; empty entries are skipped.
    recent_ids = request.COOKIES.get('goods_ids', '').split(',')
    goods_list = [GoodsInfo.objects.get(id=gid) for gid in recent_ids if gid]

    return render(request, 'df_user/info.html',
                  {'title': '用户中心', 'user': user, 'goods_list': goods_list})
@user_login
def order(request):
    """Render the user's order page (login required)."""
    template_name = 'df_user/order.html'
    return render(request, template_name)
@user_login
def site(request):
    """Show the shipping-address form; on POST, update and persist it."""
    user = UserInfo.objects.get(pk=request.session['uid'])

    if request.method == 'POST':
        form_data = request.POST
        user.ushou = form_data.get('ushou')
        user.uaddress = form_data.get('uaddress')
        user.uphone = form_data.get('uphone')
        user.save()

    return render(request, 'df_user/site.html',
                  {'title': '收货地址', 'user': user})
def logout(request):
    """Drop all session state, then send the user to the login page."""
    request.session.flush()
    return redirect('/user/login/')
def islogin(request):
    """AJAX endpoint: report whether the session holds a logged-in user.

    Fix: ``dict.has_key()`` is Python-2-only (removed in Python 3); the
    ``in`` operator is equivalent on Django session objects and works on
    both interpreter lines.
    """
    result = 1 if 'uid' in request.session else 0
    return JsonResponse({'islogin': result})
|
normal
|
{
"blob_id": "1ef40d4162ca1b1bd6a5a5010485c78eb9d8d736",
"index": 9621,
"step-1": "<mask token>\n\n\ndef register(request):\n context = {'title': '注册', 'top': '0'}\n return render(request, 'df_user/register.html', context)\n\n\ndef login(request):\n context = {'title': '登录', 'top': '0'}\n return render(request, 'df_user/login.html', context)\n\n\ndef register_handle(request):\n dict = request.POST\n print(dict)\n uname = dict.get('user_name')\n upwd = dict.get('pwd')\n upwd2 = dict.get('cpwd')\n uemail = dict.get('email')\n if upwd != upwd2:\n return redirect('/user/register/')\n s1 = sha1()\n s1.update(upwd)\n upwd_sha1 = s1.hexdigest()\n user = UserInfo()\n user.uname = uname\n user.upwd = upwd_sha1\n user.uemail = uemail\n result = UserInfo.objects.filter(uname=uname).count()\n if result == 0:\n user.save()\n else:\n return redirect('/user/register/')\n return redirect('/user/login/')\n\n\n<mask token>\n\n\ndef login_handle(request):\n dict = request.POST\n uname = dict.get('username')\n upwd = dict.get('pwd')\n uname_jz = dict.get('name_jz', '0')\n s1 = sha1()\n s1.update(upwd)\n upwd_sha1 = s1.hexdigest()\n context = {'title': '登录', 'uname': uname, 'upwd': upwd, 'top': '0'}\n users = UserInfo.objects.filter(uname=uname)\n if len(users) == 0:\n context['name_error'] = '1'\n return render(request, 'df_user/login.html', context)\n elif users[0].upwd == upwd_sha1:\n request.session['uid'] = users[0].id\n request.session['uname'] = uname\n path = request.session.get('url_path', '/')\n response = redirect(path)\n if uname_jz == '1':\n response.set_cookie('uname', uname, expires=datetime.datetime.\n now() + datetime.timedelta(days=7))\n else:\n response.set_cookie('uname', '', max_age=-1)\n return response\n else:\n context['pwd_error'] = '1'\n return render(request, 'df_user/login.html', context)\n\n\n<mask token>\n\n\n@user_login\ndef order(request):\n return render(request, 'df_user/order.html')\n\n\n<mask token>\n\n\ndef islogin(request):\n result = 0\n if request.session.has_key('uid'):\n result = 1\n return JsonResponse({'islogin': 
result})\n",
"step-2": "<mask token>\n\n\ndef register(request):\n context = {'title': '注册', 'top': '0'}\n return render(request, 'df_user/register.html', context)\n\n\ndef login(request):\n context = {'title': '登录', 'top': '0'}\n return render(request, 'df_user/login.html', context)\n\n\ndef register_handle(request):\n dict = request.POST\n print(dict)\n uname = dict.get('user_name')\n upwd = dict.get('pwd')\n upwd2 = dict.get('cpwd')\n uemail = dict.get('email')\n if upwd != upwd2:\n return redirect('/user/register/')\n s1 = sha1()\n s1.update(upwd)\n upwd_sha1 = s1.hexdigest()\n user = UserInfo()\n user.uname = uname\n user.upwd = upwd_sha1\n user.uemail = uemail\n result = UserInfo.objects.filter(uname=uname).count()\n if result == 0:\n user.save()\n else:\n return redirect('/user/register/')\n return redirect('/user/login/')\n\n\ndef register_valid(request):\n uname = request.GET.get('uname')\n result = UserInfo.objects.filter(uname=uname).count()\n context = {'valid': result}\n return JsonResponse(context)\n\n\ndef login_handle(request):\n dict = request.POST\n uname = dict.get('username')\n upwd = dict.get('pwd')\n uname_jz = dict.get('name_jz', '0')\n s1 = sha1()\n s1.update(upwd)\n upwd_sha1 = s1.hexdigest()\n context = {'title': '登录', 'uname': uname, 'upwd': upwd, 'top': '0'}\n users = UserInfo.objects.filter(uname=uname)\n if len(users) == 0:\n context['name_error'] = '1'\n return render(request, 'df_user/login.html', context)\n elif users[0].upwd == upwd_sha1:\n request.session['uid'] = users[0].id\n request.session['uname'] = uname\n path = request.session.get('url_path', '/')\n response = redirect(path)\n if uname_jz == '1':\n response.set_cookie('uname', uname, expires=datetime.datetime.\n now() + datetime.timedelta(days=7))\n else:\n response.set_cookie('uname', '', max_age=-1)\n return response\n else:\n context['pwd_error'] = '1'\n return render(request, 'df_user/login.html', context)\n\n\n<mask token>\n\n\n@user_login\ndef order(request):\n return 
render(request, 'df_user/order.html')\n\n\n<mask token>\n\n\ndef islogin(request):\n result = 0\n if request.session.has_key('uid'):\n result = 1\n return JsonResponse({'islogin': result})\n",
"step-3": "<mask token>\n\n\ndef register(request):\n context = {'title': '注册', 'top': '0'}\n return render(request, 'df_user/register.html', context)\n\n\ndef login(request):\n context = {'title': '登录', 'top': '0'}\n return render(request, 'df_user/login.html', context)\n\n\ndef register_handle(request):\n dict = request.POST\n print(dict)\n uname = dict.get('user_name')\n upwd = dict.get('pwd')\n upwd2 = dict.get('cpwd')\n uemail = dict.get('email')\n if upwd != upwd2:\n return redirect('/user/register/')\n s1 = sha1()\n s1.update(upwd)\n upwd_sha1 = s1.hexdigest()\n user = UserInfo()\n user.uname = uname\n user.upwd = upwd_sha1\n user.uemail = uemail\n result = UserInfo.objects.filter(uname=uname).count()\n if result == 0:\n user.save()\n else:\n return redirect('/user/register/')\n return redirect('/user/login/')\n\n\ndef register_valid(request):\n uname = request.GET.get('uname')\n result = UserInfo.objects.filter(uname=uname).count()\n context = {'valid': result}\n return JsonResponse(context)\n\n\ndef login_handle(request):\n dict = request.POST\n uname = dict.get('username')\n upwd = dict.get('pwd')\n uname_jz = dict.get('name_jz', '0')\n s1 = sha1()\n s1.update(upwd)\n upwd_sha1 = s1.hexdigest()\n context = {'title': '登录', 'uname': uname, 'upwd': upwd, 'top': '0'}\n users = UserInfo.objects.filter(uname=uname)\n if len(users) == 0:\n context['name_error'] = '1'\n return render(request, 'df_user/login.html', context)\n elif users[0].upwd == upwd_sha1:\n request.session['uid'] = users[0].id\n request.session['uname'] = uname\n path = request.session.get('url_path', '/')\n response = redirect(path)\n if uname_jz == '1':\n response.set_cookie('uname', uname, expires=datetime.datetime.\n now() + datetime.timedelta(days=7))\n else:\n response.set_cookie('uname', '', max_age=-1)\n return response\n else:\n context['pwd_error'] = '1'\n return render(request, 'df_user/login.html', context)\n\n\n@user_login\ndef info(request):\n user = 
UserInfo.objects.get(pk=request.session['uid'])\n goods_ids = request.COOKIES.get('goods_ids', '').split(',')\n goods_list = []\n for gid in goods_ids:\n if gid:\n goods_list.append(GoodsInfo.objects.get(id=gid))\n context = {'title': '用户中心', 'user': user, 'goods_list': goods_list}\n return render(request, 'df_user/info.html', context)\n\n\n@user_login\ndef order(request):\n return render(request, 'df_user/order.html')\n\n\n<mask token>\n\n\ndef logout(request):\n request.session.flush()\n return redirect('/user/login/')\n\n\ndef islogin(request):\n result = 0\n if request.session.has_key('uid'):\n result = 1\n return JsonResponse({'islogin': result})\n",
"step-4": "import datetime\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom models import *\nfrom hashlib import sha1\nfrom user_decorators import user_login\nfrom df_goods.models import GoodsInfo\n\n\ndef register(request):\n context = {'title': '注册', 'top': '0'}\n return render(request, 'df_user/register.html', context)\n\n\ndef login(request):\n context = {'title': '登录', 'top': '0'}\n return render(request, 'df_user/login.html', context)\n\n\ndef register_handle(request):\n dict = request.POST\n print(dict)\n uname = dict.get('user_name')\n upwd = dict.get('pwd')\n upwd2 = dict.get('cpwd')\n uemail = dict.get('email')\n if upwd != upwd2:\n return redirect('/user/register/')\n s1 = sha1()\n s1.update(upwd)\n upwd_sha1 = s1.hexdigest()\n user = UserInfo()\n user.uname = uname\n user.upwd = upwd_sha1\n user.uemail = uemail\n result = UserInfo.objects.filter(uname=uname).count()\n if result == 0:\n user.save()\n else:\n return redirect('/user/register/')\n return redirect('/user/login/')\n\n\ndef register_valid(request):\n uname = request.GET.get('uname')\n result = UserInfo.objects.filter(uname=uname).count()\n context = {'valid': result}\n return JsonResponse(context)\n\n\ndef login_handle(request):\n dict = request.POST\n uname = dict.get('username')\n upwd = dict.get('pwd')\n uname_jz = dict.get('name_jz', '0')\n s1 = sha1()\n s1.update(upwd)\n upwd_sha1 = s1.hexdigest()\n context = {'title': '登录', 'uname': uname, 'upwd': upwd, 'top': '0'}\n users = UserInfo.objects.filter(uname=uname)\n if len(users) == 0:\n context['name_error'] = '1'\n return render(request, 'df_user/login.html', context)\n elif users[0].upwd == upwd_sha1:\n request.session['uid'] = users[0].id\n request.session['uname'] = uname\n path = request.session.get('url_path', '/')\n response = redirect(path)\n if uname_jz == '1':\n response.set_cookie('uname', uname, expires=datetime.datetime.\n now() + datetime.timedelta(days=7))\n else:\n 
response.set_cookie('uname', '', max_age=-1)\n return response\n else:\n context['pwd_error'] = '1'\n return render(request, 'df_user/login.html', context)\n\n\n@user_login\ndef info(request):\n user = UserInfo.objects.get(pk=request.session['uid'])\n goods_ids = request.COOKIES.get('goods_ids', '').split(',')\n goods_list = []\n for gid in goods_ids:\n if gid:\n goods_list.append(GoodsInfo.objects.get(id=gid))\n context = {'title': '用户中心', 'user': user, 'goods_list': goods_list}\n return render(request, 'df_user/info.html', context)\n\n\n@user_login\ndef order(request):\n return render(request, 'df_user/order.html')\n\n\n@user_login\ndef site(request):\n user = UserInfo.objects.get(pk=request.session['uid'])\n if request.method == 'POST':\n dict = request.POST\n user.ushou = dict.get('ushou')\n user.uaddress = dict.get('uaddress')\n user.uphone = dict.get('uphone')\n user.save()\n context = {'title': '收货地址', 'user': user}\n return render(request, 'df_user/site.html', context)\n\n\ndef logout(request):\n request.session.flush()\n return redirect('/user/login/')\n\n\ndef islogin(request):\n result = 0\n if request.session.has_key('uid'):\n result = 1\n return JsonResponse({'islogin': result})\n",
"step-5": "# coding=utf-8\nimport datetime\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom models import *\nfrom hashlib import sha1\nfrom user_decorators import user_login\nfrom df_goods.models import GoodsInfo\n# Create your views here.\ndef register(request):\n context={'title':'注册','top':'0'}\n return render(request, 'df_user/register.html',context)\n\n\ndef login(request):\n\n context = {'title': '登录','top':'0'}\n return render(request, 'df_user/login.html',context)\n\n\ndef register_handle(request):\n dict=request.POST\n print(dict)\n uname=dict.get('user_name')\n upwd=dict.get('pwd')\n upwd2=dict.get('cpwd')\n uemail=dict.get('email')\n\n if upwd != upwd2:\n return redirect('/user/register/')\n\n\n s1=sha1()\n s1.update(upwd)\n upwd_sha1=s1.hexdigest()\n\n user=UserInfo()\n user.uname=uname\n user.upwd=upwd_sha1\n user.uemail=uemail\n result = UserInfo.objects.filter(uname=uname).count()\n\n if result == 0:\n user.save()\n else:\n return redirect('/user/register/')\n return redirect('/user/login/')\n\n\ndef register_valid(request):\n uname=request.GET.get('uname')\n result=UserInfo.objects.filter(uname=uname).count()\n context={'valid':result}\n return JsonResponse(context)\n\n\ndef login_handle(request):\n dict=request.POST\n uname=dict.get('username')\n upwd=dict.get('pwd')\n uname_jz=dict.get('name_jz','0')\n\n s1=sha1()\n s1.update(upwd)\n upwd_sha1=s1.hexdigest()\n\n context={'title': '登录','uname':uname,'upwd':upwd,'top':'0'}\n\n users=UserInfo.objects.filter(uname=uname)\n if len(users)==0:\n # 用户名错误\n context['name_error']='1'\n return render(request, 'df_user/login.html',context)\n else:\n if users[0].upwd == upwd_sha1:#登陆成功\n #记录当前登录的用户\n request.session['uid'] = users[0].id\n request.session['uname'] = uname\n\n # 重定向 从哪来 回哪去\n path = request.session.get('url_path', '/')\n response = redirect(path)\n\n if uname_jz == '1':\n response.set_cookie('uname',uname, 
expires=datetime.datetime.now()+datetime.timedelta(days=7))\n else:\n response.set_cookie('uname','',max_age=-1)\n\n return response\n else:\n #密码错误\n context['pwd_error']='1'\n return render(request,'df_user/login.html',context)\n\n\n# @user_login\n# def info(request):\n# if request.session.has_key('uid'):\n# return render(request, 'df_user/info.html')\n# else:\n# return redirect('/user/login/')\n\n@user_login\ndef info(request):\n user=UserInfo.objects.get(pk=request.session['uid'])\n #读取最近浏览商品\n goods_ids = request.COOKIES.get('goods_ids','').split(',')\n\n goods_list= []\n for gid in goods_ids:\n if gid:\n goods_list.append(GoodsInfo.objects.get(id=gid))\n\n\n context={'title':'用户中心','user':user,'goods_list':goods_list}\n return render(request, 'df_user/info.html',context)\n\n\n@user_login\ndef order(request):\n return render(request, 'df_user/order.html')\n\n\n@user_login\ndef site(request):\n user = UserInfo.objects.get(pk=request.session['uid'])\n if request.method == 'POST':\n dict=request.POST\n user.ushou=dict.get('ushou')\n user.uaddress = dict.get('uaddress')\n user.uphone = dict.get('uphone')\n user.save()\n context = {'title': '收货地址', 'user': user}\n return render(request, 'df_user/site.html',context)\n\n\ndef logout(request):\n request.session.flush()\n return redirect('/user/login/')\n\ndef islogin(request):\n result=0\n if request.session.has_key('uid'):\n result=1\n return JsonResponse({'islogin':result})",
"step-ids": [
6,
7,
9,
11,
12
]
}
|
[
6,
7,
9,
11,
12
] |
import json
import logging
import os
import sys
from io import StringIO
import pytest
from allure.constants import AttachmentType
from utils.tools import close_popups
# Shared kwargs for pretty-printing JSON payloads attached to reports.
_beautiful_json = dict(indent=2, ensure_ascii=False, sort_keys=True)
# LOGGING console ####################################################################################################
# Register the custom logging level names used by CustomLogger below.
logging.addLevelName(15, "SUBDEBUG")
logging.addLevelName(5, "TEST")
# Formatter shared by every handler created in this module.
log_formatter = logging.Formatter("%(asctime)s [%(threadName)s] [%(levelname)s] - %(message)s",
                                  datefmt='%Y-%m-%d %H:%M:%S')
class CustomLogger(logging.Logger):
    """Logger subclass that can also attach formatted payloads and
    screenshots to an Allure report, gated by the logger's level."""

    # In-memory sink shared by all instances; setup_logging() attaches a
    # StreamHandler writing to it so tests can inspect logged output.
    test_log = StringIO()

    @staticmethod
    def format_message(message):
        """Render dict/list/tuple payloads as pretty JSON, anything else as str."""
        if isinstance(message, (dict, list, tuple)):
            return json.dumps(message, **_beautiful_json)
        return str(message)

    def subdebug(self, message, *args, **kwargs):
        """Log at the custom SUBDEBUG level (numeric level 15)."""
        if self.isEnabledFor(15):
            self._log(15, message, args, **kwargs)

    def attach_debug(self, name, message):
        # Attach only when DEBUG (10) is enabled.
        if self.isEnabledFor(10):
            pytest.allure.attach(name, self.format_message(message))

    def attach_subdebug(self, name, message):
        # Attach only when SUBDEBUG (15) is enabled.
        if self.isEnabledFor(15):
            pytest.allure.attach(name, self.format_message(message))

    def attach_info(self, name, message):
        # Attach only when INFO (20) is enabled.
        if self.isEnabledFor(20):
            pytest.allure.attach(name, self.format_message(message))

    def attach_error(self, name, message):
        # Error attachments are made unconditionally.
        pytest.allure.attach(name, self.format_message(message))

    @staticmethod
    def attach_png(name, message):
        """Attach raw PNG bytes to the report."""
        pytest.allure.attach(name, message, type=AttachmentType.PNG)

    def attach_selenium_screenshot(self, attach_name, selenium_driver):
        """Grab a screenshot from *selenium_driver* and attach it as a PNG.

        Attaches the exception text instead when the driver fails; logs an
        error when no driver was supplied.
        """
        if not selenium_driver:
            self.error('No browser is define')
            return
        try:
            close_popups(selenium_driver)
            self.debug('Attach screenshot')
            self.attach_png(attach_name, selenium_driver.get_screenshot_as_png())
            self.debug('...Done')
        except Exception as e:
            self.error('Cannot get screenshot from SeleniumWebDriver')
            pytest.allure.attach(attach_name, str(e))

    def add_handler(self, file_name, mode='a'):
        """Attach a FileHandler for *file_name* using the shared formatter.

        NOTE(review): the file handler's level reuses the
        LOGGING_LEVEL_TO_CONSOLE variable -- confirm that is intentional.
        """
        handler = logging.FileHandler(filename=file_name, mode=mode)
        handler.setFormatter(log_formatter)
        handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))
        self.addHandler(handler)
def setup_logging():
    """Create and return the root CustomLogger.

    Wires two StreamHandlers: one to stdout (level from
    LOGGING_LEVEL_TO_CONSOLE, default WARN) and one to the shared
    in-memory CustomLogger.test_log buffer (level from LOGGING_LEVEL,
    default INFO), both using the module formatter.
    """
    root = CustomLogger('root')
    # (stream, env var holding the level, fallback level) -- console first.
    handler_specs = (
        (sys.stdout, 'LOGGING_LEVEL_TO_CONSOLE', 'WARN'),
        (root.test_log, 'LOGGING_LEVEL', 'INFO'),
    )
    for stream, level_var, default_level in handler_specs:
        handler = logging.StreamHandler(stream)
        handler.setLevel(os.getenv(level_var, default_level))
        handler.setFormatter(log_formatter)
        root.addHandler(handler)
    return root


logger = setup_logging()
|
normal
|
{
"blob_id": "37fdfddb471e2eec9e5867d685c7c56fc38c5ae7",
"index": 8363,
"step-1": "<mask token>\n\n\nclass CustomLogger(logging.Logger):\n <mask token>\n\n @staticmethod\n def format_message(message):\n return json.dumps(message, **_beautiful_json) if isinstance(message,\n (dict, list, tuple)) else str(message)\n\n def subdebug(self, message, *args, **kwargs):\n if self.isEnabledFor(15):\n self._log(15, message, args, **kwargs)\n\n def attach_debug(self, name, message):\n if self.isEnabledFor(10):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_subdebug(self, name, message):\n if self.isEnabledFor(15):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_info(self, name, message):\n if self.isEnabledFor(20):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_error(self, name, message):\n pytest.allure.attach(name, self.format_message(message))\n\n @staticmethod\n def attach_png(name, message):\n pytest.allure.attach(name, message, type=AttachmentType.PNG)\n\n def attach_selenium_screenshot(self, attach_name, selenium_driver):\n if selenium_driver:\n try:\n close_popups(selenium_driver)\n self.debug('Attach screenshot')\n self.attach_png(attach_name, selenium_driver.\n get_screenshot_as_png())\n self.debug('...Done')\n except Exception as e:\n self.error('Cannot get screenshot from SeleniumWebDriver')\n pytest.allure.attach(attach_name, str(e))\n else:\n self.error('No browser is define')\n\n def add_handler(self, file_name, mode='a'):\n file_handler = logging.FileHandler(filename=file_name, mode=mode)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n self.addHandler(file_handler)\n\n\n<mask token>\n",
"step-2": "<mask token>\nlogging.addLevelName(15, 'SUBDEBUG')\nlogging.addLevelName(5, 'TEST')\n<mask token>\n\n\nclass CustomLogger(logging.Logger):\n test_log = StringIO()\n\n @staticmethod\n def format_message(message):\n return json.dumps(message, **_beautiful_json) if isinstance(message,\n (dict, list, tuple)) else str(message)\n\n def subdebug(self, message, *args, **kwargs):\n if self.isEnabledFor(15):\n self._log(15, message, args, **kwargs)\n\n def attach_debug(self, name, message):\n if self.isEnabledFor(10):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_subdebug(self, name, message):\n if self.isEnabledFor(15):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_info(self, name, message):\n if self.isEnabledFor(20):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_error(self, name, message):\n pytest.allure.attach(name, self.format_message(message))\n\n @staticmethod\n def attach_png(name, message):\n pytest.allure.attach(name, message, type=AttachmentType.PNG)\n\n def attach_selenium_screenshot(self, attach_name, selenium_driver):\n if selenium_driver:\n try:\n close_popups(selenium_driver)\n self.debug('Attach screenshot')\n self.attach_png(attach_name, selenium_driver.\n get_screenshot_as_png())\n self.debug('...Done')\n except Exception as e:\n self.error('Cannot get screenshot from SeleniumWebDriver')\n pytest.allure.attach(attach_name, str(e))\n else:\n self.error('No browser is define')\n\n def add_handler(self, file_name, mode='a'):\n file_handler = logging.FileHandler(filename=file_name, mode=mode)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n self.addHandler(file_handler)\n\n\ndef setup_logging():\n logger = CustomLogger('root')\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n console_handler.setFormatter(log_formatter)\n 
logger.addHandler(console_handler)\n string_io = logging.StreamHandler(logger.test_log)\n string_io.setLevel(os.getenv('LOGGING_LEVEL', 'INFO'))\n string_io.setFormatter(log_formatter)\n logger.addHandler(string_io)\n return logger\n\n\n<mask token>\n",
"step-3": "<mask token>\n_beautiful_json = dict(indent=2, ensure_ascii=False, sort_keys=True)\nlogging.addLevelName(15, 'SUBDEBUG')\nlogging.addLevelName(5, 'TEST')\nlog_formatter = logging.Formatter(\n '%(asctime)s [%(threadName)s] [%(levelname)s] - %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n\n\nclass CustomLogger(logging.Logger):\n test_log = StringIO()\n\n @staticmethod\n def format_message(message):\n return json.dumps(message, **_beautiful_json) if isinstance(message,\n (dict, list, tuple)) else str(message)\n\n def subdebug(self, message, *args, **kwargs):\n if self.isEnabledFor(15):\n self._log(15, message, args, **kwargs)\n\n def attach_debug(self, name, message):\n if self.isEnabledFor(10):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_subdebug(self, name, message):\n if self.isEnabledFor(15):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_info(self, name, message):\n if self.isEnabledFor(20):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_error(self, name, message):\n pytest.allure.attach(name, self.format_message(message))\n\n @staticmethod\n def attach_png(name, message):\n pytest.allure.attach(name, message, type=AttachmentType.PNG)\n\n def attach_selenium_screenshot(self, attach_name, selenium_driver):\n if selenium_driver:\n try:\n close_popups(selenium_driver)\n self.debug('Attach screenshot')\n self.attach_png(attach_name, selenium_driver.\n get_screenshot_as_png())\n self.debug('...Done')\n except Exception as e:\n self.error('Cannot get screenshot from SeleniumWebDriver')\n pytest.allure.attach(attach_name, str(e))\n else:\n self.error('No browser is define')\n\n def add_handler(self, file_name, mode='a'):\n file_handler = logging.FileHandler(filename=file_name, mode=mode)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n self.addHandler(file_handler)\n\n\ndef setup_logging():\n logger = 
CustomLogger('root')\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n console_handler.setFormatter(log_formatter)\n logger.addHandler(console_handler)\n string_io = logging.StreamHandler(logger.test_log)\n string_io.setLevel(os.getenv('LOGGING_LEVEL', 'INFO'))\n string_io.setFormatter(log_formatter)\n logger.addHandler(string_io)\n return logger\n\n\nlogger = setup_logging()\n",
"step-4": "import json\nimport logging\nimport os\nimport sys\nfrom io import StringIO\nimport pytest\nfrom allure.constants import AttachmentType\nfrom utils.tools import close_popups\n_beautiful_json = dict(indent=2, ensure_ascii=False, sort_keys=True)\nlogging.addLevelName(15, 'SUBDEBUG')\nlogging.addLevelName(5, 'TEST')\nlog_formatter = logging.Formatter(\n '%(asctime)s [%(threadName)s] [%(levelname)s] - %(message)s', datefmt=\n '%Y-%m-%d %H:%M:%S')\n\n\nclass CustomLogger(logging.Logger):\n test_log = StringIO()\n\n @staticmethod\n def format_message(message):\n return json.dumps(message, **_beautiful_json) if isinstance(message,\n (dict, list, tuple)) else str(message)\n\n def subdebug(self, message, *args, **kwargs):\n if self.isEnabledFor(15):\n self._log(15, message, args, **kwargs)\n\n def attach_debug(self, name, message):\n if self.isEnabledFor(10):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_subdebug(self, name, message):\n if self.isEnabledFor(15):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_info(self, name, message):\n if self.isEnabledFor(20):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_error(self, name, message):\n pytest.allure.attach(name, self.format_message(message))\n\n @staticmethod\n def attach_png(name, message):\n pytest.allure.attach(name, message, type=AttachmentType.PNG)\n\n def attach_selenium_screenshot(self, attach_name, selenium_driver):\n if selenium_driver:\n try:\n close_popups(selenium_driver)\n self.debug('Attach screenshot')\n self.attach_png(attach_name, selenium_driver.\n get_screenshot_as_png())\n self.debug('...Done')\n except Exception as e:\n self.error('Cannot get screenshot from SeleniumWebDriver')\n pytest.allure.attach(attach_name, str(e))\n else:\n self.error('No browser is define')\n\n def add_handler(self, file_name, mode='a'):\n file_handler = logging.FileHandler(filename=file_name, mode=mode)\n 
file_handler.setFormatter(log_formatter)\n file_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n self.addHandler(file_handler)\n\n\ndef setup_logging():\n logger = CustomLogger('root')\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n console_handler.setFormatter(log_formatter)\n logger.addHandler(console_handler)\n string_io = logging.StreamHandler(logger.test_log)\n string_io.setLevel(os.getenv('LOGGING_LEVEL', 'INFO'))\n string_io.setFormatter(log_formatter)\n logger.addHandler(string_io)\n return logger\n\n\nlogger = setup_logging()\n",
"step-5": "import json\nimport logging\nimport os\nimport sys\nfrom io import StringIO\n\nimport pytest\nfrom allure.constants import AttachmentType\n\nfrom utils.tools import close_popups\n\n_beautiful_json = dict(indent=2, ensure_ascii=False, sort_keys=True)\n\n# LOGGING console ####################################################################################################\n# Reserved name for custom logging\nlogging.addLevelName(15, \"SUBDEBUG\")\nlogging.addLevelName(5, \"TEST\")\n\n# Logger formating\nlog_formatter = logging.Formatter(\"%(asctime)s [%(threadName)s] [%(levelname)s] - %(message)s\",\n datefmt='%Y-%m-%d %H:%M:%S')\n\n\nclass CustomLogger(logging.Logger):\n test_log = StringIO()\n\n # Metod formating message\n @staticmethod\n def format_message(message):\n return json.dumps(message, **_beautiful_json) if isinstance(message, (dict, list, tuple)) else str(message)\n\n # Custom level of logging\n def subdebug(self, message, *args, **kwargs):\n if self.isEnabledFor(15):\n self._log(15, message, args, **kwargs)\n\n # Method to attached data to report (one class dependency)\n def attach_debug(self, name, message):\n if self.isEnabledFor(10):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_subdebug(self, name, message):\n if self.isEnabledFor(15):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_info(self, name, message):\n if self.isEnabledFor(20):\n pytest.allure.attach(name, self.format_message(message))\n\n def attach_error(self, name, message):\n pytest.allure.attach(name, self.format_message(message))\n\n @staticmethod\n def attach_png(name, message):\n pytest.allure.attach(name, message, type=AttachmentType.PNG)\n\n def attach_selenium_screenshot(self, attach_name, selenium_driver):\n if selenium_driver:\n try:\n close_popups(selenium_driver)\n self.debug('Attach screenshot')\n self.attach_png(attach_name, selenium_driver.get_screenshot_as_png())\n self.debug('...Done')\n except Exception as 
e:\n self.error('Cannot get screenshot from SeleniumWebDriver')\n pytest.allure.attach(attach_name, str(e))\n\n else:\n self.error('No browser is define')\n\n def add_handler(self, file_name, mode='a'):\n file_handler = logging.FileHandler(filename=file_name, mode=mode)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n self.addHandler(file_handler)\n\n\ndef setup_logging():\n # Logging setup\n logger = CustomLogger('root')\n\n # Level of handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(os.getenv('LOGGING_LEVEL_TO_CONSOLE', 'WARN'))\n # Create a method of message\n console_handler.setFormatter(log_formatter)\n logger.addHandler(console_handler)\n\n # Level of handler\n string_io = logging.StreamHandler(logger.test_log)\n string_io.setLevel(os.getenv('LOGGING_LEVEL', 'INFO'))\n # Create a method of message\n string_io.setFormatter(log_formatter)\n logger.addHandler(string_io)\n return logger\n\n\nlogger = setup_logging()\n",
"step-ids": [
10,
13,
14,
15,
16
]
}
|
[
10,
13,
14,
15,
16
] |
# cook your dish here
# For each of t test cases: read n and an array of n integers, sort it in
# descending order, and sum max(a[i] - i, 0) over all positions -- i.e. the
# i-th largest value is reduced by i and ignored once it would go negative.
# The total is printed modulo 1_000_000_007.
t=int(input())
while t:
    n=int(input())
    a=list(map(int,input().split()))
    a.sort(reverse=True)
    s=0
    for i in range(n):
        # value collected at step i, after the per-step decay of i
        k=a[i]-i
        if k>=0:
            s+=k
    print(s%1000000007)
    t-=1
|
normal
|
{
"blob_id": "44bf409d627a6029ab4c4f1fff99f102b8d57279",
"index": 3954,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile t:\n n = int(input())\n a = list(map(int, input().split()))\n a.sort(reverse=True)\n s = 0\n for i in range(n):\n k = a[i] - i\n if k >= 0:\n s += k\n print(s % 1000000007)\n t -= 1\n",
"step-3": "t = int(input())\nwhile t:\n n = int(input())\n a = list(map(int, input().split()))\n a.sort(reverse=True)\n s = 0\n for i in range(n):\n k = a[i] - i\n if k >= 0:\n s += k\n print(s % 1000000007)\n t -= 1\n",
"step-4": "# cook your dish here\nt=int(input())\nwhile t:\n n=int(input())\n a=list(map(int,input().split()))\n a.sort(reverse=True)\n s=0\n for i in range(n):\n k=a[i]-i\n if k>=0:\n s+=k\n print(s%1000000007)\n t-=1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def full_adder(a: bool, b: bool, c: bool) ->(bool, bool):
"""Returns a + b + c in the form of a tuple of two bools representing the two
bits.
Carried value is ignored.
"""
nand_a_b = nand(a, b)
nand_c = nand(nand_a_b, a)
nand_d = nand(nand_a_b, b)
low_a_b = nand(nand_c, nand_d)
nand_low_a_b_c = nand(low_a_b, c)
nand_e = nand(low_a_b, nand_low_a_b_c)
nand_f = nand(nand_low_a_b_c, c)
high = nand(nand_a_b, nand_low_a_b_c)
low = nand(nand_e, nand_f)
return high, low
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def half_adder(a: bool, b: bool) ->(bool, bool):
"""Returns a + b in the form of a tuple of two bools representing the two
bits."""
nand_a_b = nand(a, b)
nand_c = nand(nand_a_b, a)
nand_d = nand(nand_a_b, b)
high = nand(nand_a_b, nand_a_b)
low = nand(nand_c, nand_d)
return high, low
def full_adder(a: bool, b: bool, c: bool) ->(bool, bool):
"""Returns a + b + c in the form of a tuple of two bools representing the two
bits.
Carried value is ignored.
"""
nand_a_b = nand(a, b)
nand_c = nand(nand_a_b, a)
nand_d = nand(nand_a_b, b)
low_a_b = nand(nand_c, nand_d)
nand_low_a_b_c = nand(low_a_b, c)
nand_e = nand(low_a_b, nand_low_a_b_c)
nand_f = nand(nand_low_a_b_c, c)
high = nand(nand_a_b, nand_low_a_b_c)
low = nand(nand_e, nand_f)
return high, low
<|reserved_special_token_1|>
from pypc.a_primitives.nand import nand
def half_adder(a: bool, b: bool) ->(bool, bool):
"""Returns a + b in the form of a tuple of two bools representing the two
bits."""
nand_a_b = nand(a, b)
nand_c = nand(nand_a_b, a)
nand_d = nand(nand_a_b, b)
high = nand(nand_a_b, nand_a_b)
low = nand(nand_c, nand_d)
return high, low
def full_adder(a: bool, b: bool, c: bool) ->(bool, bool):
"""Returns a + b + c in the form of a tuple of two bools representing the two
bits.
Carried value is ignored.
"""
nand_a_b = nand(a, b)
nand_c = nand(nand_a_b, a)
nand_d = nand(nand_a_b, b)
low_a_b = nand(nand_c, nand_d)
nand_low_a_b_c = nand(low_a_b, c)
nand_e = nand(low_a_b, nand_low_a_b_c)
nand_f = nand(nand_low_a_b_c, c)
high = nand(nand_a_b, nand_low_a_b_c)
low = nand(nand_e, nand_f)
return high, low
<|reserved_special_token_1|>
from pypc.a_primitives.nand import nand
# nand gates used: 5
def half_adder(a: bool, b: bool) -> (bool, bool):
    """Add two bits using only NAND gates.

    Returns (high, low): the carry bit and the sum bit.
    """
    ab = nand(a, b)                        # NOT(a AND b)
    high = nand(ab, ab)                    # double NAND restores a AND b (carry)
    low = nand(nand(ab, a), nand(ab, b))   # a XOR b built from the shared gate
    return high, low
# nand gates used: 9
def full_adder(a: bool, b: bool, c: bool) -> (bool, bool):
    """Add three bits using only NAND gates.

    Returns (high, low): the carry bit and the sum bit. Any carry out of
    the high bit is ignored.
    """
    ab = nand(a, b)                          # NOT(a AND b)
    axb = nand(nand(ab, a), nand(ab, b))     # a XOR b (shares the first gate)
    abc = nand(axb, c)                       # NOT((a XOR b) AND c)
    # sum bit: (a XOR b) XOR c
    low = nand(nand(axb, abc), nand(abc, c))
    # carry: (a AND b) OR ((a XOR b) AND c)
    high = nand(ab, abc)
    return high, low
|
flexible
|
{
"blob_id": "66f6639ae62fe8c0b42171cf3e3fb450d8eee2b2",
"index": 7671,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef full_adder(a: bool, b: bool, c: bool) ->(bool, bool):\n \"\"\"Returns a + b + c in the form of a tuple of two bools representing the two\n bits.\n \n Carried value is ignored.\n \"\"\"\n nand_a_b = nand(a, b)\n nand_c = nand(nand_a_b, a)\n nand_d = nand(nand_a_b, b)\n low_a_b = nand(nand_c, nand_d)\n nand_low_a_b_c = nand(low_a_b, c)\n nand_e = nand(low_a_b, nand_low_a_b_c)\n nand_f = nand(nand_low_a_b_c, c)\n high = nand(nand_a_b, nand_low_a_b_c)\n low = nand(nand_e, nand_f)\n return high, low\n",
"step-3": "<mask token>\n\n\ndef half_adder(a: bool, b: bool) ->(bool, bool):\n \"\"\"Returns a + b in the form of a tuple of two bools representing the two\n bits.\"\"\"\n nand_a_b = nand(a, b)\n nand_c = nand(nand_a_b, a)\n nand_d = nand(nand_a_b, b)\n high = nand(nand_a_b, nand_a_b)\n low = nand(nand_c, nand_d)\n return high, low\n\n\ndef full_adder(a: bool, b: bool, c: bool) ->(bool, bool):\n \"\"\"Returns a + b + c in the form of a tuple of two bools representing the two\n bits.\n \n Carried value is ignored.\n \"\"\"\n nand_a_b = nand(a, b)\n nand_c = nand(nand_a_b, a)\n nand_d = nand(nand_a_b, b)\n low_a_b = nand(nand_c, nand_d)\n nand_low_a_b_c = nand(low_a_b, c)\n nand_e = nand(low_a_b, nand_low_a_b_c)\n nand_f = nand(nand_low_a_b_c, c)\n high = nand(nand_a_b, nand_low_a_b_c)\n low = nand(nand_e, nand_f)\n return high, low\n",
"step-4": "from pypc.a_primitives.nand import nand\n\n\ndef half_adder(a: bool, b: bool) ->(bool, bool):\n \"\"\"Returns a + b in the form of a tuple of two bools representing the two\n bits.\"\"\"\n nand_a_b = nand(a, b)\n nand_c = nand(nand_a_b, a)\n nand_d = nand(nand_a_b, b)\n high = nand(nand_a_b, nand_a_b)\n low = nand(nand_c, nand_d)\n return high, low\n\n\ndef full_adder(a: bool, b: bool, c: bool) ->(bool, bool):\n \"\"\"Returns a + b + c in the form of a tuple of two bools representing the two\n bits.\n \n Carried value is ignored.\n \"\"\"\n nand_a_b = nand(a, b)\n nand_c = nand(nand_a_b, a)\n nand_d = nand(nand_a_b, b)\n low_a_b = nand(nand_c, nand_d)\n nand_low_a_b_c = nand(low_a_b, c)\n nand_e = nand(low_a_b, nand_low_a_b_c)\n nand_f = nand(nand_low_a_b_c, c)\n high = nand(nand_a_b, nand_low_a_b_c)\n low = nand(nand_e, nand_f)\n return high, low\n",
"step-5": "from pypc.a_primitives.nand import nand\r\n\r\n\r\n# nand gates used: 5\r\ndef half_adder(a: bool, b: bool) -> (bool, bool):\r\n \"\"\"Returns a + b in the form of a tuple of two bools representing the two\r\n bits.\"\"\"\r\n nand_a_b = nand(a, b)\r\n nand_c = nand(nand_a_b, a)\r\n nand_d = nand(nand_a_b, b)\r\n high = nand(nand_a_b, nand_a_b)\r\n low = nand(nand_c, nand_d)\r\n return high, low\r\n\r\n\r\n# nand gates used: 9\r\ndef full_adder(a: bool, b: bool, c: bool) -> (bool, bool):\r\n \"\"\"Returns a + b + c in the form of a tuple of two bools representing the two\r\n bits.\r\n \r\n Carried value is ignored.\r\n \"\"\"\r\n nand_a_b = nand(a, b)\r\n nand_c = nand(nand_a_b, a)\r\n nand_d = nand(nand_a_b, b)\r\n low_a_b = nand(nand_c, nand_d)\r\n nand_low_a_b_c = nand(low_a_b, c)\r\n nand_e = nand(low_a_b, nand_low_a_b_c)\r\n nand_f = nand(nand_low_a_b_c, c)\r\n high = nand(nand_a_b, nand_low_a_b_c)\r\n low = nand(nand_e, nand_f)\r\n return high, low\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class priority_customer(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class priority_customer(models.Model):
_inherit = 'res.partner'
is_priority = fields.Boolean('Is Priority Partner:?')
registration_date = fields.Date('Registration Date:')
liability_card_number = fields.Char('Liability Card Number:')
<|reserved_special_token_1|>
from openerp import models, fields, api, _
class priority_customer(models.Model):
_inherit = 'res.partner'
is_priority = fields.Boolean('Is Priority Partner:?')
registration_date = fields.Date('Registration Date:')
liability_card_number = fields.Char('Liability Card Number:')
<|reserved_special_token_1|>
from openerp import models, fields, api, _
class priority_customer(models.Model):
    """Extend the core partner model with priority-membership fields."""

    # Inherit res.partner in place rather than creating a new model/table.
    _inherit = 'res.partner'

    # Whether this partner is flagged as a priority partner.
    is_priority = fields.Boolean("Is Priority Partner:?")
    # Registration date -- presumably the date the partner became a
    # priority member; confirm against the views that set it.
    registration_date = fields.Date("Registration Date:")
    # Free-form liability card identifier.
    liability_card_number = fields.Char("Liability Card Number:")
|
flexible
|
{
"blob_id": "f2bb00d06023ef7b3ea3dc33f7ec00d1f48d46ae",
"index": 8477,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass priority_customer(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass priority_customer(models.Model):\n _inherit = 'res.partner'\n is_priority = fields.Boolean('Is Priority Partner:?')\n registration_date = fields.Date('Registration Date:')\n liability_card_number = fields.Char('Liability Card Number:')\n",
"step-4": "from openerp import models, fields, api, _\n\n\nclass priority_customer(models.Model):\n _inherit = 'res.partner'\n is_priority = fields.Boolean('Is Priority Partner:?')\n registration_date = fields.Date('Registration Date:')\n liability_card_number = fields.Char('Liability Card Number:')\n",
"step-5": "from openerp import models, fields, api, _\n\n\nclass priority_customer(models.Model):\n\n _inherit = 'res.partner'\n\n is_priority = fields.Boolean(\"Is Priority Partner:?\")\n registration_date = fields.Date(\"Registration Date:\")\n liability_card_number = fields.Char(\"Liability Card Number:\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def entete():
entete = """
<!DOCTYPE HTML>
<html lang=“fr”>
<head>
<title>AMAP'PATATE</title>
<meta charset="UTF-8" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/font-awesome.min.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/bootstrap.min.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/style.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/menu.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/form.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/button.css" />
<script type="text/javascript" src= " /IENAC15/amapatate/js/jquery-2.2.0.min.js" ></script>
<script type="text/javascript" src= " /IENAC15/amapatate/js/bootstrap.min.js" ></script>
</head>
<body>
"""
return entete
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def entete():
entete = """
<!DOCTYPE HTML>
<html lang=“fr”>
<head>
<title>AMAP'PATATE</title>
<meta charset="UTF-8" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/font-awesome.min.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/bootstrap.min.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/style.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/menu.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/form.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/button.css" />
<script type="text/javascript" src= " /IENAC15/amapatate/js/jquery-2.2.0.min.js" ></script>
<script type="text/javascript" src= " /IENAC15/amapatate/js/bootstrap.min.js" ></script>
</head>
<body>
"""
return entete
<|reserved_special_token_0|>
def footer():
footer = """
<footer>© All right reserved ENAC
</footer>
</body>
</html>
"""
return footer
<|reserved_special_token_1|>
def entete():
entete = """
<!DOCTYPE HTML>
<html lang=“fr”>
<head>
<title>AMAP'PATATE</title>
<meta charset="UTF-8" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/font-awesome.min.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/bootstrap.min.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/style.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/menu.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/form.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/button.css" />
<script type="text/javascript" src= " /IENAC15/amapatate/js/jquery-2.2.0.min.js" ></script>
<script type="text/javascript" src= " /IENAC15/amapatate/js/bootstrap.min.js" ></script>
</head>
<body>
"""
return entete
<|reserved_special_token_0|>
def titre(intitule):
titre = """
<header>
<h1>""" + intitule + """</h1>
<p>L'AMAP fruits et légumes qui vous donne la patate </p>
</header>
"""
return titre
def footer():
footer = """
<footer>© All right reserved ENAC
</footer>
</body>
</html>
"""
return footer
<|reserved_special_token_1|>
def entete():
entete = """
<!DOCTYPE HTML>
<html lang=“fr”>
<head>
<title>AMAP'PATATE</title>
<meta charset="UTF-8" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/font-awesome.min.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/bootstrap.min.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/style.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/menu.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/form.css" />
<link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/button.css" />
<script type="text/javascript" src= " /IENAC15/amapatate/js/jquery-2.2.0.min.js" ></script>
<script type="text/javascript" src= " /IENAC15/amapatate/js/bootstrap.min.js" ></script>
</head>
<body>
"""
return entete
def nav():
nav = """
<nav>
<ul>
<li><a href="/IENAC15/amapatate/index.py">
<span class="fa-stack fa-lg">
<i class="fa fa-circle fa-stack-2x"></i>
<i class="fa fa-home fa-stack-1x fa-inverse"></i>
</span>
Accueil</a>
</li>
<li><a href="/IENAC15/amapatate/index.py#ecole">
<span class="fa-stack fa-lg">
<i class="fa fa-circle fa-stack-2x"></i>
<i class="fa fa-plane fa-stack-1x fa-inverse"></i>
</span>
L'école</a>
<ul>
<li><a href="http://www.eag-tournament.com">
<i class="fa fa-soccer-ball-o fa-fw"></i>EAG</a>
</li>
<li><a href="index.html#contacter">
<i class="fa fa-phone fa-fw"></i>Nous Contacter</a>
</li>
</ul>
</li>
<li><a href="/IENAC15/amapatate/python/clubs.py">
<span class="fa-stack fa-lg">
<i class="fa fa-circle fa-stack-2x"></i>
<i class="fa fa-bicycle fa-stack-1x fa-inverse"></i>
</span>
Les clubs</a>
</li>
<li><a href="/IENAC15/amapatate/python/connecter.py">
<span class="fa-stack fa-lg">
<i class="fa fa-circle fa-stack-2x"></i>
<i class="fa fa-user fa-stack-1x fa-inverse"></i>
</span>
Se connecter</a>
</li>
"""
if 'nom' in Session() and Session()['nom'] != '':
nav += """
<li><a href="/IENAC15/amapatate/python/page_prive.py">
<span class="fa-stack fa-lg">
<i class="fa fa-circle fa-stack-2x"></i>
<i class="fa fa-user fa-stack-1x fa-inverse"></i>
</span>
Page privée</a>
</li>
"""
nav += """
</ul>
</nav>
"""
return nav
def titre(intitule):
titre = """
<header>
<h1>""" + intitule + """</h1>
<p>L'AMAP fruits et légumes qui vous donne la patate </p>
</header>
"""
return titre
def footer():
footer = """
<footer>© All right reserved ENAC
</footer>
</body>
</html>
"""
return footer
<|reserved_special_token_1|>
def entete():
    """Return the shared HTML document head: doctype, <head> with all CSS/JS
    includes, through the opening <body> tag."""
    return '''
    <!DOCTYPE HTML>
<html lang=“fr”>
    <head>
        <title>AMAP'PATATE</title>
        <meta charset="UTF-8" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/font-awesome.min.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/bootstrap.min.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/style.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/menu.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/form.css" />
        <link rel="stylesheet" type="text/css" href="/IENAC15/amapatate/css/button.css" />
        <script type="text/javascript" src= " /IENAC15/amapatate/js/jquery-2.2.0.min.js" ></script>
					<script type="text/javascript" src= " /IENAC15/amapatate/js/bootstrap.min.js" ></script>
    </head>
    <body>

    '''
def nav():
    """Build the site navigation bar HTML.

    The private-page entry is included only when a user is logged in,
    i.e. when Session() holds a non-empty 'nom' value.
    """
    parts = ['''
    <nav>
        <ul>
            <li><a href="/IENAC15/amapatate/index.py">
                <span class="fa-stack fa-lg">
                    <i class="fa fa-circle fa-stack-2x"></i>
                    <i class="fa fa-home fa-stack-1x fa-inverse"></i>
                </span>
                Accueil</a>
            </li>
            <li><a href="/IENAC15/amapatate/index.py#ecole">
                <span class="fa-stack fa-lg">
                    <i class="fa fa-circle fa-stack-2x"></i>
                    <i class="fa fa-plane fa-stack-1x fa-inverse"></i>
                </span>
                L'école</a>
                <ul>
                    <li><a href="http://www.eag-tournament.com">
                        <i class="fa fa-soccer-ball-o fa-fw"></i>EAG</a>
                    </li>
                    <li><a href="index.html#contacter">
                        <i class="fa fa-phone fa-fw"></i>Nous Contacter</a>
                    </li>
                </ul>
            </li>
            <li><a href="/IENAC15/amapatate/python/clubs.py">
                <span class="fa-stack fa-lg">
                    <i class="fa fa-circle fa-stack-2x"></i>
                    <i class="fa fa-bicycle fa-stack-1x fa-inverse"></i>
                </span>
                Les clubs</a>
            </li>
            <li><a href="/IENAC15/amapatate/python/connecter.py">
                <span class="fa-stack fa-lg">
                    <i class="fa fa-circle fa-stack-2x"></i>
                    <i class="fa fa-user fa-stack-1x fa-inverse"></i>
                </span>
                Se connecter</a>
            </li>
    ''']
    # Logged-in users get an extra entry pointing at their private page.
    if "nom" in Session() and Session()["nom"] != '':
        parts.append('''
            <li><a href="/IENAC15/amapatate/python/page_prive.py">
                <span class="fa-stack fa-lg">
                    <i class="fa fa-circle fa-stack-2x"></i>
                    <i class="fa fa-user fa-stack-1x fa-inverse"></i>
                </span>
                Page privée</a>
            </li>
        ''')
    parts.append('''
        </ul>
    </nav>
    ''')
    return ''.join(parts)
def titre(intitule):
titre='''
<header>
<h1>'''+intitule+'''</h1>
<p>L'AMAP fruits et légumes qui vous donne la patate </p>
</header>
'''
return titre
def footer():
footer='''
<footer>© All right reserved ENAC
</footer>
</body>
</html>
'''
return footer
|
flexible
|
{
"blob_id": "933758002c5851a2655ed4c51b2bed0102165116",
"index": 4742,
"step-1": "def entete():\n entete = \"\"\"\n <!DOCTYPE HTML>\n<html lang=“fr”>\n <head>\n <title>AMAP'PATATE</title>\n <meta charset=\"UTF-8\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/font-awesome.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/bootstrap.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/style.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/menu.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/form.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/button.css\" />\n <script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/jquery-2.2.0.min.js\" ></script>\n\t\t\t\t\t<script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/bootstrap.min.js\" ></script>\n </head>\n <body>\n\n \"\"\"\n return entete\n\n\n<mask token>\n",
"step-2": "def entete():\n entete = \"\"\"\n <!DOCTYPE HTML>\n<html lang=“fr”>\n <head>\n <title>AMAP'PATATE</title>\n <meta charset=\"UTF-8\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/font-awesome.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/bootstrap.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/style.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/menu.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/form.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/button.css\" />\n <script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/jquery-2.2.0.min.js\" ></script>\n\t\t\t\t\t<script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/bootstrap.min.js\" ></script>\n </head>\n <body>\n\n \"\"\"\n return entete\n\n\n<mask token>\n\n\ndef footer():\n footer = \"\"\"\n <footer>© All right reserved ENAC\n </footer>\n </body>\n </html>\n \"\"\"\n return footer\n",
"step-3": "def entete():\n entete = \"\"\"\n <!DOCTYPE HTML>\n<html lang=“fr”>\n <head>\n <title>AMAP'PATATE</title>\n <meta charset=\"UTF-8\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/font-awesome.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/bootstrap.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/style.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/menu.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/form.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/button.css\" />\n <script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/jquery-2.2.0.min.js\" ></script>\n\t\t\t\t\t<script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/bootstrap.min.js\" ></script>\n </head>\n <body>\n\n \"\"\"\n return entete\n\n\n<mask token>\n\n\ndef titre(intitule):\n titre = \"\"\"\n <header>\n <h1>\"\"\" + intitule + \"\"\"</h1>\n <p>L'AMAP fruits et légumes qui vous donne la patate </p>\n </header>\n \"\"\"\n return titre\n\n\ndef footer():\n footer = \"\"\"\n <footer>© All right reserved ENAC\n </footer>\n </body>\n </html>\n \"\"\"\n return footer\n",
"step-4": "def entete():\n entete = \"\"\"\n <!DOCTYPE HTML>\n<html lang=“fr”>\n <head>\n <title>AMAP'PATATE</title>\n <meta charset=\"UTF-8\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/font-awesome.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/bootstrap.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/style.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/menu.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/form.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/button.css\" />\n <script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/jquery-2.2.0.min.js\" ></script>\n\t\t\t\t\t<script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/bootstrap.min.js\" ></script>\n </head>\n <body>\n\n \"\"\"\n return entete\n\n\ndef nav():\n nav = \"\"\"\n <nav>\n <ul>\n\t <li><a href=\"/IENAC15/amapatate/index.py\">\n\t \t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-home fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t \tAccueil</a>\n\t </li>\n <li><a href=\"/IENAC15/amapatate/index.py#ecole\">\n <span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-plane fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n L'école</a>\n \t<ul>\n \t\t\t<li><a href=\"http://www.eag-tournament.com\">\n \t\t\t <i class=\"fa fa-soccer-ball-o fa-fw\"></i>EAG</a>\n \t\t\t</li>\n \t\t\t<li><a href=\"index.html#contacter\">\n \t\t\t\t<i class=\"fa fa-phone fa-fw\"></i>Nous Contacter</a>\n \t\t\t</li>\n \t\t\t</ul>\n </li>\n\t\t \t<li><a href=\"/IENAC15/amapatate/python/clubs.py\">\n\t\t \t\t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-bicycle fa-stack-1x fa-inverse\"></i>\n \t 
\t\t</span>\n\t\t \t\tLes clubs</a>\n\t\t \t</li>\n\t\t \t<li><a href=\"/IENAC15/amapatate/python/connecter.py\">\n\t\t \t\t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-user fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t\t \tSe connecter</a>\n\t\t \t</li>\n\t\t \t\"\"\"\n if 'nom' in Session() and Session()['nom'] != '':\n nav += \"\"\"\n <li><a href=\"/IENAC15/amapatate/python/page_prive.py\">\n\t\t \t\t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-user fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t\t \tPage privée</a>\n\t\t \t</li>\n\t\t \t\"\"\"\n nav += \"\"\"\n \t\t\t</ul>\n </nav>\n \"\"\"\n return nav\n\n\ndef titre(intitule):\n titre = \"\"\"\n <header>\n <h1>\"\"\" + intitule + \"\"\"</h1>\n <p>L'AMAP fruits et légumes qui vous donne la patate </p>\n </header>\n \"\"\"\n return titre\n\n\ndef footer():\n footer = \"\"\"\n <footer>© All right reserved ENAC\n </footer>\n </body>\n </html>\n \"\"\"\n return footer\n",
"step-5": "def entete():\n entete='''\n <!DOCTYPE HTML>\n<html lang=“fr”>\n <head>\n <title>AMAP'PATATE</title>\n <meta charset=\"UTF-8\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/font-awesome.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/bootstrap.min.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/style.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/menu.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/form.css\" />\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/IENAC15/amapatate/css/button.css\" />\n <script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/jquery-2.2.0.min.js\" ></script>\n\t\t\t\t\t<script type=\"text/javascript\" src= \" /IENAC15/amapatate/js/bootstrap.min.js\" ></script>\n </head>\n <body>\n\n '''\n return entete\n\ndef nav():\n nav='''\n <nav>\n <ul>\n\t <li><a href=\"/IENAC15/amapatate/index.py\">\n\t \t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-home fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t \tAccueil</a>\n\t </li>\n <li><a href=\"/IENAC15/amapatate/index.py#ecole\">\n <span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-plane fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n L'école</a>\n \t<ul>\n \t\t\t<li><a href=\"http://www.eag-tournament.com\">\n \t\t\t <i class=\"fa fa-soccer-ball-o fa-fw\"></i>EAG</a>\n \t\t\t</li>\n \t\t\t<li><a href=\"index.html#contacter\">\n \t\t\t\t<i class=\"fa fa-phone fa-fw\"></i>Nous Contacter</a>\n \t\t\t</li>\n \t\t\t</ul>\n </li>\n\t\t \t<li><a href=\"/IENAC15/amapatate/python/clubs.py\">\n\t\t \t\t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-bicycle fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t\t 
\t\tLes clubs</a>\n\t\t \t</li>\n\t\t \t<li><a href=\"/IENAC15/amapatate/python/connecter.py\">\n\t\t \t\t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-user fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t\t \tSe connecter</a>\n\t\t \t</li>\n\t\t \t'''\n if \"nom\" in Session() and Session()[\"nom\"]!='':\n nav+='''\n <li><a href=\"/IENAC15/amapatate/python/page_prive.py\">\n\t\t \t\t<span class=\"fa-stack fa-lg\">\n \t \t\t\t<i class=\"fa fa-circle fa-stack-2x\"></i>\n \t \t\t\t<i class=\"fa fa-user fa-stack-1x fa-inverse\"></i>\n \t \t\t</span>\n\t\t \tPage privée</a>\n\t\t \t</li>\n\t\t \t'''\n nav+='''\n \t\t\t</ul>\n </nav>\n '''\n return nav\n\ndef titre(intitule):\n titre='''\n <header>\n <h1>'''+intitule+'''</h1>\n <p>L'AMAP fruits et légumes qui vous donne la patate </p>\n </header>\n '''\n return titre\n\ndef footer():\n footer='''\n <footer>© All right reserved ENAC\n </footer>\n </body>\n </html>\n '''\n return footer\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import re
import itertools
import setpath
import functions
import lib.jopts as jopts
from operator import itemgetter
import random
__docformat__ = 'reStructuredText en'
re_params=re.compile('(\w*):(.*)')
def consumer(func):
"""A decorator, advances func to its first yield point when called.
"""
from functools import wraps
@wraps(func)
def wrapper(*args,**kw):
gen = func(*args, **kw)
gen.next()
return gen
return wrapper
class freqitemsets:
"""
.. function:: freqitemsets(datacol, [threshold, noautothres, stats, maxlen]) -> [itemset_id:int, itemset_length:int, itemset_frequency:int, item:text]
Calculates frequent itemsets on a given column (datacol). The algorithm is tuned for the
case when we have many different items (in the order of millions), many input itemsets, but
small itemset length (10-20).
Returned table schema:
:itemset_id: Automatic itemset id
:itemset_length: Length of itemset
:itemset_frequency: How many times an itemset has been found
:item: Itemset's item value
Parameters:
:datacol:
Column on which to calculate frequent itemsets
:threshold: Default is 2
How many times an freq. itemset must appear for it to appear in the results
:noautothres: 1/0 (Default is 0)
Do not calculate the threshold automatically
:stats: 1/0 (Default is 0)
Return frequent itemset statistics
:maxlen: NUMBER (Default is no limit at all)
Maximum itemset length to search
Examples:
>>> table1('''
... 'car wood bike' 'first group'
... 'car car wood' 'first group'
... 'car wood' 'first group'
... 'car wood ice' 'first group'
... 'ice' 'second group'
... 'car ice' 'second group'
... 'car cream toy' 'second group'
... 'icecream ice car toy' 'second group'
... ''')
>>> sql("select b,freqitemsets(a, 'threshold:2', 'noautothres:1', 'maxlen:2') from table1 group by b")
b | itemset_id | itemset_length | itemset_frequency | item
---------------------------------------------------------------------
first group | 1 | 1 | 4 | wood
first group | 2 | 1 | 4 | car
first group | 3 | 2 | 4 | car
first group | 3 | 2 | 4 | wood
second group | 1 | 1 | 3 | ice
second group | 2 | 1 | 3 | car
second group | 3 | 1 | 2 | toy
second group | 4 | 2 | 2 | car
second group | 4 | 2 | 2 | ice
second group | 5 | 2 | 2 | car
second group | 5 | 2 | 2 | toy
>>> sql("select b,freqitemsets(a, 'stats:1') from table1 group by b")
b | MaxTransactionLength | CombinationCount | PassedTransactions | ValidKeywords
-------------------------------------------------------------------------------------------
first group | 3 | 2 | 3 | 2
first group | 3 | 1 | 1 | 2
first group | 3 | 0 | 0 | 0
second group | 4 | 3 | 3 | 3
second group | 4 | 0 | 3 | 0
"""
registered=True
multiset=True
def __init__(self):
self.threshold=2
self.startingthreshold=2
self.autothres=1
self.compress=0
self.initstatic=False
self.input={}
self.maxlength=0
self.kwcode={}
self.codekw={}
self.maxkwcode=0
self.overthres={}
self.belowthres={}
self.passedkw={}
self.init=True
self.itemset_id=0
self.maxlen=None
self.stats=False
def initargs(self, args):
self.init=False
for i in xrange(1, len(args)):
v=re_params.match(args[i])
if v is not None and v.groups()[0]!='' and v.groups()[1]!='' and i>0:
v=v.groups()
if v[0]=='threshold':
try:
self.threshold=int(v[1])
self.startingthreshold=self.threshold
except KeyboardInterrupt:
raise
except:
raise functions.OperatorError("FreqItemsets",'No integer value given for threshold')
if v[0]=='noautothres':
self.autothres=0
if v[0]=='compress':
self.compress=1
if v[0]=='maxlen':
self.maxlen=int(v[1])
if v[0]=='stats':
self.stats=True
def demultiplex(self, data):
iterable=None
iterpos=-1
for i in xrange(len(data)):
if hasattr(data[i],'__iter__')==True:
iterable=data[i]
iterpos=i
break
if iterpos==-1:
yield list(data)
else:
pre=list(data[0:iterpos])
post=list(data[iterpos+1:])
for i in iterable:
if hasattr(i,'__iter__')==False:
yield pre+[i]+post
else:
yield pre+list(i)+post
def insertcombfreq(self, comb, freq):
if comb in self.overthres:
self.overthres[comb]+=freq
else:
if comb in self.belowthres:
self.belowthres[comb]+=freq
else:
self.belowthres[comb]=freq
if self.belowthres[comb]>=self.threshold:
self.overthres[comb]=self.belowthres[comb]
del(self.belowthres[comb])
for k in comb:
if self.compress==0:
self.passedkw[k]=True
elif not k in self.passedkw:
self.passedkw[k]=self.overthres[comb]
else:
self.passedkw[k]+=self.overthres[comb]
def insertitemset(self, itemset):
if itemset not in self.input:
self.input[itemset]=1
else:
self.input[itemset]+=1
def cleanitemsets(self, minlength):
newitemsets={}
for k,v in self.input.iteritems():
itemset=tuple(i for i in k if i in self.passedkw)
if self.compress==1:
esoteric_itemset=tuple(i for i in itemset if self.passedkw[i]==v)
if len(esoteric_itemset)>0:
if len(itemset)>=minlength:
self.overthres[itemset]=v
itemset=tuple(i for i in itemset if self.passedkw[i]!=v)
if len(itemset)>=minlength:
if itemset not in newitemsets:
newitemsets[itemset]=v
else:
newitemsets[itemset]+=v
self.input=newitemsets
def step(self, *args):
if self.init==True:
self.initargs(args)
if len(args[0])==0:
return
itms=sorted(set(args[0].split(' ')))
itms=[x for x in itms if x!='']
li=len(itms)
if li>0:
if li>self.maxlength:
self.maxlength=li
inputkws=[]
for kw in itms:
if len(kw)==0:
print itms, args[0], len(args[0]), li
if kw not in self.kwcode:
self.kwcode[kw]=self.maxkwcode
self.codekw[self.maxkwcode]=kw
inputkws.append(self.maxkwcode)
self.insertcombfreq( (self.maxkwcode,),1 )
self.maxkwcode+=1
else:
itm=self.kwcode[kw]
self.insertcombfreq( (itm,),1 )
inputkws.append(itm)
if len(inputkws)>1:
self.insertitemset(tuple(inputkws))
def final(self):
if not self.stats:
yield ('itemset_id', 'itemset_length', 'itemset_frequency', 'item')
else:
yield ('MaxTransactionLength', 'CombinationCount', 'PassedTransactions', 'ValidKeywords')
splist=[{},{}]
del(self.kwcode)
splist[1]=self.overthres
if self.stats:
yield [self.maxlength, len(splist[1]), len(self.input), len(self.passedkw)]
if not self.stats:
for its,v in sorted(splist[1].items(), key=itemgetter(1),reverse=True):
self.itemset_id+=1
for i in self.demultiplex( (self.itemset_id, len([self.codekw[i] for i in its]), v, [self.codekw[i] for i in its]) ):
yield i
if self.maxlen==None:
self.maxlen=self.maxlength
for l in xrange(2, min(self.maxlength+1, self.maxlen+1)):
splist.append({})
self.belowthres={}
self.overthres={}
prevl=l-1
# Autothresholding
if self.autothres==1:
if len(self.input)==0 or len(self.passedkw)==0:
break
else:
self.threshold=self.startingthreshold + int(len(self.passedkw)/len(self.input))
self.cleanitemsets(l)
self.passedkw={}
prevsplist = splist[prevl]
icombs = itertools.combinations
insertcomb = self.insertcombfreq
for k,v in self.input.iteritems():
for k in icombs(k,l):
insertit=True
for i1 in icombs(k, prevl):
if i1 not in prevsplist:
insertit=False
break
if insertit:
insertcomb( k,v )
splist[l-1]={}
splist[l]=self.overthres
if self.stats:
yield [self.maxlength, len(splist[l]), len(self.input), len(self.passedkw)]
if not self.stats:
for its,v in sorted(splist[l].items(), key=itemgetter(1),reverse=True):
self.itemset_id+=1
for i in self.demultiplex( (self.itemset_id, len([self.codekw[i] for i in its]), v, [self.codekw[i] for i in its]) ):
yield i
del(self.overthres)
del(self.belowthres)
del(self.passedkw)
del(self.input)
del(self.codekw)
del(splist)
class sampledistvals:
"""
.. function:: sampledistvals(sample_size, C1, C2, C3) -> [C1, C2, C3]
Sampledistvals returns sample_size distinct values for each of the input C1..Cn columns.
>>> table1('''
... test1 2 3
... test1 2 3
... test2 4 2
... test4 2 t
... ''')
>>> sql("select sampledistvals(3, a, b, c) from table1")
C1 | C2 | C3
---------------------------------------------
["test1","test2","test4"] | [2,4] | [2,3,"t"]
"""
registered=True
def __init__(self):
self.vals=None
self.lenargs = -1
self.init=True
def step(self, *args):
if self.init:
self.lenargs = len(args)
self.vals = a=[set() for i in xrange(self.lenargs-1)]
self.init = False
for i in xrange(1, self.lenargs):
if len(self.vals[i-1])<args[0] and args[i] not in self.vals[i-1]:
self.vals[i-1].add(args[i])
def final(self):
yield tuple(['C'+str(i) for i in xrange(1, self.lenargs)] )
yield [jopts.toj(list(i)) for i in self.vals]
class sample:
"""
.. function:: sample(sample_size, C1, C2, C3)
Sample returns a random sample_size set of rows.
>>> table1('''
... test1 2 3
... test1 2 3
... test2 4 2
... test4 2 t
... ''')
>>> sql("select sample(2, a, b, c) from table1") # doctest: +ELLIPSIS
C1 | C2 | C3
---------------
...
"""
registered=True
def __init__(self):
self.samplelist = []
self.index = 0
def step(self, *args):
sample_count = args[0]
# Generate the reservoir
if self.index < sample_count:
self.samplelist.append(args[1:])
else:
r = random.randint(0, self.index)
if r < sample_count:
self.samplelist[r] = args[1:]
self.index += 1
def final(self):
if len(self.samplelist) == []:
yield tuple(['C1'])
else:
yield tuple(['C'+str(i) for i in xrange(1, len(self.samplelist[0]) + 1)] )
for r in self.samplelist:
yield list(r)
if not ('.' in __name__):
"""
This is needed to be able to test the function, put it at the end of every
new function you create
"""
import sys
import setpath
from functions import *
testfunction()
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
import doctest
doctest.testmod()
|
normal
|
{
"blob_id": "60411e922bfec8f98028f959a370f954eef5437e",
"index": 1329,
"step-1": "import re\nimport itertools\nimport setpath\nimport functions\nimport lib.jopts as jopts\nfrom operator import itemgetter\nimport random\n\n__docformat__ = 'reStructuredText en'\n\nre_params=re.compile('(\\w*):(.*)')\n\ndef consumer(func):\n \"\"\"A decorator, advances func to its first yield point when called.\n \"\"\"\n\n from functools import wraps\n\n @wraps(func)\n def wrapper(*args,**kw):\n gen = func(*args, **kw)\n gen.next()\n return gen\n return wrapper\n\n\nclass freqitemsets:\n \"\"\"\n .. function:: freqitemsets(datacol, [threshold, noautothres, stats, maxlen]) -> [itemset_id:int, itemset_length:int, itemset_frequency:int, item:text]\n\n Calculates frequent itemsets on a given column (datacol). The algorithm is tuned for the\n case when we have many different items (in the order of millions), many input itemsets, but\n small itemset length (10-20).\n\n Returned table schema:\n\n :itemset_id: Automatic itemset id\n :itemset_length: Length of itemset\n :itemset_frequency: How many times an itemset has been found\n :item: Itemset's item value\n\n Parameters:\n\n :datacol:\n\n Column on which to calculate frequent itemsets\n\n :threshold: Default is 2\n\n How many times an freq. itemset must appear for it to appear in the results\n\n :noautothres: 1/0 (Default is 0)\n\n Do not calculate the threshold automatically\n\n :stats: 1/0 (Default is 0)\n\n Return frequent itemset statistics\n\n :maxlen: NUMBER (Default is no limit at all)\n\n Maximum itemset length to search\n\n Examples:\n \n >>> table1('''\n ... 'car wood bike' 'first group'\n ... 'car car wood' 'first group'\n ... 'car wood' 'first group'\n ... 'car wood ice' 'first group'\n ... 'ice' 'second group'\n ... 'car ice' 'second group'\n ... 'car cream toy' 'second group'\n ... 'icecream ice car toy' 'second group'\n ... 
''')\n >>> sql(\"select b,freqitemsets(a, 'threshold:2', 'noautothres:1', 'maxlen:2') from table1 group by b\")\n b | itemset_id | itemset_length | itemset_frequency | item\n ---------------------------------------------------------------------\n first group | 1 | 1 | 4 | wood\n first group | 2 | 1 | 4 | car\n first group | 3 | 2 | 4 | car\n first group | 3 | 2 | 4 | wood\n second group | 1 | 1 | 3 | ice\n second group | 2 | 1 | 3 | car\n second group | 3 | 1 | 2 | toy\n second group | 4 | 2 | 2 | car\n second group | 4 | 2 | 2 | ice\n second group | 5 | 2 | 2 | car\n second group | 5 | 2 | 2 | toy\n\n >>> sql(\"select b,freqitemsets(a, 'stats:1') from table1 group by b\")\n b | MaxTransactionLength | CombinationCount | PassedTransactions | ValidKeywords\n -------------------------------------------------------------------------------------------\n first group | 3 | 2 | 3 | 2\n first group | 3 | 1 | 1 | 2\n first group | 3 | 0 | 0 | 0\n second group | 4 | 3 | 3 | 3\n second group | 4 | 0 | 3 | 0\n \"\"\"\n\n\n registered=True\n multiset=True\n\n def __init__(self):\n self.threshold=2\n self.startingthreshold=2\n self.autothres=1\n self.compress=0\n self.initstatic=False\n self.input={}\n self.maxlength=0\n self.kwcode={}\n self.codekw={}\n self.maxkwcode=0\n self.overthres={}\n self.belowthres={}\n self.passedkw={}\n self.init=True\n self.itemset_id=0\n self.maxlen=None\n self.stats=False\n\n def initargs(self, args):\n self.init=False\n for i in xrange(1, len(args)):\n v=re_params.match(args[i])\n if v is not None and v.groups()[0]!='' and v.groups()[1]!='' and i>0:\n v=v.groups()\n if v[0]=='threshold':\n try:\n self.threshold=int(v[1])\n self.startingthreshold=self.threshold\n except KeyboardInterrupt:\n raise \n except:\n raise functions.OperatorError(\"FreqItemsets\",'No integer value given for threshold')\n if v[0]=='noautothres':\n self.autothres=0\n if v[0]=='compress':\n self.compress=1\n if v[0]=='maxlen':\n self.maxlen=int(v[1])\n if v[0]=='stats':\n 
self.stats=True\n\n def demultiplex(self, data):\n iterable=None\n iterpos=-1\n\n for i in xrange(len(data)):\n if hasattr(data[i],'__iter__')==True:\n iterable=data[i]\n iterpos=i\n break\n\n if iterpos==-1:\n yield list(data)\n else:\n pre=list(data[0:iterpos])\n post=list(data[iterpos+1:])\n for i in iterable:\n if hasattr(i,'__iter__')==False:\n yield pre+[i]+post\n else:\n yield pre+list(i)+post\n \n def insertcombfreq(self, comb, freq):\n if comb in self.overthres:\n self.overthres[comb]+=freq\n else:\n if comb in self.belowthres:\n self.belowthres[comb]+=freq\n else:\n self.belowthres[comb]=freq\n\n if self.belowthres[comb]>=self.threshold:\n self.overthres[comb]=self.belowthres[comb]\n del(self.belowthres[comb])\n for k in comb:\n if self.compress==0:\n self.passedkw[k]=True\n elif not k in self.passedkw:\n self.passedkw[k]=self.overthres[comb]\n else:\n self.passedkw[k]+=self.overthres[comb]\n\n def insertitemset(self, itemset):\n if itemset not in self.input:\n self.input[itemset]=1\n else:\n self.input[itemset]+=1\n\n def cleanitemsets(self, minlength):\n newitemsets={}\n for k,v in self.input.iteritems():\n itemset=tuple(i for i in k if i in self.passedkw)\n if self.compress==1:\n esoteric_itemset=tuple(i for i in itemset if self.passedkw[i]==v)\n if len(esoteric_itemset)>0:\n if len(itemset)>=minlength:\n self.overthres[itemset]=v\n itemset=tuple(i for i in itemset if self.passedkw[i]!=v)\n if len(itemset)>=minlength:\n if itemset not in newitemsets:\n newitemsets[itemset]=v\n else:\n newitemsets[itemset]+=v\n\n self.input=newitemsets\n\n def step(self, *args):\n if self.init==True:\n self.initargs(args)\n\n if len(args[0])==0:\n return\n \n itms=sorted(set(args[0].split(' ')))\n itms=[x for x in itms if x!='']\n li=len(itms)\n if li>0:\n if li>self.maxlength:\n self.maxlength=li\n\n inputkws=[]\n for kw in itms:\n if len(kw)==0:\n print itms, args[0], len(args[0]), li\n if kw not in self.kwcode:\n self.kwcode[kw]=self.maxkwcode\n 
self.codekw[self.maxkwcode]=kw\n inputkws.append(self.maxkwcode)\n self.insertcombfreq( (self.maxkwcode,),1 )\n self.maxkwcode+=1\n else:\n itm=self.kwcode[kw]\n self.insertcombfreq( (itm,),1 )\n inputkws.append(itm)\n\n if len(inputkws)>1:\n self.insertitemset(tuple(inputkws))\n\n def final(self):\n if not self.stats:\n yield ('itemset_id', 'itemset_length', 'itemset_frequency', 'item')\n else:\n yield ('MaxTransactionLength', 'CombinationCount', 'PassedTransactions', 'ValidKeywords')\n\n splist=[{},{}]\n del(self.kwcode)\n splist[1]=self.overthres\n\n if self.stats:\n yield [self.maxlength, len(splist[1]), len(self.input), len(self.passedkw)]\n\n if not self.stats:\n for its,v in sorted(splist[1].items(), key=itemgetter(1),reverse=True):\n self.itemset_id+=1\n for i in self.demultiplex( (self.itemset_id, len([self.codekw[i] for i in its]), v, [self.codekw[i] for i in its]) ):\n yield i\n\n if self.maxlen==None:\n self.maxlen=self.maxlength\n for l in xrange(2, min(self.maxlength+1, self.maxlen+1)):\n splist.append({})\n self.belowthres={}\n self.overthres={}\n prevl=l-1\n\n # Autothresholding\n if self.autothres==1:\n if len(self.input)==0 or len(self.passedkw)==0:\n break\n else:\n self.threshold=self.startingthreshold + int(len(self.passedkw)/len(self.input))\n\n self.cleanitemsets(l)\n self.passedkw={}\n prevsplist = splist[prevl]\n icombs = itertools.combinations\n insertcomb = self.insertcombfreq\n\n for k,v in self.input.iteritems():\n for k in icombs(k,l):\n insertit=True\n for i1 in icombs(k, prevl):\n if i1 not in prevsplist:\n insertit=False\n break\n\n if insertit:\n insertcomb( k,v )\n\n splist[l-1]={}\n splist[l]=self.overthres\n\n if self.stats:\n yield [self.maxlength, len(splist[l]), len(self.input), len(self.passedkw)]\n\n if not self.stats:\n for its,v in sorted(splist[l].items(), key=itemgetter(1),reverse=True):\n self.itemset_id+=1\n for i in self.demultiplex( (self.itemset_id, len([self.codekw[i] for i in its]), v, [self.codekw[i] for i in 
its]) ):\n yield i\n\n del(self.overthres)\n del(self.belowthres)\n del(self.passedkw)\n del(self.input)\n del(self.codekw)\n del(splist)\n\nclass sampledistvals:\n \"\"\"\n\n .. function:: sampledistvals(sample_size, C1, C2, C3) -> [C1, C2, C3]\n\n Sampledistvals returns sample_size distinct values for each of the input C1..Cn columns.\n\n >>> table1('''\n ... test1 2 3\n ... test1 2 3\n ... test2 4 2\n ... test4 2 t\n ... ''')\n >>> sql(\"select sampledistvals(3, a, b, c) from table1\")\n C1 | C2 | C3\n ---------------------------------------------\n [\"test1\",\"test2\",\"test4\"] | [2,4] | [2,3,\"t\"]\n \"\"\"\n registered=True\n\n def __init__(self):\n self.vals=None\n self.lenargs = -1\n self.init=True\n\n def step(self, *args):\n if self.init:\n self.lenargs = len(args)\n self.vals = a=[set() for i in xrange(self.lenargs-1)]\n self.init = False\n\n for i in xrange(1, self.lenargs):\n if len(self.vals[i-1])<args[0] and args[i] not in self.vals[i-1]:\n self.vals[i-1].add(args[i])\n\n def final(self):\n yield tuple(['C'+str(i) for i in xrange(1, self.lenargs)] )\n yield [jopts.toj(list(i)) for i in self.vals]\n\nclass sample:\n \"\"\"\n\n .. function:: sample(sample_size, C1, C2, C3)\n\n Sample returns a random sample_size set of rows.\n\n >>> table1('''\n ... test1 2 3\n ... test1 2 3\n ... test2 4 2\n ... test4 2 t\n ... 
''')\n\n >>> sql(\"select sample(2, a, b, c) from table1\") # doctest: +ELLIPSIS\n C1 | C2 | C3\n ---------------\n ...\n \"\"\"\n registered=True\n\n def __init__(self):\n self.samplelist = []\n self.index = 0\n\n def step(self, *args):\n sample_count = args[0]\n\n # Generate the reservoir\n if self.index < sample_count:\n self.samplelist.append(args[1:])\n else:\n r = random.randint(0, self.index)\n if r < sample_count:\n self.samplelist[r] = args[1:]\n\n self.index += 1\n\n\n def final(self):\n if len(self.samplelist) == []:\n yield tuple(['C1'])\n else:\n yield tuple(['C'+str(i) for i in xrange(1, len(self.samplelist[0]) + 1)] )\n for r in self.samplelist:\n yield list(r)\n\nif not ('.' in __name__):\n \"\"\"\n This is needed to be able to test the function, put it at the end of every\n new function you create\n \"\"\"\n import sys\n import setpath\n from functions import *\n testfunction()\n if __name__ == \"__main__\":\n reload(sys)\n sys.setdefaultencoding('utf-8')\n import doctest\n doctest.testmod()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import mysql.connector
import json
mysql_user = 'root'
mysql_pass = 'funwfats'
mysql_host = 'localhost'
mysql_base = 'sys'
wn8_file = "wn8exp.json"
def fill_wn8_table():
with open(wn8_file, encoding="utf-8") as file:
wn8_dict = json.loads(file.read())
cnx_wn8 = mysql.connector.connect(user=mysql_user, password=mysql_pass, host=mysql_host, database=mysql_base)
cursor_wn8 = cnx_wn8.cursor()
for tank in wn8_dict['data']:
add = "INSERT into wn8exp (id) VALUES (" + str(tank['IDNum']) + ");"
tid = tank['IDNum']
cursor_wn8.execute(add)
for stat in tank:
if stat != 'IDNum':
update = "UPDATE wn8exp SET " + stat + " = " + "\'" + str(tank[stat]) \
+ "\'" + " WHERE id = " + str(tid) + ";"
cursor_wn8.execute(update)
# for tank in wn8_dict['data']:
# for stat in tank:
# if stat == "IDNum":
# add = "INSERT into wn8exp (id) VALUES (" + str(tank[stat]) + ");"
# tid = tank[stat]
# cursor_wn8.execute(add)
# else:
# update = "UPDATE wn8exp SET " + stat + " = " + "\'" + str(tank[stat]) \
# + "\'" + " WHERE id = " + str(tid) + ";"
# cursor_wn8.execute(update)
cnx_wn8.commit()
cursor_wn8.close()
cnx_wn8.close()
if __name__ == '__main__':
fill_wn8_table()
|
normal
|
{
"blob_id": "291052c22059b32f3f300c323a10b260fbd0c20f",
"index": 9210,
"step-1": "import mysql.connector\r\nimport json\r\n\r\nmysql_user = 'root'\r\nmysql_pass = 'funwfats'\r\nmysql_host = 'localhost'\r\nmysql_base = 'sys'\r\nwn8_file = \"wn8exp.json\"\r\n\r\n\r\ndef fill_wn8_table():\r\n with open(wn8_file, encoding=\"utf-8\") as file:\r\n wn8_dict = json.loads(file.read())\r\n cnx_wn8 = mysql.connector.connect(user=mysql_user, password=mysql_pass, host=mysql_host, database=mysql_base)\r\n cursor_wn8 = cnx_wn8.cursor()\r\n for tank in wn8_dict['data']:\r\n add = \"INSERT into wn8exp (id) VALUES (\" + str(tank['IDNum']) + \");\"\r\n tid = tank['IDNum']\r\n cursor_wn8.execute(add)\r\n for stat in tank:\r\n if stat != 'IDNum':\r\n update = \"UPDATE wn8exp SET \" + stat + \" = \" + \"\\'\" + str(tank[stat]) \\\r\n + \"\\'\" + \" WHERE id = \" + str(tid) + \";\"\r\n cursor_wn8.execute(update)\r\n # for tank in wn8_dict['data']:\r\n # for stat in tank:\r\n # if stat == \"IDNum\":\r\n # add = \"INSERT into wn8exp (id) VALUES (\" + str(tank[stat]) + \");\"\r\n # tid = tank[stat]\r\n # cursor_wn8.execute(add)\r\n # else:\r\n # update = \"UPDATE wn8exp SET \" + stat + \" = \" + \"\\'\" + str(tank[stat]) \\\r\n # + \"\\'\" + \" WHERE id = \" + str(tid) + \";\"\r\n # cursor_wn8.execute(update)\r\n cnx_wn8.commit()\r\n cursor_wn8.close()\r\n cnx_wn8.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n fill_wn8_table()\r\n\r\n\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class ChamferCylinder(pynewton.ChamferCylinder):
pass
class ConvexHull(pynewton.ConvexHull):
pass
class ConvexHullModifier(pynewton.ConvexHullModifier):
pass
class NullCollider(pynewton.NullCollider):
pass
class TreeCollision(pynewton.TreeCollision):
pass
class TreeCollisionUserCallback(pynewton.TreeCollisionUserCallback):
def __init__(self, func):
self.callbackFunc = func
def OnCallback(self, bodyWithTreeCollision, body, vertices,
vertexstrideInBytes, indexCount, indexArray):
if self.callbackFunc != None:
self.callbackFunc(bodyWithTreeCollision, body, vertices,
vertexstrideInBytes, indexCount, indexArray)
pass
class Material(pynewton.Material):
pass
class BallJoint(pynewton.BallJoint):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.BallJoint.__init__(self, *args, **kwargs)
def OnCallback():
if self.callback != None:
self.callback()
pass
class Hinge(pynewton.Hinge):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.Hinge.__init__(*args, **kwargs)
def OnCallback(desc):
if self.callback != None:
return self.callback(desc)
return 0
class Slider(pynewton.Slider):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.Slider.__init__(self, *args, **kwargs)
class Corkscrew(pynewton.Corkscrew):
def __init__(self, *args, **kwargs):
self.callback = None
pynewton.Corkscrew.__init__(self, *args, **kwargs)
def OnCallback(desc):
if self.callback != None:
return self.callback(desc)
return 0
class UniversalJoint(pynewton.UniversalJoint):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.UniversalJoint.__init__(self, *args, **kwargs)
def OnCallback(desc):
if self.callback != None:
return self.callback(desc)
return 0
class UpVector(pynewton.UpVector):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.UpVector.__init__(self, *args, **kwargs)
def OnCallback():
if self.callback != None:
self.callback()
class Tire(pynewton.Tire):
pass
class Vehicle(pynewton.Vehicle):
def __init__(self, *args, **kwargs):
self.tires = []
self.UpdateTireCallback = None
return pynewton.Vehicle.__init__(self, *args, **kwargs)
def AddTire(self, matrix, pin, mass, width, radius, suspensionShock,
suspensionSpring, suspensionLength, userData, collisionID):
tire = pynewton.Vehicle.AddTire(self, matrix, pin, mass, width,
radius, suspensionShock, suspensionSpring, suspensionLength,
userData, collisionID)
tires.append(tire)
return tire
def RemoveTire(self, tire):
del tires[tires.index(tire)]
tire = pynewton.Vehicle.RemoveTire(self, tire)
def OnCallback(self):
if self.UpdateTireCallback != None:
self.UpdateTireCallback(self)
class HeightField(pynewton.HeightField):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class World(pynewton.World):
def __init__(self):
self.bodyList = []
self.newtonBodyLookup = {}
self.materialCallbacks = {}
self.currentCallback = None
self.raycastUserData = None
self.raycastCallback = None
pynewton.World.__init__(self)
def RegisterBody(self, body):
self.bodyList.append(body)
self.newtonBodyLookup[body.IDKey()] = body
def UnregisterBody(self, body):
self.bodyList.remove(bodyList.index(body))
del self.newtonBodyLookup[body.m_body]
<|reserved_special_token_0|>
def ForEachBodyDo(self, function):
for body in self.bodyList:
function(body)
<|reserved_special_token_0|>
def RayCastCallback(self, body, nx, ny, nz, collisionID, intersectParam):
return self.raycastCallback(body, (nx, ny, nz), collisionID, self.
raycastUserData, intersectParam)
def MaterialSetCollisionCallback(self, id1, id2, userdata=None,
begin_func=None, process_func=None, end_func=None):
self.materialCallbacks[id1, id2] = _materialCallback(id1, id2,
begin_func, process_func, end_func, userdata)
self.RegisterMaterialCallbackBetween(id1, id2)
def GetMaterialCallback(self, material, body1, body2):
id1 = body1.MaterialGroupID()
id2 = body2.MaterialGroupID()
cb = self.materialCallbacks[id1, id2]
return cb
def MaterialBeginCollision(self, material, b1, b2):
body1 = self.newtonBodyLookup[int(b1)]
body2 = self.newtonBodyLookup[int(b2)]
self.currentCallback = self.GetMaterialCallback(material, body1, body2)
if self.currentCallback.beginCallback:
self.currentCallback.beginCallback(material, body1, body2, self
.currentCallback.userobject)
def MaterialProcessCollision(self, material, contactHandle):
if self.currentCallback.processCallback:
self.currentCallback.processCallback(material, contactHandle,
self.currentCallback.userobject)
def MaterialEndCollision(self, material):
if self.currentCallback.endCallback:
self.currentCallback.endCallback(material, self.currentCallback
.userobject)
class CollisionGeometry(pynewton.CollisionGeometry):
def draw(self):
if not GlPresent:
raise 'OpenGL module could not be loaded'
class Sphere(pynewton.Sphere):
def __init__(self, world, w, h, d, offset_matrix=None):
pynewton.Sphere.__init__(self, world, w, h, d, offset_matrix)
self.width = w
self.height = h
self.depth = d
if GLPresent:
self.quad = GLU.gluNewQuadric()
def draw(self):
if not GLPresent:
raise 'OpenGL module could not be loaded'
GL.glPushMatrix()
GL.glScalef(self.width, self.height, self.depth)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
GLU.gluSphere(self.quad, 1.0, 12, 12)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glPopMatrix()
class Box(pynewton.Box):
pass
class Cone(pynewton.Cone):
pass
class Cylinder(pynewton.Cylinder):
pass
class ChamferCylinder(pynewton.ChamferCylinder):
pass
class ConvexHull(pynewton.ConvexHull):
pass
class ConvexHullModifier(pynewton.ConvexHullModifier):
pass
class NullCollider(pynewton.NullCollider):
pass
class TreeCollision(pynewton.TreeCollision):
pass
class TreeCollisionUserCallback(pynewton.TreeCollisionUserCallback):
def __init__(self, func):
self.callbackFunc = func
def OnCallback(self, bodyWithTreeCollision, body, vertices,
vertexstrideInBytes, indexCount, indexArray):
if self.callbackFunc != None:
self.callbackFunc(bodyWithTreeCollision, body, vertices,
vertexstrideInBytes, indexCount, indexArray)
pass
class Material(pynewton.Material):
pass
class BallJoint(pynewton.BallJoint):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.BallJoint.__init__(self, *args, **kwargs)
def OnCallback():
if self.callback != None:
self.callback()
pass
class Hinge(pynewton.Hinge):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.Hinge.__init__(*args, **kwargs)
def OnCallback(desc):
if self.callback != None:
return self.callback(desc)
return 0
class Slider(pynewton.Slider):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.Slider.__init__(self, *args, **kwargs)
class Corkscrew(pynewton.Corkscrew):
def __init__(self, *args, **kwargs):
self.callback = None
pynewton.Corkscrew.__init__(self, *args, **kwargs)
def OnCallback(desc):
if self.callback != None:
return self.callback(desc)
return 0
class UniversalJoint(pynewton.UniversalJoint):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.UniversalJoint.__init__(self, *args, **kwargs)
def OnCallback(desc):
if self.callback != None:
return self.callback(desc)
return 0
class UpVector(pynewton.UpVector):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.UpVector.__init__(self, *args, **kwargs)
def OnCallback():
if self.callback != None:
self.callback()
class Tire(pynewton.Tire):
pass
class Vehicle(pynewton.Vehicle):
def __init__(self, *args, **kwargs):
self.tires = []
self.UpdateTireCallback = None
return pynewton.Vehicle.__init__(self, *args, **kwargs)
def AddTire(self, matrix, pin, mass, width, radius, suspensionShock,
suspensionSpring, suspensionLength, userData, collisionID):
tire = pynewton.Vehicle.AddTire(self, matrix, pin, mass, width,
radius, suspensionShock, suspensionSpring, suspensionLength,
userData, collisionID)
tires.append(tire)
return tire
def RemoveTire(self, tire):
del tires[tires.index(tire)]
tire = pynewton.Vehicle.RemoveTire(self, tire)
def OnCallback(self):
if self.UpdateTireCallback != None:
self.UpdateTireCallback(self)
class HeightField(pynewton.HeightField):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Body(pynewton.Body):
def __init__(self, world, cg):
self.ApplyForceAndTorqueCallback = None
self.TransformCallback = None
self.AutoactiveCallback = None
self.DestructorCallback = None
self.TreeCollisionCallback = None
pynewton.Body.__init__(self, world, cg)
world.RegisterBody(self)
self.py_cg = cg
<|reserved_special_token_0|>
def SetAutoactiveCallback(self, callback):
self.AutoactiveCallback = callback
def GetCollision(self):
return self.py_cg
def OnApplyForceAndTorque(self):
if self.ApplyForceAndTorqueCallback != None:
self.ApplyForceAndTorqueCallback(self)
def OnAutoactive(self, state):
if self.AutoactiveCallback != None:
self.AutoactiveCallback(self, state)
def OnTransform(self):
matrix = self.GetMatrix()
if self.TransformCallback != None:
self.TransformCallback(self, matrix)
def OnDestruct(self):
if self.DestructorCallback != None:
self.DestructorCallback(self, matrix)
def OnTreeCollisionWith(self, body):
if self.TreeCollisionCallback != None:
self.TreeCollisionCallback(body)
def Draw(self):
m = self.GetMatrix()
if not GLPresent:
raise 'OpenGL module not loaded, cannot draw'
GL.glPushMatrix()
GL.glMultMatrixf(m)
c = self.GetCollision()
c.draw()
GL.glPopMatrix()
class _materialCallback(object):
def __init__(self, id1, id2, begin_function, process_function,
end_function, userobject):
self.id1 = id1
self.id2 = id2
self.beginCallback = begin_function
self.processCallback = process_function
self.endCallback = end_function
self.userobject = userobject
class World(pynewton.World):
def __init__(self):
self.bodyList = []
self.newtonBodyLookup = {}
self.materialCallbacks = {}
self.currentCallback = None
self.raycastUserData = None
self.raycastCallback = None
pynewton.World.__init__(self)
def RegisterBody(self, body):
self.bodyList.append(body)
self.newtonBodyLookup[body.IDKey()] = body
def UnregisterBody(self, body):
self.bodyList.remove(bodyList.index(body))
del self.newtonBodyLookup[body.m_body]
def NewtonBodyToBody(self, ptr):
return self.newtonBodyLookup[int(ptr)]
def ForEachBodyDo(self, function):
for body in self.bodyList:
function(body)
def RayCast(self, p0, p1, callback, userdata):
"""Casts a ray in the world defined by p0 and p1 and calls callback
with the body, normal, collision id, user data and intersection distance"""
self.raycastUserData = userdata
self.raycastCallback = callback
self.CppRayCast.__call__(p0[0], p0[1], p0[2], p1[0], p1[1], p1[2])
def RayCastCallback(self, body, nx, ny, nz, collisionID, intersectParam):
return self.raycastCallback(body, (nx, ny, nz), collisionID, self.
raycastUserData, intersectParam)
def MaterialSetCollisionCallback(self, id1, id2, userdata=None,
begin_func=None, process_func=None, end_func=None):
self.materialCallbacks[id1, id2] = _materialCallback(id1, id2,
begin_func, process_func, end_func, userdata)
self.RegisterMaterialCallbackBetween(id1, id2)
def GetMaterialCallback(self, material, body1, body2):
id1 = body1.MaterialGroupID()
id2 = body2.MaterialGroupID()
cb = self.materialCallbacks[id1, id2]
return cb
def MaterialBeginCollision(self, material, b1, b2):
body1 = self.newtonBodyLookup[int(b1)]
body2 = self.newtonBodyLookup[int(b2)]
self.currentCallback = self.GetMaterialCallback(material, body1, body2)
if self.currentCallback.beginCallback:
self.currentCallback.beginCallback(material, body1, body2, self
.currentCallback.userobject)
def MaterialProcessCollision(self, material, contactHandle):
if self.currentCallback.processCallback:
self.currentCallback.processCallback(material, contactHandle,
self.currentCallback.userobject)
def MaterialEndCollision(self, material):
if self.currentCallback.endCallback:
self.currentCallback.endCallback(material, self.currentCallback
.userobject)
class CollisionGeometry(pynewton.CollisionGeometry):
def draw(self):
if not GlPresent:
raise 'OpenGL module could not be loaded'
class Sphere(pynewton.Sphere):
def __init__(self, world, w, h, d, offset_matrix=None):
pynewton.Sphere.__init__(self, world, w, h, d, offset_matrix)
self.width = w
self.height = h
self.depth = d
if GLPresent:
self.quad = GLU.gluNewQuadric()
def draw(self):
if not GLPresent:
raise 'OpenGL module could not be loaded'
GL.glPushMatrix()
GL.glScalef(self.width, self.height, self.depth)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
GLU.gluSphere(self.quad, 1.0, 12, 12)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glPopMatrix()
class Box(pynewton.Box):
pass
class Cone(pynewton.Cone):
pass
class Cylinder(pynewton.Cylinder):
pass
class ChamferCylinder(pynewton.ChamferCylinder):
pass
class ConvexHull(pynewton.ConvexHull):
pass
class ConvexHullModifier(pynewton.ConvexHullModifier):
pass
class NullCollider(pynewton.NullCollider):
pass
class TreeCollision(pynewton.TreeCollision):
pass
class TreeCollisionUserCallback(pynewton.TreeCollisionUserCallback):
def __init__(self, func):
self.callbackFunc = func
def OnCallback(self, bodyWithTreeCollision, body, vertices,
vertexstrideInBytes, indexCount, indexArray):
if self.callbackFunc != None:
self.callbackFunc(bodyWithTreeCollision, body, vertices,
vertexstrideInBytes, indexCount, indexArray)
pass
class Material(pynewton.Material):
pass
class BallJoint(pynewton.BallJoint):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.BallJoint.__init__(self, *args, **kwargs)
def OnCallback():
if self.callback != None:
self.callback()
pass
class Hinge(pynewton.Hinge):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.Hinge.__init__(*args, **kwargs)
def OnCallback(desc):
if self.callback != None:
return self.callback(desc)
return 0
class Slider(pynewton.Slider):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.Slider.__init__(self, *args, **kwargs)
class Corkscrew(pynewton.Corkscrew):
def __init__(self, *args, **kwargs):
self.callback = None
pynewton.Corkscrew.__init__(self, *args, **kwargs)
def OnCallback(desc):
if self.callback != None:
return self.callback(desc)
return 0
class UniversalJoint(pynewton.UniversalJoint):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.UniversalJoint.__init__(self, *args, **kwargs)
def OnCallback(desc):
if self.callback != None:
return self.callback(desc)
return 0
class UpVector(pynewton.UpVector):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.UpVector.__init__(self, *args, **kwargs)
def OnCallback():
if self.callback != None:
self.callback()
class Tire(pynewton.Tire):
pass
class Vehicle(pynewton.Vehicle):
def __init__(self, *args, **kwargs):
self.tires = []
self.UpdateTireCallback = None
return pynewton.Vehicle.__init__(self, *args, **kwargs)
def AddTire(self, matrix, pin, mass, width, radius, suspensionShock,
suspensionSpring, suspensionLength, userData, collisionID):
tire = pynewton.Vehicle.AddTire(self, matrix, pin, mass, width,
radius, suspensionShock, suspensionSpring, suspensionLength,
userData, collisionID)
tires.append(tire)
return tire
def RemoveTire(self, tire):
del tires[tires.index(tire)]
tire = pynewton.Vehicle.RemoveTire(self, tire)
def OnCallback(self):
if self.UpdateTireCallback != None:
self.UpdateTireCallback(self)
class HeightField(pynewton.HeightField):
pass
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def GetEulerAngle(matrix):
return pynewton.GetEulerAngle(matrix)
<|reserved_special_token_0|>
class Body(pynewton.Body):
def __init__(self, world, cg):
self.ApplyForceAndTorqueCallback = None
self.TransformCallback = None
self.AutoactiveCallback = None
self.DestructorCallback = None
self.TreeCollisionCallback = None
pynewton.Body.__init__(self, world, cg)
world.RegisterBody(self)
self.py_cg = cg
def SetApplyForceAndTorqueCallback(self, callback):
self.ApplyForceAndTorqueCallback = callback
def SetAutoactiveCallback(self, callback):
self.AutoactiveCallback = callback
def GetCollision(self):
return self.py_cg
def OnApplyForceAndTorque(self):
if self.ApplyForceAndTorqueCallback != None:
self.ApplyForceAndTorqueCallback(self)
def OnAutoactive(self, state):
if self.AutoactiveCallback != None:
self.AutoactiveCallback(self, state)
def OnTransform(self):
matrix = self.GetMatrix()
if self.TransformCallback != None:
self.TransformCallback(self, matrix)
def OnDestruct(self):
if self.DestructorCallback != None:
self.DestructorCallback(self, matrix)
def OnTreeCollisionWith(self, body):
if self.TreeCollisionCallback != None:
self.TreeCollisionCallback(body)
def Draw(self):
m = self.GetMatrix()
if not GLPresent:
raise 'OpenGL module not loaded, cannot draw'
GL.glPushMatrix()
GL.glMultMatrixf(m)
c = self.GetCollision()
c.draw()
GL.glPopMatrix()
class _materialCallback(object):
def __init__(self, id1, id2, begin_function, process_function,
end_function, userobject):
self.id1 = id1
self.id2 = id2
self.beginCallback = begin_function
self.processCallback = process_function
self.endCallback = end_function
self.userobject = userobject
class World(pynewton.World):
def __init__(self):
self.bodyList = []
self.newtonBodyLookup = {}
self.materialCallbacks = {}
self.currentCallback = None
self.raycastUserData = None
self.raycastCallback = None
pynewton.World.__init__(self)
def RegisterBody(self, body):
self.bodyList.append(body)
self.newtonBodyLookup[body.IDKey()] = body
def UnregisterBody(self, body):
self.bodyList.remove(bodyList.index(body))
del self.newtonBodyLookup[body.m_body]
def NewtonBodyToBody(self, ptr):
return self.newtonBodyLookup[int(ptr)]
def ForEachBodyDo(self, function):
for body in self.bodyList:
function(body)
def RayCast(self, p0, p1, callback, userdata):
"""Casts a ray in the world defined by p0 and p1 and calls callback
with the body, normal, collision id, user data and intersection distance"""
self.raycastUserData = userdata
self.raycastCallback = callback
self.CppRayCast.__call__(p0[0], p0[1], p0[2], p1[0], p1[1], p1[2])
def RayCastCallback(self, body, nx, ny, nz, collisionID, intersectParam):
return self.raycastCallback(body, (nx, ny, nz), collisionID, self.
raycastUserData, intersectParam)
def MaterialSetCollisionCallback(self, id1, id2, userdata=None,
begin_func=None, process_func=None, end_func=None):
self.materialCallbacks[id1, id2] = _materialCallback(id1, id2,
begin_func, process_func, end_func, userdata)
self.RegisterMaterialCallbackBetween(id1, id2)
def GetMaterialCallback(self, material, body1, body2):
id1 = body1.MaterialGroupID()
id2 = body2.MaterialGroupID()
cb = self.materialCallbacks[id1, id2]
return cb
def MaterialBeginCollision(self, material, b1, b2):
body1 = self.newtonBodyLookup[int(b1)]
body2 = self.newtonBodyLookup[int(b2)]
self.currentCallback = self.GetMaterialCallback(material, body1, body2)
if self.currentCallback.beginCallback:
self.currentCallback.beginCallback(material, body1, body2, self
.currentCallback.userobject)
def MaterialProcessCollision(self, material, contactHandle):
if self.currentCallback.processCallback:
self.currentCallback.processCallback(material, contactHandle,
self.currentCallback.userobject)
def MaterialEndCollision(self, material):
if self.currentCallback.endCallback:
self.currentCallback.endCallback(material, self.currentCallback
.userobject)
class CollisionGeometry(pynewton.CollisionGeometry):
def draw(self):
if not GlPresent:
raise 'OpenGL module could not be loaded'
class Sphere(pynewton.Sphere):
def __init__(self, world, w, h, d, offset_matrix=None):
pynewton.Sphere.__init__(self, world, w, h, d, offset_matrix)
self.width = w
self.height = h
self.depth = d
if GLPresent:
self.quad = GLU.gluNewQuadric()
def draw(self):
if not GLPresent:
raise 'OpenGL module could not be loaded'
GL.glPushMatrix()
GL.glScalef(self.width, self.height, self.depth)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
GLU.gluSphere(self.quad, 1.0, 12, 12)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glPopMatrix()
class Box(pynewton.Box):
pass
class Cone(pynewton.Cone):
pass
class Cylinder(pynewton.Cylinder):
pass
class ChamferCylinder(pynewton.ChamferCylinder):
pass
class ConvexHull(pynewton.ConvexHull):
pass
class ConvexHullModifier(pynewton.ConvexHullModifier):
pass
class NullCollider(pynewton.NullCollider):
pass
class TreeCollision(pynewton.TreeCollision):
pass
class TreeCollisionUserCallback(pynewton.TreeCollisionUserCallback):
def __init__(self, func):
self.callbackFunc = func
def OnCallback(self, bodyWithTreeCollision, body, vertices,
vertexstrideInBytes, indexCount, indexArray):
if self.callbackFunc != None:
self.callbackFunc(bodyWithTreeCollision, body, vertices,
vertexstrideInBytes, indexCount, indexArray)
pass
class Material(pynewton.Material):
pass
class BallJoint(pynewton.BallJoint):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.BallJoint.__init__(self, *args, **kwargs)
def OnCallback():
if self.callback != None:
self.callback()
pass
class Hinge(pynewton.Hinge):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.Hinge.__init__(*args, **kwargs)
def OnCallback(desc):
if self.callback != None:
return self.callback(desc)
return 0
class Slider(pynewton.Slider):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.Slider.__init__(self, *args, **kwargs)
class Corkscrew(pynewton.Corkscrew):
def __init__(self, *args, **kwargs):
self.callback = None
pynewton.Corkscrew.__init__(self, *args, **kwargs)
def OnCallback(desc):
if self.callback != None:
return self.callback(desc)
return 0
class UniversalJoint(pynewton.UniversalJoint):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.UniversalJoint.__init__(self, *args, **kwargs)
def OnCallback(desc):
if self.callback != None:
return self.callback(desc)
return 0
class UpVector(pynewton.UpVector):
def __init__(self, *args, **kwargs):
self.callback = None
return pynewton.UpVector.__init__(self, *args, **kwargs)
def OnCallback():
if self.callback != None:
self.callback()
class Tire(pynewton.Tire):
pass
class Vehicle(pynewton.Vehicle):
def __init__(self, *args, **kwargs):
self.tires = []
self.UpdateTireCallback = None
return pynewton.Vehicle.__init__(self, *args, **kwargs)
def AddTire(self, matrix, pin, mass, width, radius, suspensionShock,
suspensionSpring, suspensionLength, userData, collisionID):
tire = pynewton.Vehicle.AddTire(self, matrix, pin, mass, width,
radius, suspensionShock, suspensionSpring, suspensionLength,
userData, collisionID)
tires.append(tire)
return tire
def RemoveTire(self, tire):
del tires[tires.index(tire)]
tire = pynewton.Vehicle.RemoveTire(self, tire)
def OnCallback(self):
if self.UpdateTireCallback != None:
self.UpdateTireCallback(self)
class HeightField(pynewton.HeightField):
pass
<|reserved_special_token_1|>
# The purpose of this module is essentially to subclass the basic SWIG generated
# pynewton classes and add a bit of functionality to them (mostly callback related
# stuff). This could be done in the SWIG interface file, but it's easier to do it
# here since it makes adding python-specific extensions to newton easier.
import pynewton
try:
    import OpenGL.GL as GL
    import OpenGL.GLU as GLU
    GLPresent = True
# BUG FIX: the original used a bare `except:`, which also swallows
# KeyboardInterrupt/SystemExit; only a failed import should disable GL.
except ImportError:
    GLPresent = False
def GetEulerAngle ( matrix ):
    """Delegate to pynewton.GetEulerAngle; presumably extracts Euler angles
    from *matrix* -- semantics defined by the SWIG-wrapped C++ side."""
    return pynewton.GetEulerAngle( matrix )
def SetEulerAngle ( angle ):
    """Delegate to pynewton.SetEulerAngle; presumably builds a rotation
    matrix from *angle* -- semantics defined by the SWIG-wrapped C++ side."""
    return pynewton.SetEulerAngle( angle )
#extensions to body
def NullApplyForceAndTorqueCallback( body ) :
    """No-op placeholder for a Body force/torque callback."""
    pass
def NullTransformCallback( body, matrix ):
    """No-op placeholder for a Body transform callback."""
    pass
def NullAutoactiveCallback( body, state ):
    """No-op placeholder for a Body autoactive callback."""
    pass
def NullBodyDestructCallback( body ):
    """No-op placeholder for a Body destructor callback."""
    pass
class Body( pynewton.Body ):
    """Python-side body wrapper.

    Registers itself with its World on construction and forwards the
    Newton C++ callbacks (force/torque, transform, autoactive, destruct,
    tree collision) to optional user-assigned callables.
    """

    def __init__( self, world, cg ):
        # Callback slots; left as None until the user assigns them.
        self.ApplyForceAndTorqueCallback = None
        self.TransformCallback = None
        self.AutoactiveCallback = None
        self.DestructorCallback = None
        self.TreeCollisionCallback = None
        pynewton.Body.__init__(self, world, cg )
        world.RegisterBody( self )
        # Keep a reference to the collision geometry so Draw() can reach it.
        self.py_cg = cg

    def SetApplyForceAndTorqueCallback( self, callback ):
        self.ApplyForceAndTorqueCallback = callback

    def SetAutoactiveCallback( self, callback ):
        self.AutoactiveCallback = callback

    def GetCollision( self ):
        """Return the Python-side collision geometry this body was built with."""
        return self.py_cg

    def OnApplyForceAndTorque(self):
        if self.ApplyForceAndTorqueCallback is not None:
            self.ApplyForceAndTorqueCallback( self )

    def OnAutoactive(self, state ):
        if self.AutoactiveCallback is not None:
            self.AutoactiveCallback( self, state )

    def OnTransform( self ):
        matrix = self.GetMatrix()
        if self.TransformCallback is not None:
            self.TransformCallback( self, matrix )

    def OnDestruct( self ):
        # BUG FIX: the original passed an undefined name `matrix` here
        # (NameError when fired).  Destructor callbacks take only the body,
        # matching NullBodyDestructCallback's signature.
        if self.DestructorCallback is not None:
            self.DestructorCallback( self )

    def OnTreeCollisionWith( self, body ):
        if self.TreeCollisionCallback is not None:
            self.TreeCollisionCallback(body)

    def Draw( self ):
        """Draw the body's collision geometry at its current transform.

        Raises RuntimeError when the optional OpenGL module is absent.
        """
        m = self.GetMatrix()
        if not GLPresent:
            # BUG FIX: Python 3 forbids raising a string; use a real exception.
            raise RuntimeError("OpenGL module not loaded, cannot draw")
        GL.glPushMatrix()
        GL.glMultMatrixf( m )
        c = self.GetCollision()
        c.draw()
        GL.glPopMatrix()
class _materialCallback( object ):
def __init__(self, id1, id2, begin_function, process_function, end_function, userobject):
self.id1 = id1
self.id2 = id2
self.beginCallback = begin_function
self.processCallback = process_function
self.endCallback = end_function
self.userobject = userobject
#extensions to world
class World( pynewton.World ):
    """World wrapper that tracks Python Body instances and routes raycast and
    material-collision callbacks from the C++ side to user callables."""

    def __init__(self ):
        self.bodyList = []
        # Maps a body's IDKey() to its Python wrapper (see RegisterBody).
        self.newtonBodyLookup = {}
        # Maps (id1, id2) material-group pairs to _materialCallback records.
        self.materialCallbacks = {}
        self.currentCallback = None
        self.raycastUserData = None
        self.raycastCallback = None
        pynewton.World.__init__(self)

    def RegisterBody( self, body ):
        self.bodyList.append( body )
        self.newtonBodyLookup[body.IDKey()] = body

    def UnregisterBody( self, body ):
        # BUG FIX: original was self.bodyList.remove(bodyList.index(body)),
        # a NameError (`bodyList` unqualified) that would also have removed
        # by the wrong value.  Remove the body object itself.
        self.bodyList.remove( body )
        # BUG FIX: RegisterBody keys the lookup on body.IDKey(), but the
        # original deleted key body.m_body; use IDKey() so the two methods
        # agree.  NOTE(review): presumably IDKey() and m_body coincide --
        # confirm against the SWIG wrapper.
        del self.newtonBodyLookup[body.IDKey()]

    def NewtonBodyToBody( self, ptr ):
        """Translate a raw Newton body pointer into its Python wrapper."""
        return self.newtonBodyLookup[int(ptr)]

    def ForEachBodyDo( self, function ):
        """Call *function* once for every registered body."""
        for body in self.bodyList:
            function( body )

    def RayCast( self, p0, p1, callback, userdata):
        """Casts a ray in the world defined by p0 and p1 and calls callback
        with the body, normal, collision id, user data and intersection distance"""
        self.raycastUserData = userdata
        self.raycastCallback = callback
        self.CppRayCast.__call__(p0[0], p0[1], p0[2], p1[0], p1[1], p1[2])

    def RayCastCallback( self, body, nx, ny, nz, collisionID, intersectParam ):
        # Delegate to the user function registered in RayCast().
        return self.raycastCallback( body, (nx, ny, nz), collisionID,
                                     self.raycastUserData, intersectParam )

    def MaterialSetCollisionCallback( self, id1, id2, userdata=None,
                                      begin_func=None, process_func=None,
                                      end_func=None ):
        """Register begin/process/end collision callbacks for the material
        pair (id1, id2); *userdata* is passed back to each callback."""
        self.materialCallbacks[(id1, id2)] = _materialCallback(
            id1, id2, begin_func, process_func, end_func, userdata)
        self.RegisterMaterialCallbackBetween( id1, id2 )

    def GetMaterialCallback(self, material, body1, body2):
        """Look up the callback record for the two bodies' material groups."""
        id1 = body1.MaterialGroupID()
        id2 = body2.MaterialGroupID()
        return self.materialCallbacks[(id1, id2)]

    def MaterialBeginCollision( self, material, b1, b2 ):
        body1 = self.newtonBodyLookup[int(b1)]
        body2 = self.newtonBodyLookup[int(b2)]
        # Remember the record: Process/End fire without body arguments.
        self.currentCallback = self.GetMaterialCallback( material, body1, body2 )
        if self.currentCallback.beginCallback:
            self.currentCallback.beginCallback(
                material, body1, body2, self.currentCallback.userobject )

    def MaterialProcessCollision( self, material, contactHandle ):
        if self.currentCallback.processCallback:
            self.currentCallback.processCallback(
                material, contactHandle, self.currentCallback.userobject )

    def MaterialEndCollision( self, material ):
        if self.currentCallback.endCallback:
            self.currentCallback.endCallback(
                material, self.currentCallback.userobject )
#collision extensions
class CollisionGeometry( pynewton.CollisionGeometry ):
    """Base class for drawable collision geometry."""

    def draw(self):
        # BUG FIX: the original tested the misspelled name `GlPresent`
        # (NameError) and raised a string, which Python 3 forbids.
        if not GLPresent:
            raise RuntimeError("OpenGL module could not be loaded")
class Sphere ( pynewton.Sphere ):
    """Sphere collision geometry (an ellipsoid when w, h, d differ) with a
    wireframe OpenGL draw helper."""

    def __init__(self, world, w, h, d, offset_matrix=None):
        pynewton.Sphere.__init__( self, world, w, h, d, offset_matrix )
        self.width = w
        self.height = h
        self.depth = d
        if GLPresent:
            # Reusable quadric object for gluSphere rendering.
            self.quad = GLU.gluNewQuadric()

    def draw(self):
        """Render a wireframe unit sphere scaled by the stored dimensions.

        Raises RuntimeError when the optional OpenGL module is absent.
        """
        if not GLPresent:
            # BUG FIX: Python 3 forbids raising a string; use a real exception.
            raise RuntimeError("OpenGL module could not be loaded")
        GL.glPushMatrix()
        GL.glScalef( self.width, self.height, self.depth )
        # Draw as wireframe, then restore fill mode for later geometry.
        GL.glPolygonMode( GL.GL_FRONT_AND_BACK, GL.GL_LINE )
        GLU.gluSphere( self.quad, 1.0, 12, 12 )
        GL.glPolygonMode( GL.GL_FRONT_AND_BACK, GL.GL_FILL )
        GL.glPopMatrix()
# Thin pass-through subclasses of the SWIG-generated collision shapes.
# They exist so user code can import every shape from this module and so
# Python-side behavior can be added later without changing callers.
class Box ( pynewton.Box ):
    pass
class Cone ( pynewton.Cone ):
    pass
class Cylinder (pynewton.Cylinder):
    pass
class ChamferCylinder (pynewton.ChamferCylinder):
    pass
class ConvexHull (pynewton.ConvexHull):
    pass
class ConvexHullModifier (pynewton.ConvexHullModifier):
    pass
class NullCollider (pynewton.NullCollider ):
    pass
class TreeCollision (pynewton.TreeCollision):
    pass
class TreeCollisionUserCallback ( pynewton.TreeCollisionUserCallback ):
    """Adapter that forwards tree-collision callbacks to a user function."""

    def __init__( self, func ):
        self.callbackFunc = func

    def OnCallback (self, bodyWithTreeCollision, body, vertices,
                    vertexstrideInBytes, indexCount, indexArray):
        # Cleanup: dropped the original's stray trailing `pass` and compare
        # with `is not None` per Python convention.
        if self.callbackFunc is not None:
            self.callbackFunc( bodyWithTreeCollision, body, vertices,
                               vertexstrideInBytes, indexCount, indexArray)
#material extensions
class Material ( pynewton.Material ):
    # Pass-through subclass of the SWIG-generated material type.
    pass
#joint extensions
class BallJoint ( pynewton.BallJoint ):
    """Ball-and-socket joint with an optional user callback."""

    def __init__(self, *args, **kwargs ):
        self.callback = None
        return pynewton.BallJoint.__init__(self, *args, **kwargs )

    def OnCallback( self ):
        # BUG FIX: the original signature omitted `self`, so firing the
        # callback raised NameError on `self.callback`.
        if self.callback is not None:
            self.callback( )
class Hinge ( pynewton.Hinge ):
    """Hinge joint whose optional callback returns a value for the C++ side."""

    def __init__(self, *args, **kwargs ):
        self.callback = None
        # BUG FIX: original called pynewton.Hinge.__init__(*args, **kwargs)
        # without `self`, so the base class was never initialized for this
        # instance.
        return pynewton.Hinge.__init__( self, *args, **kwargs )

    def OnCallback( self, desc ):
        # BUG FIX: the original signature omitted `self`.
        if self.callback is not None:
            return self.callback( desc )
        return 0
class Slider ( pynewton.Slider ):
    """Slider joint; no Python-side callback dispatch is wired up yet
    (the `callback` slot is reserved for future use)."""

    def __init__( self, *args, **kwargs ):
        self.callback = None
        return pynewton.Slider.__init__( self, *args, **kwargs )
class Corkscrew ( pynewton.Corkscrew ):
    """Corkscrew joint whose optional callback returns a value for the C++ side."""

    def __init__(self, *args, **kwargs ):
        self.callback = None
        pynewton.Corkscrew.__init__(self, *args, **kwargs )

    def OnCallback( self, desc ):
        # BUG FIX: the original signature omitted `self`, so firing the
        # callback raised NameError on `self.callback`.
        if self.callback is not None:
            return self.callback( desc )
        return 0
class UniversalJoint ( pynewton.UniversalJoint ):
    """Universal joint whose optional callback returns a value for the C++ side."""

    def __init__(self, *args, **kwargs ):
        self.callback = None
        return pynewton.UniversalJoint.__init__( self, *args, **kwargs )

    def OnCallback( self, desc ):
        # BUG FIX: the original signature omitted `self`, so firing the
        # callback raised NameError on `self.callback`.
        if self.callback is not None:
            return self.callback( desc )
        return 0
class UpVector ( pynewton.UpVector ):
    """Up-vector constraint with an optional user callback."""

    def __init__(self, *args, **kwargs ):
        self.callback = None
        return pynewton.UpVector.__init__(self, *args, **kwargs )

    def OnCallback( self ):
        # BUG FIX: the original signature omitted `self`, so firing the
        # callback raised NameError on `self.callback`.
        if self.callback is not None:
            self.callback( )
class Tire ( pynewton.Tire ):
    # Pass-through subclass of the SWIG-generated vehicle tire.
    pass
class Vehicle ( pynewton.Vehicle ):
    """Vehicle wrapper that tracks its tires on the Python side and forwards
    the per-update tire callback to a user callable."""

    def __init__(self, *args, **kwargs ):
        self.tires = []
        self.UpdateTireCallback = None
        return pynewton.Vehicle.__init__(self, *args, **kwargs )

    def AddTire ( self, matrix, pin, mass, width, radius, suspensionShock,
                  suspensionSpring, suspensionLength, userData, collisionID):
        """Create a tire via the base class, track it, and return it."""
        tire = pynewton.Vehicle.AddTire(self, matrix, pin, mass, width,
                                        radius, suspensionShock,
                                        suspensionSpring, suspensionLength,
                                        userData, collisionID)
        # BUG FIX: the original appended to an undefined global `tires`
        # instead of the instance list initialized in __init__.
        self.tires.append( tire )
        return tire

    def RemoveTire( self, tire ):
        """Stop tracking *tire* and remove it via the base class."""
        # BUG FIX: the original indexed an undefined global `tires`.
        self.tires.remove( tire )
        pynewton.Vehicle.RemoveTire( self, tire )

    def OnCallback( self):
        if self.UpdateTireCallback is not None:
            self.UpdateTireCallback(self)
#Heightmap
class HeightField ( pynewton.HeightField ):
	"""Thin pass-through wrapper around pynewton.HeightField with no added behaviour."""
|
flexible
|
{
"blob_id": "90d792fe18e589a0d74d36797b46c6ac1d7946be",
"index": 4303,
"step-1": "<mask token>\n\n\nclass ChamferCylinder(pynewton.ChamferCylinder):\n pass\n\n\nclass ConvexHull(pynewton.ConvexHull):\n pass\n\n\nclass ConvexHullModifier(pynewton.ConvexHullModifier):\n pass\n\n\nclass NullCollider(pynewton.NullCollider):\n pass\n\n\nclass TreeCollision(pynewton.TreeCollision):\n pass\n\n\nclass TreeCollisionUserCallback(pynewton.TreeCollisionUserCallback):\n\n def __init__(self, func):\n self.callbackFunc = func\n\n def OnCallback(self, bodyWithTreeCollision, body, vertices,\n vertexstrideInBytes, indexCount, indexArray):\n if self.callbackFunc != None:\n self.callbackFunc(bodyWithTreeCollision, body, vertices,\n vertexstrideInBytes, indexCount, indexArray)\n pass\n\n\nclass Material(pynewton.Material):\n pass\n\n\nclass BallJoint(pynewton.BallJoint):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.BallJoint.__init__(self, *args, **kwargs)\n\n def OnCallback():\n if self.callback != None:\n self.callback()\n pass\n\n\nclass Hinge(pynewton.Hinge):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.Hinge.__init__(*args, **kwargs)\n\n def OnCallback(desc):\n if self.callback != None:\n return self.callback(desc)\n return 0\n\n\nclass Slider(pynewton.Slider):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.Slider.__init__(self, *args, **kwargs)\n\n\nclass Corkscrew(pynewton.Corkscrew):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n pynewton.Corkscrew.__init__(self, *args, **kwargs)\n\n def OnCallback(desc):\n if self.callback != None:\n return self.callback(desc)\n return 0\n\n\nclass UniversalJoint(pynewton.UniversalJoint):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.UniversalJoint.__init__(self, *args, **kwargs)\n\n def OnCallback(desc):\n if self.callback != None:\n return self.callback(desc)\n return 0\n\n\nclass UpVector(pynewton.UpVector):\n\n def __init__(self, *args, 
**kwargs):\n self.callback = None\n return pynewton.UpVector.__init__(self, *args, **kwargs)\n\n def OnCallback():\n if self.callback != None:\n self.callback()\n\n\nclass Tire(pynewton.Tire):\n pass\n\n\nclass Vehicle(pynewton.Vehicle):\n\n def __init__(self, *args, **kwargs):\n self.tires = []\n self.UpdateTireCallback = None\n return pynewton.Vehicle.__init__(self, *args, **kwargs)\n\n def AddTire(self, matrix, pin, mass, width, radius, suspensionShock,\n suspensionSpring, suspensionLength, userData, collisionID):\n tire = pynewton.Vehicle.AddTire(self, matrix, pin, mass, width,\n radius, suspensionShock, suspensionSpring, suspensionLength,\n userData, collisionID)\n tires.append(tire)\n return tire\n\n def RemoveTire(self, tire):\n del tires[tires.index(tire)]\n tire = pynewton.Vehicle.RemoveTire(self, tire)\n\n def OnCallback(self):\n if self.UpdateTireCallback != None:\n self.UpdateTireCallback(self)\n\n\nclass HeightField(pynewton.HeightField):\n pass\n",
"step-2": "<mask token>\n\n\nclass World(pynewton.World):\n\n def __init__(self):\n self.bodyList = []\n self.newtonBodyLookup = {}\n self.materialCallbacks = {}\n self.currentCallback = None\n self.raycastUserData = None\n self.raycastCallback = None\n pynewton.World.__init__(self)\n\n def RegisterBody(self, body):\n self.bodyList.append(body)\n self.newtonBodyLookup[body.IDKey()] = body\n\n def UnregisterBody(self, body):\n self.bodyList.remove(bodyList.index(body))\n del self.newtonBodyLookup[body.m_body]\n <mask token>\n\n def ForEachBodyDo(self, function):\n for body in self.bodyList:\n function(body)\n <mask token>\n\n def RayCastCallback(self, body, nx, ny, nz, collisionID, intersectParam):\n return self.raycastCallback(body, (nx, ny, nz), collisionID, self.\n raycastUserData, intersectParam)\n\n def MaterialSetCollisionCallback(self, id1, id2, userdata=None,\n begin_func=None, process_func=None, end_func=None):\n self.materialCallbacks[id1, id2] = _materialCallback(id1, id2,\n begin_func, process_func, end_func, userdata)\n self.RegisterMaterialCallbackBetween(id1, id2)\n\n def GetMaterialCallback(self, material, body1, body2):\n id1 = body1.MaterialGroupID()\n id2 = body2.MaterialGroupID()\n cb = self.materialCallbacks[id1, id2]\n return cb\n\n def MaterialBeginCollision(self, material, b1, b2):\n body1 = self.newtonBodyLookup[int(b1)]\n body2 = self.newtonBodyLookup[int(b2)]\n self.currentCallback = self.GetMaterialCallback(material, body1, body2)\n if self.currentCallback.beginCallback:\n self.currentCallback.beginCallback(material, body1, body2, self\n .currentCallback.userobject)\n\n def MaterialProcessCollision(self, material, contactHandle):\n if self.currentCallback.processCallback:\n self.currentCallback.processCallback(material, contactHandle,\n self.currentCallback.userobject)\n\n def MaterialEndCollision(self, material):\n if self.currentCallback.endCallback:\n self.currentCallback.endCallback(material, self.currentCallback\n 
.userobject)\n\n\nclass CollisionGeometry(pynewton.CollisionGeometry):\n\n def draw(self):\n if not GlPresent:\n raise 'OpenGL module could not be loaded'\n\n\nclass Sphere(pynewton.Sphere):\n\n def __init__(self, world, w, h, d, offset_matrix=None):\n pynewton.Sphere.__init__(self, world, w, h, d, offset_matrix)\n self.width = w\n self.height = h\n self.depth = d\n if GLPresent:\n self.quad = GLU.gluNewQuadric()\n\n def draw(self):\n if not GLPresent:\n raise 'OpenGL module could not be loaded'\n GL.glPushMatrix()\n GL.glScalef(self.width, self.height, self.depth)\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n GLU.gluSphere(self.quad, 1.0, 12, 12)\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n GL.glPopMatrix()\n\n\nclass Box(pynewton.Box):\n pass\n\n\nclass Cone(pynewton.Cone):\n pass\n\n\nclass Cylinder(pynewton.Cylinder):\n pass\n\n\nclass ChamferCylinder(pynewton.ChamferCylinder):\n pass\n\n\nclass ConvexHull(pynewton.ConvexHull):\n pass\n\n\nclass ConvexHullModifier(pynewton.ConvexHullModifier):\n pass\n\n\nclass NullCollider(pynewton.NullCollider):\n pass\n\n\nclass TreeCollision(pynewton.TreeCollision):\n pass\n\n\nclass TreeCollisionUserCallback(pynewton.TreeCollisionUserCallback):\n\n def __init__(self, func):\n self.callbackFunc = func\n\n def OnCallback(self, bodyWithTreeCollision, body, vertices,\n vertexstrideInBytes, indexCount, indexArray):\n if self.callbackFunc != None:\n self.callbackFunc(bodyWithTreeCollision, body, vertices,\n vertexstrideInBytes, indexCount, indexArray)\n pass\n\n\nclass Material(pynewton.Material):\n pass\n\n\nclass BallJoint(pynewton.BallJoint):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.BallJoint.__init__(self, *args, **kwargs)\n\n def OnCallback():\n if self.callback != None:\n self.callback()\n pass\n\n\nclass Hinge(pynewton.Hinge):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.Hinge.__init__(*args, **kwargs)\n\n def 
OnCallback(desc):\n if self.callback != None:\n return self.callback(desc)\n return 0\n\n\nclass Slider(pynewton.Slider):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.Slider.__init__(self, *args, **kwargs)\n\n\nclass Corkscrew(pynewton.Corkscrew):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n pynewton.Corkscrew.__init__(self, *args, **kwargs)\n\n def OnCallback(desc):\n if self.callback != None:\n return self.callback(desc)\n return 0\n\n\nclass UniversalJoint(pynewton.UniversalJoint):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.UniversalJoint.__init__(self, *args, **kwargs)\n\n def OnCallback(desc):\n if self.callback != None:\n return self.callback(desc)\n return 0\n\n\nclass UpVector(pynewton.UpVector):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.UpVector.__init__(self, *args, **kwargs)\n\n def OnCallback():\n if self.callback != None:\n self.callback()\n\n\nclass Tire(pynewton.Tire):\n pass\n\n\nclass Vehicle(pynewton.Vehicle):\n\n def __init__(self, *args, **kwargs):\n self.tires = []\n self.UpdateTireCallback = None\n return pynewton.Vehicle.__init__(self, *args, **kwargs)\n\n def AddTire(self, matrix, pin, mass, width, radius, suspensionShock,\n suspensionSpring, suspensionLength, userData, collisionID):\n tire = pynewton.Vehicle.AddTire(self, matrix, pin, mass, width,\n radius, suspensionShock, suspensionSpring, suspensionLength,\n userData, collisionID)\n tires.append(tire)\n return tire\n\n def RemoveTire(self, tire):\n del tires[tires.index(tire)]\n tire = pynewton.Vehicle.RemoveTire(self, tire)\n\n def OnCallback(self):\n if self.UpdateTireCallback != None:\n self.UpdateTireCallback(self)\n\n\nclass HeightField(pynewton.HeightField):\n pass\n",
"step-3": "<mask token>\n\n\nclass Body(pynewton.Body):\n\n def __init__(self, world, cg):\n self.ApplyForceAndTorqueCallback = None\n self.TransformCallback = None\n self.AutoactiveCallback = None\n self.DestructorCallback = None\n self.TreeCollisionCallback = None\n pynewton.Body.__init__(self, world, cg)\n world.RegisterBody(self)\n self.py_cg = cg\n <mask token>\n\n def SetAutoactiveCallback(self, callback):\n self.AutoactiveCallback = callback\n\n def GetCollision(self):\n return self.py_cg\n\n def OnApplyForceAndTorque(self):\n if self.ApplyForceAndTorqueCallback != None:\n self.ApplyForceAndTorqueCallback(self)\n\n def OnAutoactive(self, state):\n if self.AutoactiveCallback != None:\n self.AutoactiveCallback(self, state)\n\n def OnTransform(self):\n matrix = self.GetMatrix()\n if self.TransformCallback != None:\n self.TransformCallback(self, matrix)\n\n def OnDestruct(self):\n if self.DestructorCallback != None:\n self.DestructorCallback(self, matrix)\n\n def OnTreeCollisionWith(self, body):\n if self.TreeCollisionCallback != None:\n self.TreeCollisionCallback(body)\n\n def Draw(self):\n m = self.GetMatrix()\n if not GLPresent:\n raise 'OpenGL module not loaded, cannot draw'\n GL.glPushMatrix()\n GL.glMultMatrixf(m)\n c = self.GetCollision()\n c.draw()\n GL.glPopMatrix()\n\n\nclass _materialCallback(object):\n\n def __init__(self, id1, id2, begin_function, process_function,\n end_function, userobject):\n self.id1 = id1\n self.id2 = id2\n self.beginCallback = begin_function\n self.processCallback = process_function\n self.endCallback = end_function\n self.userobject = userobject\n\n\nclass World(pynewton.World):\n\n def __init__(self):\n self.bodyList = []\n self.newtonBodyLookup = {}\n self.materialCallbacks = {}\n self.currentCallback = None\n self.raycastUserData = None\n self.raycastCallback = None\n pynewton.World.__init__(self)\n\n def RegisterBody(self, body):\n self.bodyList.append(body)\n self.newtonBodyLookup[body.IDKey()] = body\n\n def 
UnregisterBody(self, body):\n self.bodyList.remove(bodyList.index(body))\n del self.newtonBodyLookup[body.m_body]\n\n def NewtonBodyToBody(self, ptr):\n return self.newtonBodyLookup[int(ptr)]\n\n def ForEachBodyDo(self, function):\n for body in self.bodyList:\n function(body)\n\n def RayCast(self, p0, p1, callback, userdata):\n \"\"\"Casts a ray in the world defined by p0 and p1 and calls callback\n\t\twith the body, normal, collision id, user data and intersection distance\"\"\"\n self.raycastUserData = userdata\n self.raycastCallback = callback\n self.CppRayCast.__call__(p0[0], p0[1], p0[2], p1[0], p1[1], p1[2])\n\n def RayCastCallback(self, body, nx, ny, nz, collisionID, intersectParam):\n return self.raycastCallback(body, (nx, ny, nz), collisionID, self.\n raycastUserData, intersectParam)\n\n def MaterialSetCollisionCallback(self, id1, id2, userdata=None,\n begin_func=None, process_func=None, end_func=None):\n self.materialCallbacks[id1, id2] = _materialCallback(id1, id2,\n begin_func, process_func, end_func, userdata)\n self.RegisterMaterialCallbackBetween(id1, id2)\n\n def GetMaterialCallback(self, material, body1, body2):\n id1 = body1.MaterialGroupID()\n id2 = body2.MaterialGroupID()\n cb = self.materialCallbacks[id1, id2]\n return cb\n\n def MaterialBeginCollision(self, material, b1, b2):\n body1 = self.newtonBodyLookup[int(b1)]\n body2 = self.newtonBodyLookup[int(b2)]\n self.currentCallback = self.GetMaterialCallback(material, body1, body2)\n if self.currentCallback.beginCallback:\n self.currentCallback.beginCallback(material, body1, body2, self\n .currentCallback.userobject)\n\n def MaterialProcessCollision(self, material, contactHandle):\n if self.currentCallback.processCallback:\n self.currentCallback.processCallback(material, contactHandle,\n self.currentCallback.userobject)\n\n def MaterialEndCollision(self, material):\n if self.currentCallback.endCallback:\n self.currentCallback.endCallback(material, self.currentCallback\n .userobject)\n\n\nclass 
CollisionGeometry(pynewton.CollisionGeometry):\n\n def draw(self):\n if not GlPresent:\n raise 'OpenGL module could not be loaded'\n\n\nclass Sphere(pynewton.Sphere):\n\n def __init__(self, world, w, h, d, offset_matrix=None):\n pynewton.Sphere.__init__(self, world, w, h, d, offset_matrix)\n self.width = w\n self.height = h\n self.depth = d\n if GLPresent:\n self.quad = GLU.gluNewQuadric()\n\n def draw(self):\n if not GLPresent:\n raise 'OpenGL module could not be loaded'\n GL.glPushMatrix()\n GL.glScalef(self.width, self.height, self.depth)\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n GLU.gluSphere(self.quad, 1.0, 12, 12)\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n GL.glPopMatrix()\n\n\nclass Box(pynewton.Box):\n pass\n\n\nclass Cone(pynewton.Cone):\n pass\n\n\nclass Cylinder(pynewton.Cylinder):\n pass\n\n\nclass ChamferCylinder(pynewton.ChamferCylinder):\n pass\n\n\nclass ConvexHull(pynewton.ConvexHull):\n pass\n\n\nclass ConvexHullModifier(pynewton.ConvexHullModifier):\n pass\n\n\nclass NullCollider(pynewton.NullCollider):\n pass\n\n\nclass TreeCollision(pynewton.TreeCollision):\n pass\n\n\nclass TreeCollisionUserCallback(pynewton.TreeCollisionUserCallback):\n\n def __init__(self, func):\n self.callbackFunc = func\n\n def OnCallback(self, bodyWithTreeCollision, body, vertices,\n vertexstrideInBytes, indexCount, indexArray):\n if self.callbackFunc != None:\n self.callbackFunc(bodyWithTreeCollision, body, vertices,\n vertexstrideInBytes, indexCount, indexArray)\n pass\n\n\nclass Material(pynewton.Material):\n pass\n\n\nclass BallJoint(pynewton.BallJoint):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.BallJoint.__init__(self, *args, **kwargs)\n\n def OnCallback():\n if self.callback != None:\n self.callback()\n pass\n\n\nclass Hinge(pynewton.Hinge):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.Hinge.__init__(*args, **kwargs)\n\n def OnCallback(desc):\n if self.callback != 
None:\n return self.callback(desc)\n return 0\n\n\nclass Slider(pynewton.Slider):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.Slider.__init__(self, *args, **kwargs)\n\n\nclass Corkscrew(pynewton.Corkscrew):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n pynewton.Corkscrew.__init__(self, *args, **kwargs)\n\n def OnCallback(desc):\n if self.callback != None:\n return self.callback(desc)\n return 0\n\n\nclass UniversalJoint(pynewton.UniversalJoint):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.UniversalJoint.__init__(self, *args, **kwargs)\n\n def OnCallback(desc):\n if self.callback != None:\n return self.callback(desc)\n return 0\n\n\nclass UpVector(pynewton.UpVector):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.UpVector.__init__(self, *args, **kwargs)\n\n def OnCallback():\n if self.callback != None:\n self.callback()\n\n\nclass Tire(pynewton.Tire):\n pass\n\n\nclass Vehicle(pynewton.Vehicle):\n\n def __init__(self, *args, **kwargs):\n self.tires = []\n self.UpdateTireCallback = None\n return pynewton.Vehicle.__init__(self, *args, **kwargs)\n\n def AddTire(self, matrix, pin, mass, width, radius, suspensionShock,\n suspensionSpring, suspensionLength, userData, collisionID):\n tire = pynewton.Vehicle.AddTire(self, matrix, pin, mass, width,\n radius, suspensionShock, suspensionSpring, suspensionLength,\n userData, collisionID)\n tires.append(tire)\n return tire\n\n def RemoveTire(self, tire):\n del tires[tires.index(tire)]\n tire = pynewton.Vehicle.RemoveTire(self, tire)\n\n def OnCallback(self):\n if self.UpdateTireCallback != None:\n self.UpdateTireCallback(self)\n\n\nclass HeightField(pynewton.HeightField):\n pass\n",
"step-4": "<mask token>\n\n\ndef GetEulerAngle(matrix):\n return pynewton.GetEulerAngle(matrix)\n\n\n<mask token>\n\n\nclass Body(pynewton.Body):\n\n def __init__(self, world, cg):\n self.ApplyForceAndTorqueCallback = None\n self.TransformCallback = None\n self.AutoactiveCallback = None\n self.DestructorCallback = None\n self.TreeCollisionCallback = None\n pynewton.Body.__init__(self, world, cg)\n world.RegisterBody(self)\n self.py_cg = cg\n\n def SetApplyForceAndTorqueCallback(self, callback):\n self.ApplyForceAndTorqueCallback = callback\n\n def SetAutoactiveCallback(self, callback):\n self.AutoactiveCallback = callback\n\n def GetCollision(self):\n return self.py_cg\n\n def OnApplyForceAndTorque(self):\n if self.ApplyForceAndTorqueCallback != None:\n self.ApplyForceAndTorqueCallback(self)\n\n def OnAutoactive(self, state):\n if self.AutoactiveCallback != None:\n self.AutoactiveCallback(self, state)\n\n def OnTransform(self):\n matrix = self.GetMatrix()\n if self.TransformCallback != None:\n self.TransformCallback(self, matrix)\n\n def OnDestruct(self):\n if self.DestructorCallback != None:\n self.DestructorCallback(self, matrix)\n\n def OnTreeCollisionWith(self, body):\n if self.TreeCollisionCallback != None:\n self.TreeCollisionCallback(body)\n\n def Draw(self):\n m = self.GetMatrix()\n if not GLPresent:\n raise 'OpenGL module not loaded, cannot draw'\n GL.glPushMatrix()\n GL.glMultMatrixf(m)\n c = self.GetCollision()\n c.draw()\n GL.glPopMatrix()\n\n\nclass _materialCallback(object):\n\n def __init__(self, id1, id2, begin_function, process_function,\n end_function, userobject):\n self.id1 = id1\n self.id2 = id2\n self.beginCallback = begin_function\n self.processCallback = process_function\n self.endCallback = end_function\n self.userobject = userobject\n\n\nclass World(pynewton.World):\n\n def __init__(self):\n self.bodyList = []\n self.newtonBodyLookup = {}\n self.materialCallbacks = {}\n self.currentCallback = None\n self.raycastUserData = None\n 
self.raycastCallback = None\n pynewton.World.__init__(self)\n\n def RegisterBody(self, body):\n self.bodyList.append(body)\n self.newtonBodyLookup[body.IDKey()] = body\n\n def UnregisterBody(self, body):\n self.bodyList.remove(bodyList.index(body))\n del self.newtonBodyLookup[body.m_body]\n\n def NewtonBodyToBody(self, ptr):\n return self.newtonBodyLookup[int(ptr)]\n\n def ForEachBodyDo(self, function):\n for body in self.bodyList:\n function(body)\n\n def RayCast(self, p0, p1, callback, userdata):\n \"\"\"Casts a ray in the world defined by p0 and p1 and calls callback\n\t\twith the body, normal, collision id, user data and intersection distance\"\"\"\n self.raycastUserData = userdata\n self.raycastCallback = callback\n self.CppRayCast.__call__(p0[0], p0[1], p0[2], p1[0], p1[1], p1[2])\n\n def RayCastCallback(self, body, nx, ny, nz, collisionID, intersectParam):\n return self.raycastCallback(body, (nx, ny, nz), collisionID, self.\n raycastUserData, intersectParam)\n\n def MaterialSetCollisionCallback(self, id1, id2, userdata=None,\n begin_func=None, process_func=None, end_func=None):\n self.materialCallbacks[id1, id2] = _materialCallback(id1, id2,\n begin_func, process_func, end_func, userdata)\n self.RegisterMaterialCallbackBetween(id1, id2)\n\n def GetMaterialCallback(self, material, body1, body2):\n id1 = body1.MaterialGroupID()\n id2 = body2.MaterialGroupID()\n cb = self.materialCallbacks[id1, id2]\n return cb\n\n def MaterialBeginCollision(self, material, b1, b2):\n body1 = self.newtonBodyLookup[int(b1)]\n body2 = self.newtonBodyLookup[int(b2)]\n self.currentCallback = self.GetMaterialCallback(material, body1, body2)\n if self.currentCallback.beginCallback:\n self.currentCallback.beginCallback(material, body1, body2, self\n .currentCallback.userobject)\n\n def MaterialProcessCollision(self, material, contactHandle):\n if self.currentCallback.processCallback:\n self.currentCallback.processCallback(material, contactHandle,\n self.currentCallback.userobject)\n\n 
def MaterialEndCollision(self, material):\n if self.currentCallback.endCallback:\n self.currentCallback.endCallback(material, self.currentCallback\n .userobject)\n\n\nclass CollisionGeometry(pynewton.CollisionGeometry):\n\n def draw(self):\n if not GlPresent:\n raise 'OpenGL module could not be loaded'\n\n\nclass Sphere(pynewton.Sphere):\n\n def __init__(self, world, w, h, d, offset_matrix=None):\n pynewton.Sphere.__init__(self, world, w, h, d, offset_matrix)\n self.width = w\n self.height = h\n self.depth = d\n if GLPresent:\n self.quad = GLU.gluNewQuadric()\n\n def draw(self):\n if not GLPresent:\n raise 'OpenGL module could not be loaded'\n GL.glPushMatrix()\n GL.glScalef(self.width, self.height, self.depth)\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)\n GLU.gluSphere(self.quad, 1.0, 12, 12)\n GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)\n GL.glPopMatrix()\n\n\nclass Box(pynewton.Box):\n pass\n\n\nclass Cone(pynewton.Cone):\n pass\n\n\nclass Cylinder(pynewton.Cylinder):\n pass\n\n\nclass ChamferCylinder(pynewton.ChamferCylinder):\n pass\n\n\nclass ConvexHull(pynewton.ConvexHull):\n pass\n\n\nclass ConvexHullModifier(pynewton.ConvexHullModifier):\n pass\n\n\nclass NullCollider(pynewton.NullCollider):\n pass\n\n\nclass TreeCollision(pynewton.TreeCollision):\n pass\n\n\nclass TreeCollisionUserCallback(pynewton.TreeCollisionUserCallback):\n\n def __init__(self, func):\n self.callbackFunc = func\n\n def OnCallback(self, bodyWithTreeCollision, body, vertices,\n vertexstrideInBytes, indexCount, indexArray):\n if self.callbackFunc != None:\n self.callbackFunc(bodyWithTreeCollision, body, vertices,\n vertexstrideInBytes, indexCount, indexArray)\n pass\n\n\nclass Material(pynewton.Material):\n pass\n\n\nclass BallJoint(pynewton.BallJoint):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.BallJoint.__init__(self, *args, **kwargs)\n\n def OnCallback():\n if self.callback != None:\n self.callback()\n pass\n\n\nclass 
Hinge(pynewton.Hinge):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.Hinge.__init__(*args, **kwargs)\n\n def OnCallback(desc):\n if self.callback != None:\n return self.callback(desc)\n return 0\n\n\nclass Slider(pynewton.Slider):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.Slider.__init__(self, *args, **kwargs)\n\n\nclass Corkscrew(pynewton.Corkscrew):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n pynewton.Corkscrew.__init__(self, *args, **kwargs)\n\n def OnCallback(desc):\n if self.callback != None:\n return self.callback(desc)\n return 0\n\n\nclass UniversalJoint(pynewton.UniversalJoint):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.UniversalJoint.__init__(self, *args, **kwargs)\n\n def OnCallback(desc):\n if self.callback != None:\n return self.callback(desc)\n return 0\n\n\nclass UpVector(pynewton.UpVector):\n\n def __init__(self, *args, **kwargs):\n self.callback = None\n return pynewton.UpVector.__init__(self, *args, **kwargs)\n\n def OnCallback():\n if self.callback != None:\n self.callback()\n\n\nclass Tire(pynewton.Tire):\n pass\n\n\nclass Vehicle(pynewton.Vehicle):\n\n def __init__(self, *args, **kwargs):\n self.tires = []\n self.UpdateTireCallback = None\n return pynewton.Vehicle.__init__(self, *args, **kwargs)\n\n def AddTire(self, matrix, pin, mass, width, radius, suspensionShock,\n suspensionSpring, suspensionLength, userData, collisionID):\n tire = pynewton.Vehicle.AddTire(self, matrix, pin, mass, width,\n radius, suspensionShock, suspensionSpring, suspensionLength,\n userData, collisionID)\n tires.append(tire)\n return tire\n\n def RemoveTire(self, tire):\n del tires[tires.index(tire)]\n tire = pynewton.Vehicle.RemoveTire(self, tire)\n\n def OnCallback(self):\n if self.UpdateTireCallback != None:\n self.UpdateTireCallback(self)\n\n\nclass HeightField(pynewton.HeightField):\n pass\n",
"step-5": "# The purpose of this module is essentially to subclass the basic SWIG generated\n# pynewton classes and add a bit of functionality to them (mostly callback related\n# stuff). This could be done in the SWIG interface file, but it's easier to do it\n# here since it makes adding python-specific extensions to newton easier.\nimport pynewton\n\ntry:\n\timport OpenGL.GL as GL\n\timport OpenGL.GLU as GLU\n\tGLPresent = True\nexcept:\n\tGLPresent = False\n\n\ndef GetEulerAngle ( matrix ):\n\treturn pynewton.GetEulerAngle( matrix )\n\ndef SetEulerAngle ( angle ):\n\treturn pynewton.SetEulerAngle( angle )\n\n#extensions to body\ndef NullApplyForceAndTorqueCallback( body ) :\n\tpass\n\ndef NullTransformCallback( body, matrix ):\n\tpass\n\ndef NullAutoactiveCallback( body, state ):\n\tpass\n\ndef NullBodyDestructCallback( body ):\n\tpass\n\nclass Body( pynewton.Body ):\n\tdef __init__( self, world, cg ):\n\t\tself.ApplyForceAndTorqueCallback = None\n\t\tself.TransformCallback = None\n\t\tself.AutoactiveCallback = None\n\t\tself.DestructorCallback = None\n\t\tself.TreeCollisionCallback = None\n\n\t\tpynewton.Body.__init__(self, world, cg )\n\t\tworld.RegisterBody( self )\n\t\tself.py_cg = cg;\n\n\tdef SetApplyForceAndTorqueCallback( self, callback ):\n\t\tself.ApplyForceAndTorqueCallback = callback\n\n\tdef SetAutoactiveCallback( self, callback ):\n\t\tself.AutoactiveCallback = callback\n\n\tdef GetCollision( self ):\n\t\treturn self.py_cg\n\n\tdef OnApplyForceAndTorque(self):\n\t\tif self.ApplyForceAndTorqueCallback != None:\n\t\t\tself.ApplyForceAndTorqueCallback( self )\n\n\tdef OnAutoactive(self, state ):\n\t\tif self.AutoactiveCallback != None:\n\t\t\tself.AutoactiveCallback( self, state )\n\n\tdef OnTransform( self ):\n\t\tmatrix = self.GetMatrix()\n\t\tif self.TransformCallback != None:\n\t\t\tself.TransformCallback( self, matrix )\n\n\tdef OnDestruct( self ):\n\t\tif self.DestructorCallback != None:\n\t\t\tself.DestructorCallback( self, matrix )\n\n\tdef 
OnTreeCollisionWith( self, body ):\n\t\tif self.TreeCollisionCallback != None:\n\t\t\tself.TreeCollisionCallback(body)\n\n\tdef Draw( self ):\n\t\tm = self.GetMatrix()\n\t\tif not GLPresent: raise \"OpenGL module not loaded, cannot draw\"\n\t\tGL.glPushMatrix()\n\t\tGL.glMultMatrixf( m )\n\t\tc = self.GetCollision()\n\t\tc.draw()\n\t\tGL.glPopMatrix()\n\n\n\nclass _materialCallback( object ):\n\tdef __init__(self, id1, id2, begin_function, process_function, end_function, userobject):\n\t\tself.id1 = id1\n\t\tself.id2 = id2\n\t\tself.beginCallback = begin_function\n\t\tself.processCallback = process_function\n\t\tself.endCallback = end_function\n\t\tself.userobject = userobject\n\n\n#extensions to world\nclass World( pynewton.World ):\n\tdef __init__(self ):\n\t\tself.bodyList = []\n\t\tself.newtonBodyLookup = {}\n\t\tself.materialCallbacks = {}\n\t\tself.currentCallback = None\n\t\tself.raycastUserData = None\n\t\tself.raycastCallback = None\n\t\tpynewton.World.__init__(self)\n\n\tdef RegisterBody( self, body ):\n\t\tself.bodyList.append( body )\n\t\tself.newtonBodyLookup[body.IDKey()] = body\n\n\tdef UnregisterBody( self, body ):\n\t\tself.bodyList.remove( bodyList.index(body) )\n\t\tdel self.newtonBodyLookup[body.m_body]\n\n\tdef NewtonBodyToBody( self, ptr ):\n\t\treturn self.newtonBodyLookup[int(ptr)]\n\n\tdef ForEachBodyDo( self, function ):\n\t\tfor body in self.bodyList:\n\t\t\tfunction( body )\n\t\n\tdef RayCast( self, p0, p1, callback, userdata):\n\t\t\"\"\"Casts a ray in the world defined by p0 and p1 and calls callback\n\t\twith the body, normal, collision id, user data and intersection distance\"\"\"\n\t\tself.raycastUserData = userdata\n\t\tself.raycastCallback = callback\n\t\tself.CppRayCast.__call__(p0[0], p0[1], p0[2], p1[0], p1[1], p1[2])\n\t\n\tdef RayCastCallback( self, body, nx, ny, nz, collisionID, intersectParam ):\n\t\t#delegate this off to the user specified function\n\t\treturn self.raycastCallback( body, (nx, ny, nz), collisionID, 
self.raycastUserData, intersectParam )\n\n\tdef MaterialSetCollisionCallback( self, id1, id2, userdata=None, begin_func=None, process_func=None, end_func=None ):\n\t\tself.materialCallbacks[(id1,id2)] = _materialCallback( id1, id2, begin_func, process_func, end_func, userdata)\n\t\tself.RegisterMaterialCallbackBetween( id1, id2)\n\n\tdef GetMaterialCallback(self, material, body1, body2):\n\t\tid1 = body1.MaterialGroupID()\n\t\tid2 = body2.MaterialGroupID()\n\t\tcb = self.materialCallbacks[(id1,id2)]\n\t\treturn cb\n\n\n\tdef MaterialBeginCollision( self, material, b1, b2 ):\n\t\tbody1 = self.newtonBodyLookup[int(b1)]\n\t\tbody2 = self.newtonBodyLookup[int(b2)]\n\t\tself.currentCallback = self.GetMaterialCallback( material, body1, body2 )\n\t\tif self.currentCallback.beginCallback:\n\t\t\tself.currentCallback.beginCallback(material,\n\t\t\t\t\t\t\t\t\t\t\t body1,\n\t\t\t\t\t\t\t\t\t\t\t body2,\n\t\t\t\t\t\t\t\t\t\t\t self.currentCallback.userobject )\n\n\tdef MaterialProcessCollision( self, material, contactHandle ):\n\t\tif self.currentCallback.processCallback:\n\t\t\tself.currentCallback.processCallback(material,\n\t\t\t\t\t\t\t\t\t\t\t\t contactHandle,\n\t\t\t\t\t\t\t\t\t\t\t\t self.currentCallback.userobject )\n\n\tdef MaterialEndCollision( self, material ):\n\t\tif self.currentCallback.endCallback:\n\t\t\tself.currentCallback.endCallback( material,\n\t\t\t\t\t\t\t\t\t\t\t self.currentCallback.userobject )\n\n#collision extensions\nclass CollisionGeometry( pynewton.CollisionGeometry ):\n\tdef draw(self):\n\t\tif not GlPresent: raise \"OpenGL module could not be loaded\"\n\nclass Sphere ( pynewton.Sphere ):\n\tdef __init__(self, world, w, h, d, offset_matrix=None):\n\t\tpynewton.Sphere.__init__( self, world, w, h, d, offset_matrix )\n\t\tself.width = w\n\t\tself.height = h\n\t\tself.depth = d\n\t\tif GLPresent:\n\t\t\tself.quad = GLU.gluNewQuadric()\n\n\tdef draw(self):\n\t\tif not GLPresent: raise \"OpenGL module could not be 
loaded\"\n\t\tGL.glPushMatrix()\n\t\tGL.glScalef( self.width, self.height, self.depth )\n\t\tGL.glPolygonMode( GL.GL_FRONT_AND_BACK, GL.GL_LINE )\n\t\tGLU.gluSphere( self.quad, 1.0, 12, 12 )\n\t\tGL.glPolygonMode( GL.GL_FRONT_AND_BACK, GL.GL_FILL )\n\t\tGL.glPopMatrix()\n\n\n\nclass Box ( pynewton.Box ):\n\tpass\n\nclass Cone ( pynewton.Cone ):\n\tpass\n\nclass Cylinder (pynewton.Cylinder):\n\tpass\n\nclass ChamferCylinder (pynewton.ChamferCylinder):\n\tpass\n\nclass ConvexHull (pynewton.ConvexHull):\n\tpass\n\nclass ConvexHullModifier (pynewton.ConvexHullModifier):\n\tpass\n\nclass NullCollider (pynewton.NullCollider ):\n\tpass\n\nclass TreeCollision (pynewton.TreeCollision):\n\tpass\n\nclass TreeCollisionUserCallback ( pynewton.TreeCollisionUserCallback ):\n\tdef __init__( self, func ):\n\t\tself.callbackFunc = func\n\n\tdef OnCallback (self, bodyWithTreeCollision, body, vertices, vertexstrideInBytes, indexCount, indexArray):\n\t\tif self.callbackFunc != None:\n\t\t\tself.callbackFunc( bodyWithTreeCollision, body, vertices, vertexstrideInBytes, indexCount, indexArray)\n\t\tpass\n\n#material extensions\nclass Material ( pynewton.Material ):\n\tpass\n\n\n#joint extensions\nclass BallJoint ( pynewton.BallJoint ):\n\tdef __init__(self, *args, **kwargs ):\n\t\tself.callback = None\n\t\treturn pynewton.BallJoint.__init__(self, *args, **kwargs )\n\n\tdef OnCallback():\n\t\tif self.callback != None:\n\t\t\tself.callback( )\n\t\tpass\n\nclass Hinge ( pynewton.Hinge ):\n\tdef __init__(self, *args, **kwargs ):\n\t\tself.callback = None\n\t\treturn pynewton.Hinge.__init__( *args, **kwargs )\n\n\tdef OnCallback( desc ):\n\t\tif self.callback != None:\n\t\t\treturn self.callback( desc )\n\t\treturn 0\n\nclass Slider ( pynewton.Slider ):\n\tdef __init__( self, *args, **kwargs ):\n\t\tself.callback = None\n\t\treturn pynewton.Slider.__init__( self, *args, **kwargs )\n\n#\tdef OnCallback( desc ):\n#\t\tif self.callback != None:\n#\t\t\treturn self.callback( desc )\n#\t\treturn 
0\n\nclass Corkscrew ( pynewton.Corkscrew ):\n\tdef __init__(self, *args, **kwargs ):\n\t\tself.callback = None\n\t\tpynewton.Corkscrew.__init__(self, *args, **kwargs )\n\n\tdef OnCallback( desc ):\n\t\tif self.callback != None:\n\t\t\treturn self.callback( desc )\n\t\treturn 0\n\nclass UniversalJoint ( pynewton.UniversalJoint ):\n\tdef __init__(self, *args, **kwargs ):\n\t\tself.callback = None\n\t\treturn pynewton.UniversalJoint.__init__( self, *args, **kwargs )\n\n\tdef OnCallback( desc ):\n\t\tif self.callback != None:\n\t\t\treturn self.callback( desc )\n\t\treturn 0\n\nclass UpVector ( pynewton.UpVector ):\n\tdef __init__(self, *args, **kwargs ):\n\t\tself.callback = None\n\t\treturn pynewton.UpVector.__init__(self, *args, **kwargs )\n\n\tdef OnCallback():\n\t\tif self.callback != None:\n\t\t\tself.callback( )\n\n\nclass Tire ( pynewton.Tire ):\n\tpass\n\nclass Vehicle ( pynewton.Vehicle ):\n\n\tdef __init__(self, *args, **kwargs ):\n\t\tself.tires = []\n\t\tself.UpdateTireCallback = None\n\t\treturn pynewton.Vehicle.__init__(self, *args, **kwargs )\n\n\tdef AddTire ( self, matrix, pin, mass, width, radius, suspensionShock, suspensionSpring, suspensionLength, userData, collisionID):\n\t\ttire = pynewton.Vehicle.AddTire(self, matrix, pin, mass, width, radius, suspensionShock, suspensionSpring, suspensionLength, userData, collisionID)\n\t\ttires.append( tire )\n\t\treturn tire\n\n\tdef RemoveTire( self, tire ):\n\t\tdel tires[tires.index(tire)]\n\t\ttire = pynewton.Vehicle.RemoveTire( self, tire )\n\n\tdef OnCallback( self):\n\t\tif self.UpdateTireCallback != None:\n\t\t\tself.UpdateTireCallback(self)\n\n#Heightmap\nclass HeightField ( pynewton.HeightField ):\n\tpass\n",
"step-ids": [
33,
52,
66,
68,
76
]
}
|
[
33,
52,
66,
68,
76
] |
class UF(object):
    """Weighted quick-union (union-find) with path halving.

    Supports near-constant-time merge and connectivity queries over the
    elements 0..n-1.
    """

    def __init__(self, n):
        # Every element begins as the root of its own singleton component.
        self.parents = list(range(n))
        # weights[r] is the size of the tree rooted at r (union-by-size).
        self.weights = [1] * n
        # Number of disjoint components currently remaining.
        self.n = n

    def find(self, i):
        """Return the root of *i*, halving the path as a side effect."""
        while self.parents[i] != i:
            grandparent = self.parents[self.parents[i]]
            self.parents[i] = grandparent
            i = grandparent
        return i

    def union(self, p, q):
        """Merge the components of *p* and *q*; no-op if already joined."""
        root_p = self.find(p)
        root_q = self.find(q)
        if root_p == root_q:
            return
        # Ensure root_p owns the larger tree, then hang root_q beneath it.
        if self.weights[root_p] < self.weights[root_q]:
            root_p, root_q = root_q, root_p
        self.parents[root_q] = root_p
        self.weights[root_p] += self.weights[root_q]
        self.n -= 1

    def is_connected(self, p, q):
        """Return True if *p* and *q* share a root."""
        return self.find(p) == self.find(q)

    def __len__(self):
        """Return the number of remaining disjoint components."""
        return self.n
if __name__ == '__main__':
    # Smoke test: merge {1,2} and {3,4}, then bridge them via the 2-4 edge.
    dsu = UF(10)
    for a, b in ((1, 2), (3, 4), (2, 4)):
        dsu.union(a, b)
    # 10 elements minus 3 effective merges leaves 7 components.
    assert len(dsu) == 7
    assert dsu.is_connected(1, 4)
    assert not dsu.is_connected(1, 5)
|
normal
|
{
"blob_id": "c8d5b8515a468190d14311118e12a7d414908be6",
"index": 8109,
"step-1": "class UF(object):\n <mask token>\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i == j\n <mask token>\n\n\n<mask token>\n",
"step-2": "class UF(object):\n <mask token>\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i == j\n\n def __len__(self):\n return self.n\n\n\n<mask token>\n",
"step-3": "class UF(object):\n\n def __init__(self, n):\n self.parents = [i for i in range(n)]\n self.weights = [(1) for i in range(n)]\n self.n = n\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i == j\n\n def __len__(self):\n return self.n\n\n\n<mask token>\n",
"step-4": "class UF(object):\n\n def __init__(self, n):\n self.parents = [i for i in range(n)]\n self.weights = [(1) for i in range(n)]\n self.n = n\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i == j\n\n def __len__(self):\n return self.n\n\n\nif __name__ == '__main__':\n uf = UF(10)\n uf.union(1, 2)\n uf.union(3, 4)\n uf.union(2, 4)\n assert len(uf) == 7\n assert uf.is_connected(1, 4)\n assert not uf.is_connected(1, 5)\n",
"step-5": "class UF(object):\n def __init__(self, n):\n self.parents = [i for i in range(n)]\n self.weights = [1 for i in range(n)]\n self.n = n\n\n def find(self, i):\n while i != self.parents[i]:\n self.parents[i] = self.parents[self.parents[i]]\n i = self.parents[i]\n return i\n\n def union(self, p, q):\n i = self.find(p)\n j = self.find(q)\n if i == j:\n return\n\n if self.weights[i] < self.weights[j]:\n self.parents[i] = j\n self.weights[j] += self.weights[i]\n else:\n self.parents[j] = i\n self.weights[i] += self.weights[j]\n\n self.n -= 1\n\n def is_connected(self, p, q):\n i = self.find(p)\n j = self.find(q)\n return i== j\n\n def __len__(self):\n return self.n\n\n\nif __name__ == '__main__':\n uf = UF(10)\n uf.union(1, 2)\n uf.union(3, 4)\n uf.union(2, 4)\n\n assert len(uf) == 7\n\n assert uf.is_connected(1, 4)\n assert not uf.is_connected(1, 5)\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
def quote(items):
return [("'" + item + "'") for item in items]
if module_exists('urllib.parse'):
from urllib.parse import unquote
else:
from urllib import unquote
<|reserved_special_token_0|>
parser.add_argument('url', help='The url to send the request to.')
parser.add_argument('--data')
parser.add_argument('-H', action='append', dest='headers')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--timeout', type=int)
parser.add_argument('-q', '--quiet', action='store_true')
parser.add_argument('--compressed', action='store_true')
<|reserved_special_token_0|>
if args.data:
data = quote(unquote(args.data).split('&'))
method = 'POST'
if 'Content-Type: application/x-www-form-urlencoded' in args.headers:
flags.append('-f')
<|reserved_special_token_0|>
if len(flags) > 0:
httpieArgs.append(' '.join(flags))
httpieArgs.append(method)
httpieArgs.append("'" + args.url + "'")
if headers and len(headers) > 0:
httpieArgs.append(' '.join(headers))
if data and len(data) > 0:
httpieArgs.append(' '.join(data))
if args.verbose:
httpieArgs.append('--verbose')
if args.timeout is not None:
httpieArgs.append('--timeout ' + args.timeout)
<|reserved_special_token_0|>
if not args.quiet:
print('\n' + cmd + '\n')
subprocess.call(cmd, shell=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
def quote(items):
return [("'" + item + "'") for item in items]
if module_exists('urllib.parse'):
from urllib.parse import unquote
else:
from urllib import unquote
parser = argparse.ArgumentParser()
parser.add_argument('url', help='The url to send the request to.')
parser.add_argument('--data')
parser.add_argument('-H', action='append', dest='headers')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--timeout', type=int)
parser.add_argument('-q', '--quiet', action='store_true')
parser.add_argument('--compressed', action='store_true')
args = parser.parse_args()
flags = []
method = 'GET'
data = None
if args.data:
data = quote(unquote(args.data).split('&'))
method = 'POST'
if 'Content-Type: application/x-www-form-urlencoded' in args.headers:
flags.append('-f')
headers = quote(args.headers)
httpieArgs = []
if len(flags) > 0:
httpieArgs.append(' '.join(flags))
httpieArgs.append(method)
httpieArgs.append("'" + args.url + "'")
if headers and len(headers) > 0:
httpieArgs.append(' '.join(headers))
if data and len(data) > 0:
httpieArgs.append(' '.join(data))
if args.verbose:
httpieArgs.append('--verbose')
if args.timeout is not None:
httpieArgs.append('--timeout ' + args.timeout)
cmd = 'http ' + ' '.join(httpieArgs)
if not args.quiet:
print('\n' + cmd + '\n')
subprocess.call(cmd, shell=True)
<|reserved_special_token_1|>
import argparse
import subprocess


def module_exists(module_name):
    """Return True if *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
    except ImportError:
        return False
    else:
        return True


def quote(items):
    """Wrap each item in single quotes for interpolation into a shell string."""
    return [("'" + item + "'") for item in items]


# Python 3 keeps unquote in urllib.parse; Python 2 kept it in urllib.
if module_exists('urllib.parse'):
    from urllib.parse import unquote
else:
    from urllib import unquote

# Translate a curl-style command line into an equivalent HTTPie invocation.
parser = argparse.ArgumentParser()
parser.add_argument('url', help='The url to send the request to.')
parser.add_argument('--data')
# default=[] so the membership test and quote() below are safe when no -H
# flag is given (argparse's append action otherwise leaves headers as None).
parser.add_argument('-H', action='append', dest='headers', default=[])
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--timeout', type=int)
parser.add_argument('-q', '--quiet', action='store_true')
# Accepted for curl compatibility but intentionally ignored.
parser.add_argument('--compressed', action='store_true')
args = parser.parse_args()
flags = []
method = 'GET'
data = None
if args.data:
    # curl's --data implies a POST body; split key=value pairs for HTTPie.
    data = quote(unquote(args.data).split('&'))
    method = 'POST'
if 'Content-Type: application/x-www-form-urlencoded' in args.headers:
    # -f makes HTTPie serialize the request body as a form instead of JSON.
    flags.append('-f')
headers = quote(args.headers)
httpieArgs = []
if len(flags) > 0:
    httpieArgs.append(' '.join(flags))
httpieArgs.append(method)
httpieArgs.append("'" + args.url + "'")
if headers and len(headers) > 0:
    httpieArgs.append(' '.join(headers))
if data and len(data) > 0:
    httpieArgs.append(' '.join(data))
if args.verbose:
    httpieArgs.append('--verbose')
if args.timeout is not None:
    # args.timeout is an int (type=int above); str() avoids a TypeError
    # from concatenating str + int.
    httpieArgs.append('--timeout ' + str(args.timeout))
cmd = 'http ' + ' '.join(httpieArgs)
if not args.quiet:
    print('\n' + cmd + '\n')
subprocess.call(cmd, shell=True)
<|reserved_special_token_1|>
#!/usr/bin/env python

import argparse
import subprocess

def module_exists(module_name):
    """Return True if *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
    except ImportError:
        return False
    else:
        return True

def quote(items):
    """Wrap each item in single quotes for interpolation into a shell string."""
    return ["'" + item + "'" for item in items]

# Python 3 keeps unquote in urllib.parse; Python 2 kept it in urllib.
if module_exists('urllib.parse'):
    from urllib.parse import unquote
else:
    from urllib import unquote

# Translate a curl-style command line into an equivalent HTTPie invocation.
parser = argparse.ArgumentParser()
parser.add_argument("url", help="The url to send the request to.")
parser.add_argument("--data")
# default=[] so the membership test and quote() below are safe when no -H
# flag is given (argparse's append action otherwise leaves headers as None).
parser.add_argument("-H", action="append", dest='headers', default=[])

# HTTPie arguments
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--timeout", type=int)

# curlie arguments
parser.add_argument("-q", "--quiet", action="store_true")

# ignored curl arguments
parser.add_argument("--compressed", action="store_true")

args = parser.parse_args()

flags = []
method = "GET"

data = None
if args.data:
    # curl's --data implies a POST body; split key=value pairs for HTTPie.
    data = quote(unquote(args.data).split("&"))
    method = "POST"

if "Content-Type: application/x-www-form-urlencoded" in args.headers:
    # -f makes HTTPie serialize the request body as a form instead of JSON.
    flags.append("-f")

headers = quote(args.headers)

httpieArgs = []

if len(flags) > 0:
    httpieArgs.append(" ".join(flags))

httpieArgs.append(method)
httpieArgs.append("'" + args.url + "'")

if headers and len(headers) > 0:
    httpieArgs.append(" ".join(headers))

if data and len(data) > 0:
    httpieArgs.append(' '.join(data))

if args.verbose:
    httpieArgs.append("--verbose")

if args.timeout is not None:
    # args.timeout is an int (type=int above); str() avoids a TypeError
    # from concatenating str + int.
    httpieArgs.append("--timeout " + str(args.timeout))

cmd = "http " + " ".join(httpieArgs)

if not args.quiet:
    print("\n" + cmd + "\n")

subprocess.call(cmd, shell=True)
|
flexible
|
{
"blob_id": "68371acc58da6d986d94d746abb4fea541d65fdd",
"index": 3384,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef module_exists(module_name):\n try:\n __import__(module_name)\n except ImportError:\n return False\n else:\n return True\n\n\ndef quote(items):\n return [(\"'\" + item + \"'\") for item in items]\n\n\nif module_exists('urllib.parse'):\n from urllib.parse import unquote\nelse:\n from urllib import unquote\n<mask token>\nparser.add_argument('url', help='The url to send the request to.')\nparser.add_argument('--data')\nparser.add_argument('-H', action='append', dest='headers')\nparser.add_argument('--verbose', action='store_true')\nparser.add_argument('--timeout', type=int)\nparser.add_argument('-q', '--quiet', action='store_true')\nparser.add_argument('--compressed', action='store_true')\n<mask token>\nif args.data:\n data = quote(unquote(args.data).split('&'))\n method = 'POST'\nif 'Content-Type: application/x-www-form-urlencoded' in args.headers:\n flags.append('-f')\n<mask token>\nif len(flags) > 0:\n httpieArgs.append(' '.join(flags))\nhttpieArgs.append(method)\nhttpieArgs.append(\"'\" + args.url + \"'\")\nif headers and len(headers) > 0:\n httpieArgs.append(' '.join(headers))\nif data and len(data) > 0:\n httpieArgs.append(' '.join(data))\nif args.verbose:\n httpieArgs.append('--verbose')\nif args.timeout is not None:\n httpieArgs.append('--timeout ' + args.timeout)\n<mask token>\nif not args.quiet:\n print('\\n' + cmd + '\\n')\nsubprocess.call(cmd, shell=True)\n",
"step-3": "<mask token>\n\n\ndef module_exists(module_name):\n try:\n __import__(module_name)\n except ImportError:\n return False\n else:\n return True\n\n\ndef quote(items):\n return [(\"'\" + item + \"'\") for item in items]\n\n\nif module_exists('urllib.parse'):\n from urllib.parse import unquote\nelse:\n from urllib import unquote\nparser = argparse.ArgumentParser()\nparser.add_argument('url', help='The url to send the request to.')\nparser.add_argument('--data')\nparser.add_argument('-H', action='append', dest='headers')\nparser.add_argument('--verbose', action='store_true')\nparser.add_argument('--timeout', type=int)\nparser.add_argument('-q', '--quiet', action='store_true')\nparser.add_argument('--compressed', action='store_true')\nargs = parser.parse_args()\nflags = []\nmethod = 'GET'\ndata = None\nif args.data:\n data = quote(unquote(args.data).split('&'))\n method = 'POST'\nif 'Content-Type: application/x-www-form-urlencoded' in args.headers:\n flags.append('-f')\nheaders = quote(args.headers)\nhttpieArgs = []\nif len(flags) > 0:\n httpieArgs.append(' '.join(flags))\nhttpieArgs.append(method)\nhttpieArgs.append(\"'\" + args.url + \"'\")\nif headers and len(headers) > 0:\n httpieArgs.append(' '.join(headers))\nif data and len(data) > 0:\n httpieArgs.append(' '.join(data))\nif args.verbose:\n httpieArgs.append('--verbose')\nif args.timeout is not None:\n httpieArgs.append('--timeout ' + args.timeout)\ncmd = 'http ' + ' '.join(httpieArgs)\nif not args.quiet:\n print('\\n' + cmd + '\\n')\nsubprocess.call(cmd, shell=True)\n",
"step-4": "import argparse\nimport subprocess\n\n\ndef module_exists(module_name):\n try:\n __import__(module_name)\n except ImportError:\n return False\n else:\n return True\n\n\ndef quote(items):\n return [(\"'\" + item + \"'\") for item in items]\n\n\nif module_exists('urllib.parse'):\n from urllib.parse import unquote\nelse:\n from urllib import unquote\nparser = argparse.ArgumentParser()\nparser.add_argument('url', help='The url to send the request to.')\nparser.add_argument('--data')\nparser.add_argument('-H', action='append', dest='headers')\nparser.add_argument('--verbose', action='store_true')\nparser.add_argument('--timeout', type=int)\nparser.add_argument('-q', '--quiet', action='store_true')\nparser.add_argument('--compressed', action='store_true')\nargs = parser.parse_args()\nflags = []\nmethod = 'GET'\ndata = None\nif args.data:\n data = quote(unquote(args.data).split('&'))\n method = 'POST'\nif 'Content-Type: application/x-www-form-urlencoded' in args.headers:\n flags.append('-f')\nheaders = quote(args.headers)\nhttpieArgs = []\nif len(flags) > 0:\n httpieArgs.append(' '.join(flags))\nhttpieArgs.append(method)\nhttpieArgs.append(\"'\" + args.url + \"'\")\nif headers and len(headers) > 0:\n httpieArgs.append(' '.join(headers))\nif data and len(data) > 0:\n httpieArgs.append(' '.join(data))\nif args.verbose:\n httpieArgs.append('--verbose')\nif args.timeout is not None:\n httpieArgs.append('--timeout ' + args.timeout)\ncmd = 'http ' + ' '.join(httpieArgs)\nif not args.quiet:\n print('\\n' + cmd + '\\n')\nsubprocess.call(cmd, shell=True)\n",
"step-5": "#!/usr/bin/env python\n\nimport argparse\nimport subprocess\n\ndef module_exists(module_name):\n try:\n __import__(module_name)\n except ImportError:\n return False\n else:\n return True\n\ndef quote(items):\n return [\"'\" + item + \"'\" for item in items]\n\nif module_exists('urllib.parse'):\n from urllib.parse import unquote\nelse:\n from urllib import unquote\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"url\", help=\"The url to send the request to.\")\nparser.add_argument(\"--data\")\nparser.add_argument(\"-H\", action=\"append\", dest='headers')\n\n# HTTPie arguments\nparser.add_argument(\"--verbose\", action=\"store_true\")\nparser.add_argument(\"--timeout\", type=int)\n\n# curlie arguments\nparser.add_argument(\"-q\", \"--quiet\", action=\"store_true\")\n\n# ignored curl arguments\nparser.add_argument(\"--compressed\", action=\"store_true\")\n\nargs = parser.parse_args()\n\nflags = []\nmethod = \"GET\"\n\ndata = None\nif args.data:\n data = quote(unquote(args.data).split(\"&\"))\n method = \"POST\"\n\nif \"Content-Type: application/x-www-form-urlencoded\" in args.headers:\n flags.append(\"-f\")\n\nheaders = quote(args.headers)\n\nhttpieArgs = []\n\nif len(flags) > 0:\n httpieArgs.append(\" \".join(flags))\n\nhttpieArgs.append(method)\nhttpieArgs.append(\"'\" + args.url + \"'\")\n\nif headers and len(headers) > 0:\n httpieArgs.append(\" \".join(headers))\n\nif data and len(data) > 0:\n httpieArgs.append(' '.join(data))\n\nif args.verbose:\n httpieArgs.append(\"--verbose\")\n\nif args.timeout is not None:\n httpieArgs.append(\"--timeout \" + args.timeout)\n\ncmd = \"http \" + \" \".join(httpieArgs)\n\nif not args.quiet:\n print(\"\\n\" + cmd + \"\\n\")\n\nsubprocess.call(cmd, shell=True)\n",
"step-ids": [
0,
3,
4,
5,
6
]
}
|
[
0,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class Ui_FindResultWindow(object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_FindResultWindow(object):
<|reserved_special_token_0|>
def retranslateUi(self, FindResultWindow):
_translate = QtCore.QCoreApplication.translate
FindResultWindow.setWindowTitle(_translate('FindResultWindow',
'Информация о приборах'))
self.btnEdit.setText(_translate('FindResultWindow', 'Изменить данные'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Ui_FindResultWindow(object):
    """Qt Designer-generated UI for the instrument search-results window.

    Generated code: exact call order is preserved and regeneration from the
    .ui file will overwrite manual edits.
    """

    def setupUi(self, FindResultWindow):
        # Build the widget tree on the given main window (801x546).
        FindResultWindow.setObjectName('FindResultWindow')
        FindResultWindow.resize(801, 546)
        self.centralwidget = QtWidgets.QWidget(FindResultWindow)
        self.centralwidget.setObjectName('centralwidget')
        # "Edit data" button; starts disabled until a result is selected
        # (presumably toggled by application code — TODO confirm in callers).
        self.btnEdit = QtWidgets.QPushButton(self.centralwidget)
        self.btnEdit.setEnabled(False)
        self.btnEdit.setGeometry(QtCore.QRect(330, 470, 151, 51))
        self.btnEdit.setCheckable(False)
        self.btnEdit.setAutoDefault(False)
        self.btnEdit.setObjectName('btnEdit')
        # List widget that displays the search results.
        self.listWidgetFindResult = QtWidgets.QListWidget(self.centralwidget)
        self.listWidgetFindResult.setGeometry(QtCore.QRect(10, 10, 781, 441))
        self.listWidgetFindResult.setObjectName('listWidgetFindResult')
        FindResultWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(FindResultWindow)
        QtCore.QMetaObject.connectSlotsByName(FindResultWindow)

    def retranslateUi(self, FindResultWindow):
        # Apply translatable strings (Russian: title "Device information",
        # button "Edit data").
        _translate = QtCore.QCoreApplication.translate
        FindResultWindow.setWindowTitle(_translate('FindResultWindow',
            'Информация о приборах'))
        self.btnEdit.setText(_translate('FindResultWindow', 'Изменить данные'))
<|reserved_special_token_1|>
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FindResultWindow(object):
def setupUi(self, FindResultWindow):
FindResultWindow.setObjectName('FindResultWindow')
FindResultWindow.resize(801, 546)
self.centralwidget = QtWidgets.QWidget(FindResultWindow)
self.centralwidget.setObjectName('centralwidget')
self.btnEdit = QtWidgets.QPushButton(self.centralwidget)
self.btnEdit.setEnabled(False)
self.btnEdit.setGeometry(QtCore.QRect(330, 470, 151, 51))
self.btnEdit.setCheckable(False)
self.btnEdit.setAutoDefault(False)
self.btnEdit.setObjectName('btnEdit')
self.listWidgetFindResult = QtWidgets.QListWidget(self.centralwidget)
self.listWidgetFindResult.setGeometry(QtCore.QRect(10, 10, 781, 441))
self.listWidgetFindResult.setObjectName('listWidgetFindResult')
FindResultWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(FindResultWindow)
QtCore.QMetaObject.connectSlotsByName(FindResultWindow)
def retranslateUi(self, FindResultWindow):
_translate = QtCore.QCoreApplication.translate
FindResultWindow.setWindowTitle(_translate('FindResultWindow',
'Информация о приборах'))
self.btnEdit.setText(_translate('FindResultWindow', 'Изменить данные'))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'find_result_window.ui'
#
# Created by: PyQt5 UI code generator 5.12.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_FindResultWindow(object):
    """Auto-generated PyQt5 UI class for the device search-result window.

    NOTE(review): produced by the Qt UI compiler (pyuic5 5.12.2) from
    ``find_result_window.ui`` — regenerate from the .ui file rather than
    editing this class by hand, as the header warning states.
    """

    def setupUi(self, FindResultWindow):
        """Build and lay out all widgets on *FindResultWindow* (a QMainWindow)."""
        FindResultWindow.setObjectName("FindResultWindow")
        FindResultWindow.resize(801, 546)
        self.centralwidget = QtWidgets.QWidget(FindResultWindow)
        self.centralwidget.setObjectName("centralwidget")
        # "Edit data" button (Russian caption set in retranslateUi). It is
        # created disabled — presumably enabled later by application code
        # once a result is selected; confirm against the window controller.
        self.btnEdit = QtWidgets.QPushButton(self.centralwidget)
        self.btnEdit.setEnabled(False)
        self.btnEdit.setGeometry(QtCore.QRect(330, 470, 151, 51))
        self.btnEdit.setCheckable(False)
        self.btnEdit.setAutoDefault(False)
        self.btnEdit.setObjectName("btnEdit")
        # List widget that displays the search results above the button.
        self.listWidgetFindResult = QtWidgets.QListWidget(self.centralwidget)
        self.listWidgetFindResult.setGeometry(QtCore.QRect(10, 10, 781, 441))
        self.listWidgetFindResult.setObjectName("listWidgetFindResult")
        FindResultWindow.setCentralWidget(self.centralwidget)

        self.retranslateUi(FindResultWindow)
        QtCore.QMetaObject.connectSlotsByName(FindResultWindow)

    def retranslateUi(self, FindResultWindow):
        """Apply translatable text (window title and button caption)."""
        _translate = QtCore.QCoreApplication.translate
        # Title: "Information about devices"; button: "Edit data" (Russian).
        FindResultWindow.setWindowTitle(_translate("FindResultWindow", "Информация о приборах"))
        self.btnEdit.setText(_translate("FindResultWindow", "Изменить данные"))
|
flexible
|
{
"blob_id": "2fdbf418b5cec50ee6568897e0e749681efeef6b",
"index": 6584,
"step-1": "<mask token>\n\n\nclass Ui_FindResultWindow(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Ui_FindResultWindow(object):\n <mask token>\n\n def retranslateUi(self, FindResultWindow):\n _translate = QtCore.QCoreApplication.translate\n FindResultWindow.setWindowTitle(_translate('FindResultWindow',\n 'Информация о приборах'))\n self.btnEdit.setText(_translate('FindResultWindow', 'Изменить данные'))\n",
"step-3": "<mask token>\n\n\nclass Ui_FindResultWindow(object):\n\n def setupUi(self, FindResultWindow):\n FindResultWindow.setObjectName('FindResultWindow')\n FindResultWindow.resize(801, 546)\n self.centralwidget = QtWidgets.QWidget(FindResultWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.btnEdit = QtWidgets.QPushButton(self.centralwidget)\n self.btnEdit.setEnabled(False)\n self.btnEdit.setGeometry(QtCore.QRect(330, 470, 151, 51))\n self.btnEdit.setCheckable(False)\n self.btnEdit.setAutoDefault(False)\n self.btnEdit.setObjectName('btnEdit')\n self.listWidgetFindResult = QtWidgets.QListWidget(self.centralwidget)\n self.listWidgetFindResult.setGeometry(QtCore.QRect(10, 10, 781, 441))\n self.listWidgetFindResult.setObjectName('listWidgetFindResult')\n FindResultWindow.setCentralWidget(self.centralwidget)\n self.retranslateUi(FindResultWindow)\n QtCore.QMetaObject.connectSlotsByName(FindResultWindow)\n\n def retranslateUi(self, FindResultWindow):\n _translate = QtCore.QCoreApplication.translate\n FindResultWindow.setWindowTitle(_translate('FindResultWindow',\n 'Информация о приборах'))\n self.btnEdit.setText(_translate('FindResultWindow', 'Изменить данные'))\n",
"step-4": "from PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_FindResultWindow(object):\n\n def setupUi(self, FindResultWindow):\n FindResultWindow.setObjectName('FindResultWindow')\n FindResultWindow.resize(801, 546)\n self.centralwidget = QtWidgets.QWidget(FindResultWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.btnEdit = QtWidgets.QPushButton(self.centralwidget)\n self.btnEdit.setEnabled(False)\n self.btnEdit.setGeometry(QtCore.QRect(330, 470, 151, 51))\n self.btnEdit.setCheckable(False)\n self.btnEdit.setAutoDefault(False)\n self.btnEdit.setObjectName('btnEdit')\n self.listWidgetFindResult = QtWidgets.QListWidget(self.centralwidget)\n self.listWidgetFindResult.setGeometry(QtCore.QRect(10, 10, 781, 441))\n self.listWidgetFindResult.setObjectName('listWidgetFindResult')\n FindResultWindow.setCentralWidget(self.centralwidget)\n self.retranslateUi(FindResultWindow)\n QtCore.QMetaObject.connectSlotsByName(FindResultWindow)\n\n def retranslateUi(self, FindResultWindow):\n _translate = QtCore.QCoreApplication.translate\n FindResultWindow.setWindowTitle(_translate('FindResultWindow',\n 'Информация о приборах'))\n self.btnEdit.setText(_translate('FindResultWindow', 'Изменить данные'))\n",
"step-5": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'find_result_window.ui'\n#\n# Created by: PyQt5 UI code generator 5.12.2\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_FindResultWindow(object):\n def setupUi(self, FindResultWindow):\n FindResultWindow.setObjectName(\"FindResultWindow\")\n FindResultWindow.resize(801, 546)\n self.centralwidget = QtWidgets.QWidget(FindResultWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.btnEdit = QtWidgets.QPushButton(self.centralwidget)\n self.btnEdit.setEnabled(False)\n self.btnEdit.setGeometry(QtCore.QRect(330, 470, 151, 51))\n self.btnEdit.setCheckable(False)\n self.btnEdit.setAutoDefault(False)\n self.btnEdit.setObjectName(\"btnEdit\")\n self.listWidgetFindResult = QtWidgets.QListWidget(self.centralwidget)\n self.listWidgetFindResult.setGeometry(QtCore.QRect(10, 10, 781, 441))\n self.listWidgetFindResult.setObjectName(\"listWidgetFindResult\")\n FindResultWindow.setCentralWidget(self.centralwidget)\n\n self.retranslateUi(FindResultWindow)\n QtCore.QMetaObject.connectSlotsByName(FindResultWindow)\n\n def retranslateUi(self, FindResultWindow):\n _translate = QtCore.QCoreApplication.translate\n FindResultWindow.setWindowTitle(_translate(\"FindResultWindow\", \"Информация о приборах\"))\n self.btnEdit.setText(_translate(\"FindResultWindow\", \"Изменить данные\"))\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
# encoding: utf-8
import os
import argparse
import coaddBatchCutout as cbc
def run(args):
    """Run the coadd batch cutout over every object in the input catalog.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line options; must provide ``root``, ``incat`` and
        the column-name/flag attributes consumed below.

    Raises
    ------
    Exception
        If ``args.incat`` does not point to an existing catalog file.
    """
    # NOTE(review): the original bound unused locals (min/max/Q) that also
    # shadowed the builtins ``min``/``max``; they have been removed.
    if os.path.isfile(args.incat):
        cbc.coaddBatchCutFull(args.root, args.incat,
                              filter=args.filter,
                              idField=args.idField,
                              prefix=args.prefix,
                              zCutoutSize=args.zCutout,
                              zField=args.zField,
                              onlyColor=args.onlyColor,
                              noColor=args.noColor,
                              saveSrc=args.saveSrc,
                              makeDir=args.makeDir,
                              raField=args.raField,
                              decField=args.decField)
    else:
        raise Exception("### Can not find the input catalog: %s" % args.incat)
if __name__ == '__main__':
    # Command-line front end: collect the data-repository root, the input
    # catalog, and the catalog column names / behaviour flags, then hand
    # everything to run().
    parser = argparse.ArgumentParser()
    parser.add_argument("root", help="Root directory of data repository")
    parser.add_argument("incat", help="The input catalog for cutout")
    parser.add_argument("-s", '--size', dest='size', type=int,
                        help="Half size of the cutout box", default=200)
    parser.add_argument('-f', '--filter', dest='filter', help="Filter",
                        default='HSC-I')
    parser.add_argument('-cf', '--color-filters', dest='colorFilters',
                        help="Choice of filters for color images", default='riz')
    parser.add_argument('-sf', '--size-field', dest='sizeField',
                        help="Column name for cutout size", default='cutout_size')
    parser.add_argument('-info1', '--infoField1', dest='infoField1',
                        help="Column name for first extra information",
                        default=None)
    parser.add_argument('-info2', '--infoField2', dest='infoField2',
                        help="Column name for second extra information",
                        default=None)
    parser.add_argument('-oc', '--onlyColor', action="store_true", dest='onlyColor',
                        default=False)
    parser.add_argument('-safe', '--safe', action="store_true", dest='safe',
                        default=False)
    parser.add_argument('-clean', '--clean', action="store_true", dest='clean',
                        default=False)
    parser.add_argument('-v', '--verbose', action="store_true", dest='verbose',
                        default=False)
    # NOTE(review): the four flags below combine action="store_true" with
    # default=True, so they are always True and cannot be disabled from the
    # command line. If opting out was intended, they should be
    # action="store_false" (or paired --no-* flags) — confirm intent.
    parser.add_argument('-src', '--src', action="store_true", dest='saveSrc',
                        default=True)
    parser.add_argument('-makeDir', '--makeDir', action="store_true", dest='makeDir',
                        default=True)
    parser.add_argument('-zc', '--zCutoutSize', action="store_true", dest='zCutout',
                        default=True)
    parser.add_argument('-nc', '--noColor', action="store_true", dest='noColor',
                        default=True)
    parser.add_argument('-p', '--prefix', dest='prefix',
                        help='Prefix of the output file',
                        default='redBCG')
    # Catalog column names for object id, coordinates and redshift.
    parser.add_argument('-id', '--id', dest='idField', help="Column name for ID",
                        default='ID_CLUSTER')
    parser.add_argument('-ra', '--ra', dest='raField', help="Column name for RA",
                        default='RA_BCG')
    parser.add_argument('-dec', '--dec', dest='decField', help="Column name for DEC",
                        default='DEC_BCG')
    parser.add_argument('-z', '--redshift', dest='zField', help="Column name for z",
                        default='Z_LAMBDA')
    args = parser.parse_args()

    run(args)
|
normal
|
{
"blob_id": "c0503536672aa824eaf0d19b9d4b5431ef910432",
"index": 1028,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef run(args):\n min = -0.0\n max = 0.5\n Q = 10\n if os.path.isfile(args.incat):\n cbc.coaddBatchCutFull(args.root, args.incat, filter=args.filter,\n idField=args.idField, prefix=args.prefix, zCutoutSize=args.\n zCutout, zField=args.zField, onlyColor=args.onlyColor, noColor=\n args.noColor, saveSrc=args.saveSrc, makeDir=args.makeDir,\n raField=args.raField, decField=args.decField)\n else:\n raise Exception('### Can not find the input catalog: %s' % args.incat)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef run(args):\n min = -0.0\n max = 0.5\n Q = 10\n if os.path.isfile(args.incat):\n cbc.coaddBatchCutFull(args.root, args.incat, filter=args.filter,\n idField=args.idField, prefix=args.prefix, zCutoutSize=args.\n zCutout, zField=args.zField, onlyColor=args.onlyColor, noColor=\n args.noColor, saveSrc=args.saveSrc, makeDir=args.makeDir,\n raField=args.raField, decField=args.decField)\n else:\n raise Exception('### Can not find the input catalog: %s' % args.incat)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('root', help='Root directory of data repository')\n parser.add_argument('incat', help='The input catalog for cutout')\n parser.add_argument('-s', '--size', dest='size', type=int, help=\n 'Half size of the cutout box', default=200)\n parser.add_argument('-f', '--filter', dest='filter', help='Filter',\n default='HSC-I')\n parser.add_argument('-cf', '--color-filters', dest='colorFilters', help\n ='Choice of filters for color images', default='riz')\n parser.add_argument('-sf', '--size-field', dest='sizeField', help=\n 'Column name for cutout size', default='cutout_size')\n parser.add_argument('-info1', '--infoField1', dest='infoField1', help=\n 'Column name for first extra information', default=None)\n parser.add_argument('-info2', '--infoField2', dest='infoField2', help=\n 'Column name for second extra information', default=None)\n parser.add_argument('-oc', '--onlyColor', action='store_true', dest=\n 'onlyColor', default=False)\n parser.add_argument('-safe', '--safe', action='store_true', dest='safe',\n default=False)\n parser.add_argument('-clean', '--clean', action='store_true', dest=\n 'clean', default=False)\n parser.add_argument('-v', '--verbose', action='store_true', dest=\n 'verbose', default=False)\n parser.add_argument('-src', '--src', action='store_true', dest=\n 'saveSrc', default=True)\n parser.add_argument('-makeDir', '--makeDir', action='store_true', dest=\n 'makeDir', 
default=True)\n parser.add_argument('-zc', '--zCutoutSize', action='store_true', dest=\n 'zCutout', default=True)\n parser.add_argument('-nc', '--noColor', action='store_true', dest=\n 'noColor', default=True)\n parser.add_argument('-p', '--prefix', dest='prefix', help=\n 'Prefix of the output file', default='redBCG')\n parser.add_argument('-id', '--id', dest='idField', help=\n 'Column name for ID', default='ID_CLUSTER')\n parser.add_argument('-ra', '--ra', dest='raField', help=\n 'Column name for RA', default='RA_BCG')\n parser.add_argument('-dec', '--dec', dest='decField', help=\n 'Column name for DEC', default='DEC_BCG')\n parser.add_argument('-z', '--redshift', dest='zField', help=\n 'Column name for z', default='Z_LAMBDA')\n args = parser.parse_args()\n run(args)\n",
"step-4": "import os\nimport argparse\nimport coaddBatchCutout as cbc\n\n\ndef run(args):\n min = -0.0\n max = 0.5\n Q = 10\n if os.path.isfile(args.incat):\n cbc.coaddBatchCutFull(args.root, args.incat, filter=args.filter,\n idField=args.idField, prefix=args.prefix, zCutoutSize=args.\n zCutout, zField=args.zField, onlyColor=args.onlyColor, noColor=\n args.noColor, saveSrc=args.saveSrc, makeDir=args.makeDir,\n raField=args.raField, decField=args.decField)\n else:\n raise Exception('### Can not find the input catalog: %s' % args.incat)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('root', help='Root directory of data repository')\n parser.add_argument('incat', help='The input catalog for cutout')\n parser.add_argument('-s', '--size', dest='size', type=int, help=\n 'Half size of the cutout box', default=200)\n parser.add_argument('-f', '--filter', dest='filter', help='Filter',\n default='HSC-I')\n parser.add_argument('-cf', '--color-filters', dest='colorFilters', help\n ='Choice of filters for color images', default='riz')\n parser.add_argument('-sf', '--size-field', dest='sizeField', help=\n 'Column name for cutout size', default='cutout_size')\n parser.add_argument('-info1', '--infoField1', dest='infoField1', help=\n 'Column name for first extra information', default=None)\n parser.add_argument('-info2', '--infoField2', dest='infoField2', help=\n 'Column name for second extra information', default=None)\n parser.add_argument('-oc', '--onlyColor', action='store_true', dest=\n 'onlyColor', default=False)\n parser.add_argument('-safe', '--safe', action='store_true', dest='safe',\n default=False)\n parser.add_argument('-clean', '--clean', action='store_true', dest=\n 'clean', default=False)\n parser.add_argument('-v', '--verbose', action='store_true', dest=\n 'verbose', default=False)\n parser.add_argument('-src', '--src', action='store_true', dest=\n 'saveSrc', default=True)\n parser.add_argument('-makeDir', '--makeDir', 
action='store_true', dest=\n 'makeDir', default=True)\n parser.add_argument('-zc', '--zCutoutSize', action='store_true', dest=\n 'zCutout', default=True)\n parser.add_argument('-nc', '--noColor', action='store_true', dest=\n 'noColor', default=True)\n parser.add_argument('-p', '--prefix', dest='prefix', help=\n 'Prefix of the output file', default='redBCG')\n parser.add_argument('-id', '--id', dest='idField', help=\n 'Column name for ID', default='ID_CLUSTER')\n parser.add_argument('-ra', '--ra', dest='raField', help=\n 'Column name for RA', default='RA_BCG')\n parser.add_argument('-dec', '--dec', dest='decField', help=\n 'Column name for DEC', default='DEC_BCG')\n parser.add_argument('-z', '--redshift', dest='zField', help=\n 'Column name for z', default='Z_LAMBDA')\n args = parser.parse_args()\n run(args)\n",
"step-5": "#!/usr/bin/env python\n# encoding: utf-8\n\nimport os\nimport argparse\nimport coaddBatchCutout as cbc\n\n\ndef run(args):\n\n min = -0.0\n max = 0.5\n Q = 10\n\n if os.path.isfile(args.incat):\n\n cbc.coaddBatchCutFull(args.root, args.incat,\n filter=args.filter,\n idField=args.idField,\n prefix=args.prefix,\n zCutoutSize=args.zCutout,\n zField=args.zField,\n onlyColor=args.onlyColor,\n noColor=args.noColor,\n saveSrc=args.saveSrc,\n makeDir=args.makeDir,\n raField=args.raField,\n decField=args.decField)\n else:\n raise Exception(\"### Can not find the input catalog: %s\" % args.incat)\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"root\", help=\"Root directory of data repository\")\n parser.add_argument(\"incat\", help=\"The input catalog for cutout\")\n parser.add_argument(\"-s\", '--size', dest='size', type=int,\n help=\"Half size of the cutout box\", default=200)\n parser.add_argument('-f', '--filter', dest='filter', help=\"Filter\",\n default='HSC-I')\n parser.add_argument('-cf', '--color-filters', dest='colorFilters',\n help=\"Choice of filters for color images\", default='riz')\n parser.add_argument('-sf', '--size-field', dest='sizeField',\n help=\"Column name for cutout size\", default='cutout_size')\n parser.add_argument('-info1', '--infoField1', dest='infoField1',\n help=\"Column name for first extra information\",\n default=None)\n parser.add_argument('-info2', '--infoField2', dest='infoField2',\n help=\"Column name for second extra information\",\n default=None)\n parser.add_argument('-oc', '--onlyColor', action=\"store_true\", dest='onlyColor',\n default=False)\n parser.add_argument('-safe', '--safe', action=\"store_true\", dest='safe',\n default=False)\n parser.add_argument('-clean', '--clean', action=\"store_true\", dest='clean',\n default=False)\n parser.add_argument('-v', '--verbose', action=\"store_true\", dest='verbose',\n default=False)\n parser.add_argument('-src', '--src', 
action=\"store_true\", dest='saveSrc',\n default=True)\n parser.add_argument('-makeDir', '--makeDir', action=\"store_true\", dest='makeDir',\n default=True)\n parser.add_argument('-zc', '--zCutoutSize', action=\"store_true\", dest='zCutout',\n default=True)\n parser.add_argument('-nc', '--noColor', action=\"store_true\", dest='noColor',\n default=True)\n parser.add_argument('-p', '--prefix', dest='prefix',\n help='Prefix of the output file',\n default='redBCG')\n parser.add_argument('-id', '--id', dest='idField', help=\"Column name for ID\",\n default='ID_CLUSTER')\n parser.add_argument('-ra', '--ra', dest='raField', help=\"Column name for RA\",\n default='RA_BCG')\n parser.add_argument('-dec', '--dec', dest='decField', help=\"Column name for DEC\",\n default='DEC_BCG')\n parser.add_argument('-z', '--redshift', dest='zField', help=\"Column name for z\",\n default='Z_LAMBDA')\n args = parser.parse_args()\n\n run(args)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
__author__ = "Yong Peng"
__version__ = "1.0"
import time
import re
import getpass
from netmiko import (
ConnectHandler,
NetmikoTimeoutException,
NetmikoAuthenticationException,
)
with open('./device_list.txt','r') as f:
device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0] # read the device list.
print("Data will be collected on below switches:")
for device in device_list:
print(device)
go = input("\nPress y to continue: ")
if go != "y" and go != "Y":
exit(2)
u_id = input("Please input login ID:")
factor_1 = getpass.getpass("ID Password for login:")
# cmd_4_IOS = ['show version | in from','show stack','show flash',\
# 'show license', 'show boot-preference',\
# 'show ip bgp summ', 'show interface brief',\
# 'show ip inter', 'show vlan',\
# 'show vlan brief', 'show lag', 'show lag brief',\
# 'show lldp neighbor', 'show 802-1w', 'show ip route',\
# 'show run']
# cmd_4_IOS = ['show version | in from', 'show flash | in Pri Code|Sec Code']
# cmd_4_IOS = ['show vlan brief', 'show ip interface', 'show version | in from', 'show ip osp inter brief',
# 'show run']n
# cmd_4_IOS = ['show vlan id 464']
with open("temp.txt",'r') as f:
cmd_4_IOS = [i.strip() for i in f.readlines()]
def send_show_command(device, commands):
    """Run *commands* on *device* over SSH and save the output to a file.

    Parameters
    ----------
    device : dict
        Netmiko connection parameters; must contain at least ``host``.
    commands : list[str]
        Show commands to execute in order.

    Output goes to ``c:/script/output/<host>.txt``, one section per command,
    separated by a line of ``+`` characters. A connection failure is
    reported but does not abort the overall collection run.
    """
    host = str(device['host'])
    output_path = 'c:/script/output/' + host + '.txt'
    succeeded = True
    # Context manager guarantees the output file is closed even if the
    # SSH session raises.
    with open(output_path, 'w') as result:
        try:
            with ConnectHandler(**device) as ssh:
                ssh.enable()
                for command in commands:
                    output = ssh.send_command(command, strip_command=False,
                                              strip_prompt=False)
                    result.write(output + "\n" + 30 * '+' + "\n" + "\n")
        except Exception as error:
            # Best effort: log the failure and continue with other devices.
            print(error)
            succeeded = False
    # Bug fix: the original printed the caller's *global* loop variable
    # ``i`` here; use the device's own host instead so the function does
    # not depend on the calling scope.
    if succeeded:
        print("Data collection on %s is done. \n \n" % host)
    else:
        print("Data collection for %s is NOT done. \n \n" % host)
# Build the netmiko connection parameters for each device in turn and
# collect the command output. The DUO code is prompted per device because
# it is a one-time second factor.
switch = {}
for i in device_list:
    switch["device_type"] = "ruckus_fastiron"
    switch["host"] = i
    switch["username"] = u_id
    factor_2 = input("Trying to login to %s, enter DUO Code:" % (i))
    # Password is the static factor concatenated with the per-device DUO code.
    switch["password"] = str(factor_1) + str(factor_2)
    # Bug fix: the original line ended with a stray trailing comma
    # (``switch['secret'] = '',``), which stored the tuple ``('',)``
    # instead of an empty string for the enable secret.
    switch['secret'] = ''
    switch['port'] = 22
    send_show_command(switch, cmd_4_IOS)

print("All collection is done.")
|
normal
|
{
"blob_id": "31a0c9a143a06ac86c8e8616fb273a0af844a352",
"index": 6895,
"step-1": "<mask token>\n\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False,\n strip_prompt=False)\n result.write(output + '\\n' + 30 * '+' + '\\n' + '\\n')\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print('Data collection on %s is done. \\n \\n' % i)\n else:\n print('Data collection for %s is NOT done. \\n \\n' % i)\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('./device_list.txt', 'r') as f:\n device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0]\nprint('Data will be collected on below switches:')\nfor device in device_list:\n print(device)\n<mask token>\nif go != 'y' and go != 'Y':\n exit(2)\n<mask token>\nwith open('temp.txt', 'r') as f:\n cmd_4_IOS = [i.strip() for i in f.readlines()]\n\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False,\n strip_prompt=False)\n result.write(output + '\\n' + 30 * '+' + '\\n' + '\\n')\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print('Data collection on %s is done. \\n \\n' % i)\n else:\n print('Data collection for %s is NOT done. \\n \\n' % i)\n\n\n<mask token>\nfor i in device_list:\n switch['device_type'] = 'ruckus_fastiron'\n switch['host'] = i\n switch['username'] = u_id\n factor_2 = input('Trying to login to %s, enter DUO Code:' % i)\n switch['password'] = str(factor_1) + str(factor_2)\n switch['secret'] = '',\n switch['port'] = 22\n send_show_command(switch, cmd_4_IOS)\nprint('All collection is done.')\n",
"step-3": "__author__ = 'Yong Peng'\n__version__ = '1.0'\n<mask token>\nwith open('./device_list.txt', 'r') as f:\n device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0]\nprint('Data will be collected on below switches:')\nfor device in device_list:\n print(device)\ngo = input(\"\"\"\nPress y to continue: \"\"\")\nif go != 'y' and go != 'Y':\n exit(2)\nu_id = input('Please input login ID:')\nfactor_1 = getpass.getpass('ID Password for login:')\nwith open('temp.txt', 'r') as f:\n cmd_4_IOS = [i.strip() for i in f.readlines()]\n\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False,\n strip_prompt=False)\n result.write(output + '\\n' + 30 * '+' + '\\n' + '\\n')\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print('Data collection on %s is done. \\n \\n' % i)\n else:\n print('Data collection for %s is NOT done. \\n \\n' % i)\n\n\nswitch = {}\nfor i in device_list:\n switch['device_type'] = 'ruckus_fastiron'\n switch['host'] = i\n switch['username'] = u_id\n factor_2 = input('Trying to login to %s, enter DUO Code:' % i)\n switch['password'] = str(factor_1) + str(factor_2)\n switch['secret'] = '',\n switch['port'] = 22\n send_show_command(switch, cmd_4_IOS)\nprint('All collection is done.')\n",
"step-4": "__author__ = 'Yong Peng'\n__version__ = '1.0'\nimport time\nimport re\nimport getpass\nfrom netmiko import ConnectHandler, NetmikoTimeoutException, NetmikoAuthenticationException\nwith open('./device_list.txt', 'r') as f:\n device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0]\nprint('Data will be collected on below switches:')\nfor device in device_list:\n print(device)\ngo = input(\"\"\"\nPress y to continue: \"\"\")\nif go != 'y' and go != 'Y':\n exit(2)\nu_id = input('Please input login ID:')\nfactor_1 = getpass.getpass('ID Password for login:')\nwith open('temp.txt', 'r') as f:\n cmd_4_IOS = [i.strip() for i in f.readlines()]\n\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False,\n strip_prompt=False)\n result.write(output + '\\n' + 30 * '+' + '\\n' + '\\n')\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print('Data collection on %s is done. \\n \\n' % i)\n else:\n print('Data collection for %s is NOT done. \\n \\n' % i)\n\n\nswitch = {}\nfor i in device_list:\n switch['device_type'] = 'ruckus_fastiron'\n switch['host'] = i\n switch['username'] = u_id\n factor_2 = input('Trying to login to %s, enter DUO Code:' % i)\n switch['password'] = str(factor_1) + str(factor_2)\n switch['secret'] = '',\n switch['port'] = 22\n send_show_command(switch, cmd_4_IOS)\nprint('All collection is done.')\n",
"step-5": "\n__author__ = \"Yong Peng\"\n__version__ = \"1.0\"\n\n\nimport time\nimport re\nimport getpass\nfrom netmiko import (\n ConnectHandler,\n NetmikoTimeoutException,\n NetmikoAuthenticationException,\n)\n\nwith open('./device_list.txt','r') as f:\n device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0] # read the device list.\n\n\nprint(\"Data will be collected on below switches:\")\nfor device in device_list:\n print(device)\n\ngo = input(\"\\nPress y to continue: \")\n\nif go != \"y\" and go != \"Y\":\n exit(2)\n\nu_id = input(\"Please input login ID:\")\nfactor_1 = getpass.getpass(\"ID Password for login:\")\n\n\n# cmd_4_IOS = ['show version | in from','show stack','show flash',\\\n# 'show license', 'show boot-preference',\\\n# 'show ip bgp summ', 'show interface brief',\\\n# 'show ip inter', 'show vlan',\\\n# 'show vlan brief', 'show lag', 'show lag brief',\\\n# 'show lldp neighbor', 'show 802-1w', 'show ip route',\\\n# 'show run']\n# cmd_4_IOS = ['show version | in from', 'show flash | in Pri Code|Sec Code']\n# cmd_4_IOS = ['show vlan brief', 'show ip interface', 'show version | in from', 'show ip osp inter brief',\n# 'show run']n\n# cmd_4_IOS = ['show vlan id 464']\nwith open(\"temp.txt\",'r') as f:\n cmd_4_IOS = [i.strip() for i in f.readlines()]\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False, strip_prompt=False)\n result.write(output + \"\\n\" + 30 * '+' + \"\\n\" + \"\\n\")\n\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print(\"Data collection on %s is done. \\n \\n\" % (i))\n else:\n print(\"Data collection for %s is NOT done. 
\\n \\n\" % (i))\n\nswitch = {}\nfor i in device_list:\n switch[\"device_type\"] = \"ruckus_fastiron\"\n switch[\"host\"] = i\n switch[\"username\"] = u_id\n factor_2 = input(\"Trying to login to %s, enter DUO Code:\"%(i))\n switch[\"password\"] = str(factor_1) + str(factor_2)\n switch['secret'] = '',\n switch['port'] = 22\n send_show_command(switch, cmd_4_IOS)\n\nprint(\"All collection is done.\")",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
db.news.drop()
db.news.insert_many(scrape(info, url))
<|reserved_special_token_0|>
db.images.drop()
db.images.insert_many(scrape(info, url))
<|reserved_special_token_0|>
db.weather.drop()
db.weather.insert_many(scrape(info, url))
<|reserved_special_token_0|>
db.facts.drop()
<|reserved_special_token_0|>
db.facts.insert(df_json_list)
<|reserved_special_token_0|>
db.hemis.drop()
db.hemis.insert_many(scrape(info, url))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
db = client.mars_db
url = (
'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
)
info = 'News'
db.news.drop()
db.news.insert_many(scrape(info, url))
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
info = 'Images'
db.images.drop()
db.images.insert_many(scrape(info, url))
url = 'https://twitter.com/marswxreport?lang=en'
info = 'Weather'
db.weather.drop()
db.weather.insert_many(scrape(info, url))
url = 'https://space-facts.com/mars/'
info = 'Facts'
db.facts.drop()
df = pd.DataFrame(scrape(info, url))
df_json = df.to_json()
df_json_list = json.loads(df_json).values()
db.facts.insert(df_json_list)
url = (
'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
)
info = 'Hemis'
db.hemis.drop()
db.hemis.insert_many(scrape(info, url))
<|reserved_special_token_1|>
import pymongo
import pandas as pd
from scrape_mars import scrape
import json
conn = 'mongodb://localhost:27017'
client = pymongo.MongoClient(conn)
db = client.mars_db
url = (
'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
)
info = 'News'
db.news.drop()
db.news.insert_many(scrape(info, url))
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
info = 'Images'
db.images.drop()
db.images.insert_many(scrape(info, url))
url = 'https://twitter.com/marswxreport?lang=en'
info = 'Weather'
db.weather.drop()
db.weather.insert_many(scrape(info, url))
url = 'https://space-facts.com/mars/'
info = 'Facts'
db.facts.drop()
df = pd.DataFrame(scrape(info, url))
df_json = df.to_json()
df_json_list = json.loads(df_json).values()
db.facts.insert(df_json_list)
url = (
'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
)
info = 'Hemis'
db.hemis.drop()
db.hemis.insert_many(scrape(info, url))
<|reserved_special_token_1|>
# Scrape several Mars-related sources with scrape_mars.scrape() and load the
# results into a local MongoDB database, one collection per source. Each
# collection is dropped first so reruns do not accumulate duplicates.
import pymongo
import pandas as pd
from scrape_mars import scrape
import json

# Create connection variable
conn = 'mongodb://localhost:27017'

# Pass connection to the pymongo instance.
client = pymongo.MongoClient(conn)

# Connect to a database. Will create one if not already available.
db = client.mars_db

#News
url = "https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest"
info = "News"

# Drops collection if available to remove duplicates
db.news.drop()

#Insert info
#print(scrape(info,url))
db.news.insert_many(scrape(info,url))

#Images
url = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
info = "Images"

# Drops collection if available to remove duplicates
db.images.drop()

#Insert info
#print(scrape(info,url))
db.images.insert_many(scrape(info,url))

#Weather
url = "https://twitter.com/marswxreport?lang=en"
info = "Weather"

# Drops collection if available to remove duplicates
db.weather.drop()

#Insert info
#print(scrape(info,url))
db.weather.insert_many(scrape(info,url))

#Facts
url = "https://space-facts.com/mars/"
info = "Facts"

# Drops collection if available to remove duplicates
db.facts.drop()

#Insert info
#print(scrape(info,url))
# Round-trip the scraped table through JSON so Mongo receives plain dicts.
df = pd.DataFrame(scrape(info,url))
df_json = df.to_json()
df_json_list = json.loads(df_json).values()
# NOTE(review): Collection.insert() is deprecated and was removed in
# PyMongo 4 — insert_many() is the modern equivalent; confirm the pinned
# pymongo version before upgrading.
db.facts.insert(df_json_list)

#Hemispheres
url = "https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
info = "Hemis"

# Drops collection if available to remove duplicates
db.hemis.drop()

#Insert info
#print(scrape(info,url))
db.hemis.insert_many(scrape(info,url))
|
flexible
|
{
"blob_id": "e3ac8039ffb6787b0e3e80b234c2689c66a184bf",
"index": 1704,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.news.drop()\ndb.news.insert_many(scrape(info, url))\n<mask token>\ndb.images.drop()\ndb.images.insert_many(scrape(info, url))\n<mask token>\ndb.weather.drop()\ndb.weather.insert_many(scrape(info, url))\n<mask token>\ndb.facts.drop()\n<mask token>\ndb.facts.insert(df_json_list)\n<mask token>\ndb.hemis.drop()\ndb.hemis.insert_many(scrape(info, url))\n",
"step-3": "<mask token>\nconn = 'mongodb://localhost:27017'\nclient = pymongo.MongoClient(conn)\ndb = client.mars_db\nurl = (\n 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'\n )\ninfo = 'News'\ndb.news.drop()\ndb.news.insert_many(scrape(info, url))\nurl = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\ninfo = 'Images'\ndb.images.drop()\ndb.images.insert_many(scrape(info, url))\nurl = 'https://twitter.com/marswxreport?lang=en'\ninfo = 'Weather'\ndb.weather.drop()\ndb.weather.insert_many(scrape(info, url))\nurl = 'https://space-facts.com/mars/'\ninfo = 'Facts'\ndb.facts.drop()\ndf = pd.DataFrame(scrape(info, url))\ndf_json = df.to_json()\ndf_json_list = json.loads(df_json).values()\ndb.facts.insert(df_json_list)\nurl = (\n 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n )\ninfo = 'Hemis'\ndb.hemis.drop()\ndb.hemis.insert_many(scrape(info, url))\n",
"step-4": "import pymongo\nimport pandas as pd\nfrom scrape_mars import scrape\nimport json\nconn = 'mongodb://localhost:27017'\nclient = pymongo.MongoClient(conn)\ndb = client.mars_db\nurl = (\n 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'\n )\ninfo = 'News'\ndb.news.drop()\ndb.news.insert_many(scrape(info, url))\nurl = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'\ninfo = 'Images'\ndb.images.drop()\ndb.images.insert_many(scrape(info, url))\nurl = 'https://twitter.com/marswxreport?lang=en'\ninfo = 'Weather'\ndb.weather.drop()\ndb.weather.insert_many(scrape(info, url))\nurl = 'https://space-facts.com/mars/'\ninfo = 'Facts'\ndb.facts.drop()\ndf = pd.DataFrame(scrape(info, url))\ndf_json = df.to_json()\ndf_json_list = json.loads(df_json).values()\ndb.facts.insert(df_json_list)\nurl = (\n 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'\n )\ninfo = 'Hemis'\ndb.hemis.drop()\ndb.hemis.insert_many(scrape(info, url))\n",
"step-5": "import pymongo\nimport pandas as pd\nfrom scrape_mars import scrape\nimport json\n\n\n# Create connection variable\nconn = 'mongodb://localhost:27017'\n\n\n# Pass connection to the pymongo instance.\nclient = pymongo.MongoClient(conn)\n\n# Connect to a database. Will create one if not already available.\ndb = client.mars_db\n\n\n#News\nurl = \"https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest\"\ninfo = \"News\"\n# Drops collection if available to remove duplicates\ndb.news.drop()\n#Insert info\n#print(scrape(info,url))\ndb.news.insert_many(scrape(info,url))\n\n#Images\nurl = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\ninfo = \"Images\"\n# Drops collection if available to remove duplicates\ndb.images.drop()\n#Insert info\n#print(scrape(info,url))\ndb.images.insert_many(scrape(info,url))\n\n#Weather\nurl = \"https://twitter.com/marswxreport?lang=en\"\ninfo = \"Weather\"\n# Drops collection if available to remove duplicates\ndb.weather.drop()\n#Insert info\n#print(scrape(info,url))\ndb.weather.insert_many(scrape(info,url))\n\n#Facts\nurl = \"https://space-facts.com/mars/\"\ninfo = \"Facts\"\n# Drops collection if available to remove duplicates\ndb.facts.drop()\n#Insert info\n#print(scrape(info,url))\ndf = pd.DataFrame(scrape(info,url))\ndf_json = df.to_json()\ndf_json_list = json.loads(df_json).values()\ndb.facts.insert(df_json_list)\n\n#Hemispheres\nurl = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\ninfo = \"Hemis\"\n# Drops collection if available to remove duplicates\ndb.hemis.drop()\n#Insert info\n#print(scrape(info,url))\ndb.hemis.insert_many(scrape(info,url))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from SPARQLWrapper import SPARQLWrapper, JSON

# Shared client for the local SPARQL endpoint (dataset `/ds`, e.g. a
# local Apache Jena Fuseki server). Every query below runs against it.
sparql = SPARQLWrapper(
    'http://localhost:3030/ds/query'
    )

# All eight public functions issue the same query shape: the Spanish
# rdfs:label of every subclass of one parent class in the pizza ontology.
# Doubled braces ({{ }}) are literal braces escaped for str.format().
_LABEL_QUERY = '''
    PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
    PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>
    SELECT DISTINCT ?name
    WHERE {{
        ?s rdfs:subClassOf saidi:{parent} .
        ?s rdfs:label ?name
        FILTER (lang(?name) = 'es')
    }}
'''


def _get_labels(parent):
    """Run the label query for subclasses of saidi:<parent>.

    parent: local name of the parent class in the saidi ontology.
    Returns the SPARQL JSON result as a Python dict.
    """
    sparql.setQuery(_LABEL_QUERY.format(parent=parent))
    sparql.setReturnFormat(JSON)
    return sparql.query().convert()


#Pizzas
def get_response_pizzas():
    """Spanish names of all pizzas (subclasses of NamePizza)."""
    return _get_labels('NamePizza')


#CarnesTopping
def get_response_carnes():
    """Spanish names of all meat toppings."""
    return _get_labels('CarnesTopping')


#EmbutidosTopping
def get_response_embutidos():
    """Spanish names of all cold-cut (sausage) toppings."""
    return _get_labels('EmbutidosTopping')


#EspeciasTopping
def get_response_especias():
    """Spanish names of all spice toppings."""
    return _get_labels('EspeciasTopping')


#FrutasTopping
def get_response_frutas():
    """Spanish names of all fruit toppings."""
    return _get_labels('FrutasTopping')


#QuesosTopping
def get_response_quesos():
    """Spanish names of all cheese toppings."""
    return _get_labels('QuesosTopping')


#SalsasTopping
def get_response_salsas():
    """Spanish names of all sauce toppings."""
    return _get_labels('SalsasTopping')


#VegetalesTopping
def get_response_vegetales():
    """Spanish names of all vegetable toppings."""
    return _get_labels('VegetalesTopping')


if __name__ == '__main__':
    get_response_pizzas()
    get_response_carnes()
    get_response_embutidos()
    get_response_especias()
    get_response_frutas()
    get_response_quesos()
    get_response_salsas()
    get_response_vegetales()
|
normal
|
{
"blob_id": "9690366a88a87951f5c51902118888cce8159ffc",
"index": 7219,
"step-1": "<mask token>\n\n\ndef get_response_carnes():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:CarnesTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_embutidos():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:EmbutidosTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_especias():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:EspeciasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_frutas():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:FrutasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\n<mask token>\n\n\ndef get_response_salsas():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n 
WHERE { \n ?s rdfs:subClassOf saidi:SalsasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_vegetales():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:VegetalesTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_response_carnes():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:CarnesTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_embutidos():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:EmbutidosTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_especias():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:EspeciasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_frutas():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:FrutasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_quesos():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s 
rdfs:subClassOf saidi:QuesosTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_salsas():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:SalsasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_vegetales():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:VegetalesTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_response_pizzas():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:NamePizza .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_carnes():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:CarnesTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_embutidos():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:EmbutidosTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_especias():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:EspeciasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_frutas():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s 
rdfs:subClassOf saidi:FrutasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_quesos():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:QuesosTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_salsas():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:SalsasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_vegetales():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:VegetalesTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\n<mask token>\n",
"step-4": "<mask token>\nsparql = SPARQLWrapper('http://localhost:3030/ds/query')\n\n\ndef get_response_pizzas():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:NamePizza .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_carnes():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:CarnesTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_embutidos():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:EmbutidosTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_especias():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:EspeciasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_frutas():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: 
<http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:FrutasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_quesos():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:QuesosTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_salsas():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:SalsasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\ndef get_response_vegetales():\n sparql.setQuery(\n \"\"\"\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:VegetalesTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n \"\"\"\n )\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\nif __name__ == '__main__':\n get_response_pizzas()\n get_response_carnes()\n get_response_embutidos()\n get_response_especias()\n get_response_frutas()\n get_response_quesos()\n get_response_salsas()\n get_response_vegetales()\n",
"step-5": "from SPARQLWrapper import SPARQLWrapper, JSON\n\nsparql = SPARQLWrapper(\n 'http://localhost:3030/ds/query'\n \n )\n\n#Pizzas\ndef get_response_pizzas():\n sparql.setQuery('''\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:NamePizza .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n ''')\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n#CarnesTopping\ndef get_response_carnes():\n sparql.setQuery('''\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:CarnesTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n ''')\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n#EmbutidosTopping\ndef get_response_embutidos():\n sparql.setQuery('''\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:EmbutidosTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n ''')\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n#EspeciasTopping\ndef get_response_especias():\n sparql.setQuery('''\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:EspeciasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n ''')\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\n#FrutasTopping\ndef get_response_frutas():\n sparql.setQuery('''\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: 
<http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:FrutasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n ''')\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n#QuesosTopping\ndef get_response_quesos():\n sparql.setQuery('''\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:QuesosTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n ''')\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n#SalsasTopping\ndef get_response_salsas():\n sparql.setQuery('''\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:SalsasTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n ''')\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\n#VegetalesTopping\ndef get_response_vegetales():\n sparql.setQuery('''\n PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\n PREFIX saidi: <http://www.semanticweb.org/japor/ontologies/2021/5/PizzasLojanitas#>\n SELECT DISTINCT ?name \n WHERE { \n ?s rdfs:subClassOf saidi:VegetalesTopping .\n ?s rdfs:label ?name\n FILTER (lang(?name) = 'es')\n }\n\n ''')\n sparql.setReturnFormat(JSON)\n qres = sparql.query().convert()\n return qres\n\n\nif __name__ == '__main__':\n get_response_pizzas()\n get_response_carnes()\n get_response_embutidos()\n get_response_especias()\n get_response_frutas()\n get_response_quesos()\n get_response_salsas()\n get_response_vegetales()\n\n",
"step-ids": [
6,
7,
8,
10,
12
]
}
|
[
6,
7,
8,
10,
12
] |
#encoding: utf-8
"""
Desc:
Author: Makoto OKITA
Date: 2016/09/03
"""
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
import itertools
"""
基本処理
"""
class RnnAnalize(Chain):
    """LSTM sequence classifier: embeds token ids, feeds them through an
    LSTM, and maps the final hidden state to class scores.

    v -- vocabulary size (EmbedID input size)
    k -- embedding / hidden-state size
    y -- number of output classes
    """
    def __init__(self, v, k, y):
        super(RnnAnalize, self).__init__(
            embed = L.EmbedID(v, k),
            H = L.LSTM(k, k),
            W = L.Linear(k, y),
        )
    def __call__(self, x, y):
        """Compute the training loss for token-id sequence x with label y.

        Cross-entropy against y is accumulated at every time step, not
        only the last. Returns (accumulated loss, scores at final step).
        """
        accum_loss = None
        v, k = self.embed.W.data.shape  # (unused here, kept for reference)
        self.H.reset_state()  # start each sequence from a fresh LSTM state
        for i in range(len(x)):
            nx = Variable(np.array([x[i]], dtype=np.int32))
            ny = Variable(np.array([y], dtype=np.int32))
            wx = self.embed(nx)
            wh = self.H(wx)
            ww = self.W(wh)
            loss = F.softmax_cross_entropy(ww, ny)
            accum_loss = loss if accum_loss is None else accum_loss + loss
        return accum_loss, ww
    def forward(self, x):
        """Return class scores after consuming the whole sequence x.

        NOTE(review): does not reset the LSTM state itself — callers
        (AnazlizeTrainer.predict) reset it first. If x is empty, ww is
        never bound and this raises NameError.
        """
        for i in range(len(x)):
            nx = Variable(np.array([x[i]], dtype=np.int32))
            wx = self.embed(nx)
            wh = self.H(wx)
            ww = self.W(wh)
        return ww
"""
学習・予測処理
"""
class AnazlizeTrainer():
    """Wraps an RnnAnalize model with an optimizer and exposes
    training (practice), prediction (predict), and persistence
    (save / load) helpers.
    """
    def __init__(self, v, k, y):
        self.model = RnnAnalize(v, k, y)
        #self.model.compute_accuracy = False # training may be faster with accuracy disabled when it is not needed?
        self.optimizer = optimizers.Adam() # Adam, AdaGrad, AdaDelta, RMSpropGraves, SGD, MomentumSGD
        self.optimizer.setup(self.model)
        #self.optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005)) #??? weight-decay regularization ??? apparently not persisted by save()!?
    ### Training
    def practice(self, x, y):
        """One optimization step on sequence x with label y.

        Returns (loss, scores at final step).
        """
        self.model.H.reset_state()
        self.model.zerograds()  # clear gradients from the previous step
        loss, y = self.model(x, y)
        loss.backward()
        #loss.unchain_backward() # truncate
        self.optimizer.update()
        return loss, y
    ### Prediction
    def predict(self, x):
        """Return softmax class probabilities for sequence x."""
        self.model.H.reset_state()
        self.model.zerograds()
        y = self.model.forward(x)
        return F.softmax(y)
    def save(self, filename):
        # Persist both the model and the optimizer state as .npz files.
        serializers.save_npz(filename +'_model.dat', self.model)
        serializers.save_npz(filename +'_optimizer.dat', self.optimizer)
    def load(self, filename):
        # Restore model and optimizer previously written by save().
        serializers.load_npz(filename +'_model.dat', self.model)
        serializers.load_npz(filename +'_optimizer.dat', self.optimizer)
## Test Main
# NOTE(review): this is Python 2 code (print statements, raw_input, map
# returning a list). Run modes: `train` fits the model on trainData and
# saves it; `predict` loads it and classifies sequences read from stdin.
if __name__ == "__main__":
    import sys
    import io
    import re
    arg1 = sys.argv[1] if len(sys.argv) == 2 else None
    # Each inner list is one training sequence; its index is its class label.
    trainData = [[4], [1,2,3], [10,11,12], [1,22,23], [1], [5],[6],[7],[8],[9] ]
    #for data in baseData:
    #    for i in itertools.permutations(data):
    #        trainData.append( list(i) )
    print trainData
    print len(trainData)
    #dim_in = 1000
    #dim_mid = 100
    #dim_out = len(trainData)
    dim_in = len(trainData)
    dim_mid = 50
    dim_out = len(trainData)
    epoch = 1
    ## Training
    if arg1 == 'train':
        print "training..."
        train = AnazlizeTrainer(dim_in, dim_mid, dim_out)
        for j in range(epoch):
            i = 0
            for ids in trainData:
                #pp(ids)
                if True:
                    # Train on every permutation of the sequence so order
                    # variations map to the same class label i.
                    for l in itertools.permutations(ids):
                        x = list(l)
                        #print(x)
                        #loss, y = train.practice(x[::-1], i)
                        loss, y = train.practice(x, i)
                else:
                    loss, y = train.practice(ids[::-1], i)
                    #loss, y = train.practice(ids, i)
                #print loss.data
                i += 1
            #if j % 10 == 0:
            #    print loss.data
            print loss.data
        train.save('train_analize')
    ## Prediction
    elif arg1 == 'predict':
        print 'predict...'
        train = AnazlizeTrainer(dim_in, dim_mid, dim_out)
        train.load('train_analize')
        while True:
            #train = AnazlizeTrainer(dim_in, dim_mid, dim_out)
            #train.load('train_analize')
            # Read a whitespace-separated id sequence from stdin;
            # the sequence is reversed before being fed to the model.
            ids = map(int, raw_input().split())
            print ids
            y = train.predict(ids[::-1])
            print y.data.argmax(1)[0]
            # Rank all classes by descending probability.
            rank = y.data.argsort()[0]
            uprank = map(int, rank[::-1])
            print uprank
            #print y.data[0]
            for i in uprank:
                print '%d, %2f' % (i, y.data[0][i])
            print ''
|
normal
|
{
"blob_id": "13e89e13f88ac306a62be3390f5292665f128a4d",
"index": 9332,
"step-1": "#encoding: utf-8\n\"\"\"\nDesc: \nAuthor: Makoto OKITA\nDate: 2016/09/03 \n\"\"\"\nimport numpy as np\nimport chainer\nfrom chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils\nfrom chainer import Link, Chain, ChainList\nimport chainer.functions as F\nimport chainer.links as L\nimport itertools\n\n\n\"\"\"\n基本処理\n\"\"\"\nclass RnnAnalize(Chain):\n def __init__(self, v, k, y):\n super(RnnAnalize, self).__init__(\n embed = L.EmbedID(v, k),\n H = L.LSTM(k, k),\n W = L.Linear(k, y),\n )\n\n\n def __call__(self, x, y):\n accum_loss = None\n v, k = self.embed.W.data.shape\n self.H.reset_state() \n for i in range(len(x)):\n nx = Variable(np.array([x[i]], dtype=np.int32))\n ny = Variable(np.array([y], dtype=np.int32))\n \n wx = self.embed(nx)\n wh = self.H(wx)\n ww = self.W(wh)\n \n loss = F.softmax_cross_entropy(ww, ny)\n accum_loss = loss if accum_loss is None else accum_loss + loss\n return accum_loss, ww\n\n def forward(self, x):\n for i in range(len(x)):\n nx = Variable(np.array([x[i]], dtype=np.int32))\n \n wx = self.embed(nx)\n wh = self.H(wx)\n ww = self.W(wh)\n \n return ww\n \n \n\"\"\"\n学習・予測処理\n\"\"\"\nclass AnazlizeTrainer():\n def __init__(self, v, k, y):\n self.model = RnnAnalize(v, k, y)\n #self.model.compute_accuracy = False # accuracyが必要ない場合はFalseした方が学習が速い?\n self.optimizer = optimizers.Adam() # Adam, AdaGrad, AdaDelta, RMSpropGraves, SGD, MomentumSGD\n self.optimizer.setup(self.model)\n #self.optimizer.add_hook(chainer.optimizer.WeightDecay(0.0005)) #??? 荷重減衰による正則化 ??? 
saveで保存されない!?\n\n \n ### 学習\n def practice(self, x, y):\n self.model.H.reset_state()\n self.model.zerograds() \n loss, y = self.model(x, y)\n loss.backward()\n #loss.unchain_backward() # truncate \n self.optimizer.update()\n return loss, y\n \n \n ### 予測\n def predict(self, x):\n self.model.H.reset_state()\n self.model.zerograds() \n y = self.model.forward(x)\n return F.softmax(y)\n \n \n def save(self, filename):\n #modelとoptimizerを保存\n serializers.save_npz(filename +'_model.dat', self.model)\n serializers.save_npz(filename +'_optimizer.dat', self.optimizer)\n \n \n def load(self, filename):\n serializers.load_npz(filename +'_model.dat', self.model)\n serializers.load_npz(filename +'_optimizer.dat', self.optimizer)\n\n \n## Test Main\nif __name__ == \"__main__\":\n import sys\n import io\n import re\n arg1 = sys.argv[1] if len(sys.argv) == 2 else None \n \n trainData = [[4], [1,2,3], [10,11,12], [1,22,23], [1], [5],[6],[7],[8],[9] ]\n #for data in baseData:\n # for i in itertools.permutations(data):\n # trainData.append( list(i) )\n print trainData\n print len(trainData)\n \n \n #dim_in = 1000\n #dim_mid = 100\n #dim_out = len(trainData)\n dim_in = len(trainData)\n dim_mid = 50\n dim_out = len(trainData)\n epoch = 1\n \n ## 学習\n if arg1 == 'train':\n print \"training...\"\n train = AnazlizeTrainer(dim_in, dim_mid, dim_out)\n for j in range(epoch):\n i = 0\n for ids in trainData:\n #pp(ids)\n if True:\n for l in itertools.permutations(ids):\n x = list(l)\n #print(x)\n #loss, y = train.practice(x[::-1], i)\n loss, y = train.practice(x, i)\n else:\n loss, y = train.practice(ids[::-1], i)\n #loss, y = train.practice(ids, i)\n #print loss.data\n i += 1\n #if j % 10 == 0:\n # print loss.data\n print loss.data\n train.save('train_analize')\n \n\n \n ## 予測\n elif arg1 == 'predict':\n print 'predict...'\n train = AnazlizeTrainer(dim_in, dim_mid, dim_out)\n train.load('train_analize')\n while True: \n #train = AnazlizeTrainer(dim_in, dim_mid, dim_out)\n 
#train.load('train_analize')\n \n ids = map(int, raw_input().split())\n print ids\n y = train.predict(ids[::-1])\n print y.data.argmax(1)[0]\n rank = y.data.argsort()[0]\n uprank = map(int, rank[::-1])\n print uprank\n #print y.data[0]\n\n for i in uprank:\n print '%d, %2f' % (i, y.data[0][i])\n print ''",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def solution(record):
answer = []
arr = dict()
history = []
for i in record:
tmp = i.split()
if tmp[0] == 'Enter':
arr[tmp[1]] = tmp[2]
history.append([tmp[1], '님이 들어왔습니다.'])
elif tmp[0] == 'Leave':
history.append([tmp[1], '님이 나갔습니다.'])
elif tmp[0] == 'Change':
arr[tmp[1]] = tmp[2]
for i in history:
answer.append(arr[i[0]] + i[1])
return answer
<|reserved_special_token_1|>
def solution(record):
answer = []
arr = dict()
history = []
for i in record:
tmp = i.split()
if tmp[0] == "Enter" :
arr[tmp[1]] = tmp[2]
history.append([tmp[1], "님이 들어왔습니다."])
elif tmp[0] == "Leave" :
history.append([tmp[1], "님이 나갔습니다."])
elif tmp[0] == "Change" :
arr[tmp[1]] = tmp[2]
for i in history :
answer.append(arr[i[0]] + i[1])
return answer
|
flexible
|
{
"blob_id": "d9f66cc3ba40292c49da08d7573d4c605a2771ae",
"index": 3730,
"step-1": "<mask token>\n",
"step-2": "def solution(record):\n answer = []\n arr = dict()\n history = []\n for i in record:\n tmp = i.split()\n if tmp[0] == 'Enter':\n arr[tmp[1]] = tmp[2]\n history.append([tmp[1], '님이 들어왔습니다.'])\n elif tmp[0] == 'Leave':\n history.append([tmp[1], '님이 나갔습니다.'])\n elif tmp[0] == 'Change':\n arr[tmp[1]] = tmp[2]\n for i in history:\n answer.append(arr[i[0]] + i[1])\n return answer\n",
"step-3": "def solution(record):\n answer = []\n arr = dict()\n history = []\n for i in record:\n tmp = i.split()\n if tmp[0] == \"Enter\" :\n arr[tmp[1]] = tmp[2]\n history.append([tmp[1], \"님이 들어왔습니다.\"])\n elif tmp[0] == \"Leave\" :\n history.append([tmp[1], \"님이 나갔습니다.\"])\n elif tmp[0] == \"Change\" :\n arr[tmp[1]] = tmp[2]\n\n for i in history :\n answer.append(arr[i[0]] + i[1])\n return answer",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#PortableKanban 4.3.6578.38136 - Encrypted Password Retrieval
#Python3 -m pip install des
#or
#pip install des
import json
import base64
from des import * #python3 -m pip install des, pip install des
import sys
def decode(hash):
hash = base64.b64decode(hash.encode('utf-8'))
key = DesKey(b"7ly6UznJ")
return key.decrypt(hash,initial=b"XuVUm5fR",padding=True).decode('utf-8')
print(decode('XXXXXXXXXXXXXXXXXXXXXX'))
#change this to your encrypted key
|
normal
|
{
"blob_id": "136215a3ba99f74160373181c458db9bec4bb6b7",
"index": 977,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef decode(hash):\n hash = base64.b64decode(hash.encode('utf-8'))\n key = DesKey(b'7ly6UznJ')\n return key.decrypt(hash, initial=b'XuVUm5fR', padding=True).decode('utf-8')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef decode(hash):\n hash = base64.b64decode(hash.encode('utf-8'))\n key = DesKey(b'7ly6UznJ')\n return key.decrypt(hash, initial=b'XuVUm5fR', padding=True).decode('utf-8')\n\n\nprint(decode('XXXXXXXXXXXXXXXXXXXXXX'))\n",
"step-4": "import json\nimport base64\nfrom des import *\nimport sys\n\n\ndef decode(hash):\n hash = base64.b64decode(hash.encode('utf-8'))\n key = DesKey(b'7ly6UznJ')\n return key.decrypt(hash, initial=b'XuVUm5fR', padding=True).decode('utf-8')\n\n\nprint(decode('XXXXXXXXXXXXXXXXXXXXXX'))\n",
"step-5": "#PortableKanban 4.3.6578.38136 - Encrypted Password Retrieval\r\n#Python3 -m pip install des\r\n#or\r\n#pip install des\r\n\r\nimport json\r\nimport base64\r\nfrom des import * #python3 -m pip install des, pip install des\r\nimport sys\r\n\r\ndef decode(hash):\r\n\thash = base64.b64decode(hash.encode('utf-8'))\r\n\tkey = DesKey(b\"7ly6UznJ\")\r\n\treturn key.decrypt(hash,initial=b\"XuVUm5fR\",padding=True).decode('utf-8')\r\n\r\nprint(decode('XXXXXXXXXXXXXXXXXXXXXX'))\r\n\r\n#change this to your encrypted key\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
test_case = int(input())
while test_case != 0:
test_case -= 1
(n, m) = map(int, input().split())
ans = n * m
A = []
for i in range(n):
t = list(map(int, input().split()))
A.append(t)
for i in range(1, n - 1):
for j in range(1, m - 1):
k = 1
while j - k >= 0 and i - k >= 0 and j + k < m and i + k < n:
l = A[i][j - k]
r = A[i][j + k]
u = A[i - k][j]
d = A[i + k][j]
if l == r and u == d:
ans += 1
else:
break
k += 1
print(ans)
|
normal
|
{
"blob_id": "dbc3e51fed63fe0fadea67d05c4b4efc693938a3",
"index": 1487,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile test_case != 0:\n test_case -= 1\n n, m = map(int, input().split())\n ans = n * m\n A = []\n for i in range(n):\n t = list(map(int, input().split()))\n A.append(t)\n for i in range(1, n - 1):\n for j in range(1, m - 1):\n k = 1\n while j - k >= 0 and i - k >= 0 and j + k < m and i + k < n:\n l = A[i][j - k]\n r = A[i][j + k]\n u = A[i - k][j]\n d = A[i + k][j]\n if l == r and u == d:\n ans += 1\n else:\n break\n k += 1\n print(ans)\n",
"step-3": "test_case = int(input())\nwhile test_case != 0:\n test_case -= 1\n n, m = map(int, input().split())\n ans = n * m\n A = []\n for i in range(n):\n t = list(map(int, input().split()))\n A.append(t)\n for i in range(1, n - 1):\n for j in range(1, m - 1):\n k = 1\n while j - k >= 0 and i - k >= 0 and j + k < m and i + k < n:\n l = A[i][j - k]\n r = A[i][j + k]\n u = A[i - k][j]\n d = A[i + k][j]\n if l == r and u == d:\n ans += 1\n else:\n break\n k += 1\n print(ans)\n",
"step-4": "test_case = int(input())\nwhile test_case != 0:\n test_case -= 1\n (n, m) = map(int, input().split())\n ans = n * m\n A = []\n for i in range(n):\n t = list(map(int, input().split()))\n A.append(t)\n\n for i in range(1, n - 1):\n for j in range(1, m - 1):\n k = 1\n while j - k >= 0 and i - k >= 0 and j + k < m and i + k < n:\n l = A[i][j - k]\n r = A[i][j + k]\n u = A[i - k][j]\n d = A[i + k][j]\n if l == r and u == d:\n ans += 1\n else:\n break\n k += 1\n print(ans)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(
object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def token_length(self):
"""Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._token_length
@token_length.setter
def token_length(self, token_length):
"""Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyString
"""
self._token_length = token_length
@property
def token_refresh(self):
"""Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyBoolean
"""
return self._token_refresh
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@token_cleanup_threshold.setter
def token_cleanup_threshold(self, token_cleanup_threshold):
"""Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._token_cleanup_threshold = token_cleanup_threshold
<|reserved_special_token_0|>
@password_hash_algorithm.setter
def password_hash_algorithm(self, password_hash_algorithm):
"""Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyString
"""
self._password_hash_algorithm = password_hash_algorithm
@property
def password_hash_iterations(self):
"""Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyInteger
"""
return self._password_hash_iterations
@password_hash_iterations.setter
def password_hash_iterations(self, password_hash_iterations):
"""Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._password_hash_iterations = password_hash_iterations
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,
'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].
to_dict()) if hasattr(item[1], 'to_dict') else item,
value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
<|reserved_special_token_0|>
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(
object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, token_expiration=None, token_length=None,
token_refresh=None, token_cleanup_threshold=None,
password_hash_algorithm=None, password_hash_iterations=None,
password_salt_size=None):
"""OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI"""
self._token_expiration = None
self._token_length = None
self._token_refresh = None
self._token_cleanup_threshold = None
self._password_hash_algorithm = None
self._password_hash_iterations = None
self._password_salt_size = None
self.discriminator = None
if token_expiration is not None:
self.token_expiration = token_expiration
if token_length is not None:
self.token_length = token_length
if token_refresh is not None:
self.token_refresh = token_refresh
if token_cleanup_threshold is not None:
self.token_cleanup_threshold = token_cleanup_threshold
if password_hash_algorithm is not None:
self.password_hash_algorithm = password_hash_algorithm
if password_hash_iterations is not None:
self.password_hash_iterations = password_hash_iterations
if password_salt_size is not None:
self.password_salt_size = password_salt_size
@property
def token_expiration(self):
"""Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._token_expiration
<|reserved_special_token_0|>
@property
def token_length(self):
"""Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._token_length
@token_length.setter
def token_length(self, token_length):
"""Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyString
"""
self._token_length = token_length
@property
def token_refresh(self):
"""Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyBoolean
"""
return self._token_refresh
@token_refresh.setter
def token_refresh(self, token_refresh):
"""Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyBoolean
"""
self._token_refresh = token_refresh
@property
def token_cleanup_threshold(self):
"""Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyInteger
"""
return self._token_cleanup_threshold
@token_cleanup_threshold.setter
def token_cleanup_threshold(self, token_cleanup_threshold):
"""Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._token_cleanup_threshold = token_cleanup_threshold
@property
def password_hash_algorithm(self):
"""Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._password_hash_algorithm
@password_hash_algorithm.setter
def password_hash_algorithm(self, password_hash_algorithm):
"""Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyString
"""
self._password_hash_algorithm = password_hash_algorithm
@property
def password_hash_iterations(self):
"""Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyInteger
"""
return self._password_hash_iterations
@password_hash_iterations.setter
def password_hash_iterations(self, password_hash_iterations):
"""Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._password_hash_iterations = password_hash_iterations
<|reserved_special_token_0|>
@password_salt_size.setter
def password_salt_size(self, password_salt_size):
"""Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._password_salt_size = password_salt_size
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,
'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].
to_dict()) if hasattr(item[1], 'to_dict') else item,
value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
<|reserved_special_token_0|>
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(
object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __init__(self, token_expiration=None, token_length=None,
token_refresh=None, token_cleanup_threshold=None,
password_hash_algorithm=None, password_hash_iterations=None,
password_salt_size=None):
"""OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI"""
self._token_expiration = None
self._token_length = None
self._token_refresh = None
self._token_cleanup_threshold = None
self._password_hash_algorithm = None
self._password_hash_iterations = None
self._password_salt_size = None
self.discriminator = None
if token_expiration is not None:
self.token_expiration = token_expiration
if token_length is not None:
self.token_length = token_length
if token_refresh is not None:
self.token_refresh = token_refresh
if token_cleanup_threshold is not None:
self.token_cleanup_threshold = token_cleanup_threshold
if password_hash_algorithm is not None:
self.password_hash_algorithm = password_hash_algorithm
if password_hash_iterations is not None:
self.password_hash_iterations = password_hash_iterations
if password_salt_size is not None:
self.password_salt_size = password_salt_size
@property
def token_expiration(self):
"""Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._token_expiration
<|reserved_special_token_0|>
@property
def token_length(self):
"""Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._token_length
@token_length.setter
def token_length(self, token_length):
"""Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyString
"""
self._token_length = token_length
@property
def token_refresh(self):
"""Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyBoolean
"""
return self._token_refresh
@token_refresh.setter
def token_refresh(self, token_refresh):
"""Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyBoolean
"""
self._token_refresh = token_refresh
@property
def token_cleanup_threshold(self):
"""Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyInteger
"""
return self._token_cleanup_threshold
@token_cleanup_threshold.setter
def token_cleanup_threshold(self, token_cleanup_threshold):
"""Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._token_cleanup_threshold = token_cleanup_threshold
@property
def password_hash_algorithm(self):
"""Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._password_hash_algorithm
@password_hash_algorithm.setter
def password_hash_algorithm(self, password_hash_algorithm):
"""Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyString
"""
self._password_hash_algorithm = password_hash_algorithm
@property
def password_hash_iterations(self):
"""Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyInteger
"""
return self._password_hash_iterations
@password_hash_iterations.setter
def password_hash_iterations(self, password_hash_iterations):
"""Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._password_hash_iterations = password_hash_iterations
@property
def password_salt_size(self):
"""Gets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyInteger
"""
return self._password_salt_size
@password_salt_size.setter
def password_salt_size(self, password_salt_size):
"""Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._password_salt_size = password_salt_size
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,
'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].
to_dict()) if hasattr(item[1], 'to_dict') else item,
value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
<|reserved_special_token_0|>
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(
object):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
openapi_types = {'token_expiration': 'ConfigNodePropertyString',
'token_length': 'ConfigNodePropertyString', 'token_refresh':
'ConfigNodePropertyBoolean', 'token_cleanup_threshold':
'ConfigNodePropertyInteger', 'password_hash_algorithm':
'ConfigNodePropertyString', 'password_hash_iterations':
'ConfigNodePropertyInteger', 'password_salt_size':
'ConfigNodePropertyInteger'}
attribute_map = {'token_expiration': 'tokenExpiration', 'token_length':
'tokenLength', 'token_refresh': 'tokenRefresh',
'token_cleanup_threshold': 'tokenCleanupThreshold',
'password_hash_algorithm': 'passwordHashAlgorithm',
'password_hash_iterations': 'passwordHashIterations',
'password_salt_size': 'passwordSaltSize'}
def __init__(self, token_expiration=None, token_length=None,
token_refresh=None, token_cleanup_threshold=None,
password_hash_algorithm=None, password_hash_iterations=None,
password_salt_size=None):
"""OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI"""
self._token_expiration = None
self._token_length = None
self._token_refresh = None
self._token_cleanup_threshold = None
self._password_hash_algorithm = None
self._password_hash_iterations = None
self._password_salt_size = None
self.discriminator = None
if token_expiration is not None:
self.token_expiration = token_expiration
if token_length is not None:
self.token_length = token_length
if token_refresh is not None:
self.token_refresh = token_refresh
if token_cleanup_threshold is not None:
self.token_cleanup_threshold = token_cleanup_threshold
if password_hash_algorithm is not None:
self.password_hash_algorithm = password_hash_algorithm
if password_hash_iterations is not None:
self.password_hash_iterations = password_hash_iterations
if password_salt_size is not None:
self.password_salt_size = password_salt_size
@property
def token_expiration(self):
"""Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._token_expiration
@token_expiration.setter
def token_expiration(self, token_expiration):
"""Sets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_expiration: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyString
"""
self._token_expiration = token_expiration
@property
def token_length(self):
"""Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._token_length
@token_length.setter
def token_length(self, token_length):
"""Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyString
"""
self._token_length = token_length
@property
def token_refresh(self):
"""Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyBoolean
"""
return self._token_refresh
@token_refresh.setter
def token_refresh(self, token_refresh):
"""Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyBoolean
"""
self._token_refresh = token_refresh
@property
def token_cleanup_threshold(self):
"""Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyInteger
"""
return self._token_cleanup_threshold
@token_cleanup_threshold.setter
def token_cleanup_threshold(self, token_cleanup_threshold):
"""Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._token_cleanup_threshold = token_cleanup_threshold
@property
def password_hash_algorithm(self):
"""Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyString
"""
return self._password_hash_algorithm
@password_hash_algorithm.setter
def password_hash_algorithm(self, password_hash_algorithm):
"""Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyString
"""
self._password_hash_algorithm = password_hash_algorithm
@property
def password_hash_iterations(self):
"""Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyInteger
"""
return self._password_hash_iterations
@password_hash_iterations.setter
def password_hash_iterations(self, password_hash_iterations):
"""Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._password_hash_iterations = password_hash_iterations
@property
def password_salt_size(self):
"""Gets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:return: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:rtype: ConfigNodePropertyInteger
"""
return self._password_salt_size
@password_salt_size.setter
def password_salt_size(self, password_salt_size):
"""Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.
:param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501
:type: ConfigNodePropertyInteger
"""
self._password_salt_size = password_salt_size
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,
'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(lambda item: (item[0], item[1].
to_dict()) if hasattr(item[1], 'to_dict') else item,
value.items()))
else:
result[attr] = value
return result
def to_str(self):
    """Return the pretty-printed string form of the model."""
    model_dict = self.to_dict()
    return pprint.pformat(model_dict)


def __repr__(self):
    """Debug representation used by ``print`` and ``pprint``."""
    return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other,
OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties
):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
<|reserved_special_token_1|>
# coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
OpenAPI spec version: 1.0.0-pre.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(object):
    """Model for the Oak token-authentication OSGI configuration properties.

    NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.

    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """

    openapi_types = {
        'token_expiration': 'ConfigNodePropertyString',
        'token_length': 'ConfigNodePropertyString',
        'token_refresh': 'ConfigNodePropertyBoolean',
        'token_cleanup_threshold': 'ConfigNodePropertyInteger',
        'password_hash_algorithm': 'ConfigNodePropertyString',
        'password_hash_iterations': 'ConfigNodePropertyInteger',
        'password_salt_size': 'ConfigNodePropertyInteger'
    }

    attribute_map = {
        'token_expiration': 'tokenExpiration',
        'token_length': 'tokenLength',
        'token_refresh': 'tokenRefresh',
        'token_cleanup_threshold': 'tokenCleanupThreshold',
        'password_hash_algorithm': 'passwordHashAlgorithm',
        'password_hash_iterations': 'passwordHashIterations',
        'password_salt_size': 'passwordSaltSize'
    }

    def __init__(self, token_expiration=None, token_length=None, token_refresh=None, token_cleanup_threshold=None, password_hash_algorithm=None, password_hash_iterations=None, password_salt_size=None):  # noqa: E501
        """Build the model; every argument is optional.

        Arguments left as ``None`` are treated as unset: the backing slot
        stays ``None`` and the property setter is not invoked for them.
        """
        self._token_expiration = None
        self._token_length = None
        self._token_refresh = None
        self._token_cleanup_threshold = None
        self._password_hash_algorithm = None
        self._password_hash_iterations = None
        self._password_salt_size = None
        self.discriminator = None
        # Assign through the public properties only when a value was given,
        # mirroring the generated OpenAPI model behaviour.
        if token_expiration is not None:
            self.token_expiration = token_expiration
        if token_length is not None:
            self.token_length = token_length
        if token_refresh is not None:
            self.token_refresh = token_refresh
        if token_cleanup_threshold is not None:
            self.token_cleanup_threshold = token_cleanup_threshold
        if password_hash_algorithm is not None:
            self.password_hash_algorithm = password_hash_algorithm
        if password_hash_iterations is not None:
            self.password_hash_iterations = password_hash_iterations
        if password_salt_size is not None:
            self.password_salt_size = password_salt_size

    @property
    def token_expiration(self):
        """The token_expiration of this configuration.

        :rtype: ConfigNodePropertyString
        """
        return self._token_expiration

    @token_expiration.setter
    def token_expiration(self, token_expiration):
        """Set token_expiration.

        :type: ConfigNodePropertyString
        """
        self._token_expiration = token_expiration

    @property
    def token_length(self):
        """The token_length of this configuration.

        :rtype: ConfigNodePropertyString
        """
        return self._token_length

    @token_length.setter
    def token_length(self, token_length):
        """Set token_length.

        :type: ConfigNodePropertyString
        """
        self._token_length = token_length

    @property
    def token_refresh(self):
        """The token_refresh of this configuration.

        :rtype: ConfigNodePropertyBoolean
        """
        return self._token_refresh

    @token_refresh.setter
    def token_refresh(self, token_refresh):
        """Set token_refresh.

        :type: ConfigNodePropertyBoolean
        """
        self._token_refresh = token_refresh

    @property
    def token_cleanup_threshold(self):
        """The token_cleanup_threshold of this configuration.

        :rtype: ConfigNodePropertyInteger
        """
        return self._token_cleanup_threshold

    @token_cleanup_threshold.setter
    def token_cleanup_threshold(self, token_cleanup_threshold):
        """Set token_cleanup_threshold.

        :type: ConfigNodePropertyInteger
        """
        self._token_cleanup_threshold = token_cleanup_threshold

    @property
    def password_hash_algorithm(self):
        """The password_hash_algorithm of this configuration.

        :rtype: ConfigNodePropertyString
        """
        return self._password_hash_algorithm

    @password_hash_algorithm.setter
    def password_hash_algorithm(self, password_hash_algorithm):
        """Set password_hash_algorithm.

        :type: ConfigNodePropertyString
        """
        self._password_hash_algorithm = password_hash_algorithm

    @property
    def password_hash_iterations(self):
        """The password_hash_iterations of this configuration.

        :rtype: ConfigNodePropertyInteger
        """
        return self._password_hash_iterations

    @password_hash_iterations.setter
    def password_hash_iterations(self, password_hash_iterations):
        """Set password_hash_iterations.

        :type: ConfigNodePropertyInteger
        """
        self._password_hash_iterations = password_hash_iterations

    @property
    def password_salt_size(self):
        """The password_salt_size of this configuration.

        :rtype: ConfigNodePropertyInteger
        """
        return self._password_salt_size

    @password_salt_size.setter
    def password_salt_size(self, password_salt_size):
        """Set password_salt_size.

        :type: ConfigNodePropertyInteger
        """
        self._password_salt_size = password_salt_size

    def to_dict(self):
        """Return the model properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively; lists and dicts are walked one level deep.
        """
        result = {}
        # Plain dict iteration replaces the redundant six.iteritems()
        # call; behaviour is identical under Python 3.
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, 'to_dict') else item
                    for item in value
                ]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, 'to_dict') else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are of this type and equal."""
        if not isinstance(other, OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
|
flexible
|
{
"blob_id": "0ddac0aac5bd001504ed37d31b74c6442304e350",
"index": 5729,
"step-1": "<mask token>\n\n\nclass OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(\n object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def token_length(self):\n \"\"\"Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_length\n\n @token_length.setter\n def token_length(self, token_length):\n \"\"\"Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._token_length = token_length\n\n @property\n def token_refresh(self):\n \"\"\"Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyBoolean\n \"\"\"\n return self._token_refresh\n <mask token>\n <mask token>\n\n @token_cleanup_threshold.setter\n def token_cleanup_threshold(self, token_cleanup_threshold):\n \"\"\"Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._token_cleanup_threshold = token_cleanup_threshold\n <mask token>\n\n @password_hash_algorithm.setter\n def password_hash_algorithm(self, password_hash_algorithm):\n \"\"\"Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._password_hash_algorithm = password_hash_algorithm\n\n @property\n def password_hash_iterations(self):\n \"\"\"Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_hash_iterations\n\n @password_hash_iterations.setter\n def password_hash_iterations(self, password_hash_iterations):\n \"\"\"Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_hash_iterations = password_hash_iterations\n <mask token>\n <mask token>\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <mask token>\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-2": "<mask token>\n\n\nclass OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(\n object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, token_expiration=None, token_length=None,\n token_refresh=None, token_cleanup_threshold=None,\n password_hash_algorithm=None, password_hash_iterations=None,\n password_salt_size=None):\n \"\"\"OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI\"\"\"\n self._token_expiration = None\n self._token_length = None\n self._token_refresh = None\n self._token_cleanup_threshold = None\n self._password_hash_algorithm = None\n self._password_hash_iterations = None\n self._password_salt_size = None\n self.discriminator = None\n if token_expiration is not None:\n self.token_expiration = token_expiration\n if token_length is not None:\n self.token_length = token_length\n if token_refresh is not None:\n self.token_refresh = token_refresh\n if token_cleanup_threshold is not None:\n self.token_cleanup_threshold = token_cleanup_threshold\n if password_hash_algorithm is not None:\n self.password_hash_algorithm = password_hash_algorithm\n if password_hash_iterations is not None:\n self.password_hash_iterations = password_hash_iterations\n if password_salt_size is not None:\n self.password_salt_size = password_salt_size\n\n @property\n def token_expiration(self):\n \"\"\"Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_expiration\n <mask token>\n\n @property\n def token_length(self):\n \"\"\"Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n\n\n :return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_length\n\n @token_length.setter\n def token_length(self, token_length):\n \"\"\"Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._token_length = token_length\n\n @property\n def token_refresh(self):\n \"\"\"Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyBoolean\n \"\"\"\n return self._token_refresh\n\n @token_refresh.setter\n def token_refresh(self, token_refresh):\n \"\"\"Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyBoolean\n \"\"\"\n self._token_refresh = token_refresh\n\n @property\n def token_cleanup_threshold(self):\n \"\"\"Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._token_cleanup_threshold\n\n @token_cleanup_threshold.setter\n def token_cleanup_threshold(self, token_cleanup_threshold):\n \"\"\"Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._token_cleanup_threshold = token_cleanup_threshold\n\n @property\n def password_hash_algorithm(self):\n \"\"\"Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._password_hash_algorithm\n\n @password_hash_algorithm.setter\n def password_hash_algorithm(self, password_hash_algorithm):\n \"\"\"Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._password_hash_algorithm = password_hash_algorithm\n\n @property\n def password_hash_iterations(self):\n \"\"\"Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_hash_iterations\n\n @password_hash_iterations.setter\n def password_hash_iterations(self, password_hash_iterations):\n \"\"\"Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_hash_iterations = password_hash_iterations\n <mask token>\n\n @password_salt_size.setter\n def password_salt_size(self, password_salt_size):\n \"\"\"Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_salt_size = password_salt_size\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <mask token>\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-3": "<mask token>\n\n\nclass OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(\n object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, token_expiration=None, token_length=None,\n token_refresh=None, token_cleanup_threshold=None,\n password_hash_algorithm=None, password_hash_iterations=None,\n password_salt_size=None):\n \"\"\"OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI\"\"\"\n self._token_expiration = None\n self._token_length = None\n self._token_refresh = None\n self._token_cleanup_threshold = None\n self._password_hash_algorithm = None\n self._password_hash_iterations = None\n self._password_salt_size = None\n self.discriminator = None\n if token_expiration is not None:\n self.token_expiration = token_expiration\n if token_length is not None:\n self.token_length = token_length\n if token_refresh is not None:\n self.token_refresh = token_refresh\n if token_cleanup_threshold is not None:\n self.token_cleanup_threshold = token_cleanup_threshold\n if password_hash_algorithm is not None:\n self.password_hash_algorithm = password_hash_algorithm\n if password_hash_iterations is not None:\n self.password_hash_iterations = password_hash_iterations\n if password_salt_size is not None:\n self.password_salt_size = password_salt_size\n\n @property\n def token_expiration(self):\n \"\"\"Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_expiration\n <mask token>\n\n @property\n def token_length(self):\n \"\"\"Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n\n\n :return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_length\n\n @token_length.setter\n def token_length(self, token_length):\n \"\"\"Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._token_length = token_length\n\n @property\n def token_refresh(self):\n \"\"\"Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyBoolean\n \"\"\"\n return self._token_refresh\n\n @token_refresh.setter\n def token_refresh(self, token_refresh):\n \"\"\"Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyBoolean\n \"\"\"\n self._token_refresh = token_refresh\n\n @property\n def token_cleanup_threshold(self):\n \"\"\"Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._token_cleanup_threshold\n\n @token_cleanup_threshold.setter\n def token_cleanup_threshold(self, token_cleanup_threshold):\n \"\"\"Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._token_cleanup_threshold = token_cleanup_threshold\n\n @property\n def password_hash_algorithm(self):\n \"\"\"Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._password_hash_algorithm\n\n @password_hash_algorithm.setter\n def password_hash_algorithm(self, password_hash_algorithm):\n \"\"\"Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._password_hash_algorithm = password_hash_algorithm\n\n @property\n def password_hash_iterations(self):\n \"\"\"Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_hash_iterations\n\n @password_hash_iterations.setter\n def password_hash_iterations(self, password_hash_iterations):\n \"\"\"Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_hash_iterations = password_hash_iterations\n\n @property\n def password_salt_size(self):\n \"\"\"Gets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_salt_size\n\n @password_salt_size.setter\n def password_salt_size(self, password_salt_size):\n \"\"\"Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_salt_size = password_salt_size\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <mask token>\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-4": "<mask token>\n\n\nclass OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(\n object):\n <mask token>\n <mask token>\n openapi_types = {'token_expiration': 'ConfigNodePropertyString',\n 'token_length': 'ConfigNodePropertyString', 'token_refresh':\n 'ConfigNodePropertyBoolean', 'token_cleanup_threshold':\n 'ConfigNodePropertyInteger', 'password_hash_algorithm':\n 'ConfigNodePropertyString', 'password_hash_iterations':\n 'ConfigNodePropertyInteger', 'password_salt_size':\n 'ConfigNodePropertyInteger'}\n attribute_map = {'token_expiration': 'tokenExpiration', 'token_length':\n 'tokenLength', 'token_refresh': 'tokenRefresh',\n 'token_cleanup_threshold': 'tokenCleanupThreshold',\n 'password_hash_algorithm': 'passwordHashAlgorithm',\n 'password_hash_iterations': 'passwordHashIterations',\n 'password_salt_size': 'passwordSaltSize'}\n\n def __init__(self, token_expiration=None, token_length=None,\n token_refresh=None, token_cleanup_threshold=None,\n password_hash_algorithm=None, password_hash_iterations=None,\n password_salt_size=None):\n \"\"\"OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI\"\"\"\n self._token_expiration = None\n self._token_length = None\n self._token_refresh = None\n self._token_cleanup_threshold = None\n self._password_hash_algorithm = None\n self._password_hash_iterations = None\n self._password_salt_size = None\n self.discriminator = None\n if token_expiration is not None:\n self.token_expiration = token_expiration\n if token_length is not None:\n self.token_length = token_length\n if token_refresh is not None:\n self.token_refresh = token_refresh\n if token_cleanup_threshold is not None:\n self.token_cleanup_threshold = token_cleanup_threshold\n if password_hash_algorithm is not None:\n self.password_hash_algorithm = password_hash_algorithm\n if password_hash_iterations is not None:\n self.password_hash_iterations = password_hash_iterations\n if 
password_salt_size is not None:\n self.password_salt_size = password_salt_size\n\n @property\n def token_expiration(self):\n \"\"\"Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_expiration\n\n @token_expiration.setter\n def token_expiration(self, token_expiration):\n \"\"\"Sets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_expiration: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._token_expiration = token_expiration\n\n @property\n def token_length(self):\n \"\"\"Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_length\n\n @token_length.setter\n def token_length(self, token_length):\n \"\"\"Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._token_length = token_length\n\n @property\n def token_refresh(self):\n \"\"\"Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyBoolean\n \"\"\"\n return self._token_refresh\n\n @token_refresh.setter\n def token_refresh(self, token_refresh):\n \"\"\"Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyBoolean\n \"\"\"\n self._token_refresh = token_refresh\n\n @property\n def token_cleanup_threshold(self):\n \"\"\"Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._token_cleanup_threshold\n\n @token_cleanup_threshold.setter\n def token_cleanup_threshold(self, token_cleanup_threshold):\n \"\"\"Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._token_cleanup_threshold = token_cleanup_threshold\n\n @property\n def password_hash_algorithm(self):\n \"\"\"Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._password_hash_algorithm\n\n @password_hash_algorithm.setter\n def password_hash_algorithm(self, password_hash_algorithm):\n \"\"\"Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n self._password_hash_algorithm = password_hash_algorithm\n\n @property\n def password_hash_iterations(self):\n \"\"\"Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_hash_iterations\n\n @password_hash_iterations.setter\n def password_hash_iterations(self, password_hash_iterations):\n \"\"\"Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_hash_iterations = password_hash_iterations\n\n @property\n def password_salt_size(self):\n \"\"\"Gets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_salt_size\n\n @password_salt_size.setter\n def password_salt_size(self, password_salt_size):\n \"\"\"Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n self._password_salt_size = password_salt_size\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other,\n OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties\n ):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-5": "# coding: utf-8\n\n\"\"\"\n Adobe Experience Manager OSGI config (AEM) API\n\n Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501\n\n OpenAPI spec version: 1.0.0-pre.0\n Contact: opensource@shinesolutions.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'token_expiration': 'ConfigNodePropertyString',\n 'token_length': 'ConfigNodePropertyString',\n 'token_refresh': 'ConfigNodePropertyBoolean',\n 'token_cleanup_threshold': 'ConfigNodePropertyInteger',\n 'password_hash_algorithm': 'ConfigNodePropertyString',\n 'password_hash_iterations': 'ConfigNodePropertyInteger',\n 'password_salt_size': 'ConfigNodePropertyInteger'\n }\n\n attribute_map = {\n 'token_expiration': 'tokenExpiration',\n 'token_length': 'tokenLength',\n 'token_refresh': 'tokenRefresh',\n 'token_cleanup_threshold': 'tokenCleanupThreshold',\n 'password_hash_algorithm': 'passwordHashAlgorithm',\n 'password_hash_iterations': 'passwordHashIterations',\n 'password_salt_size': 'passwordSaltSize'\n }\n\n def __init__(self, token_expiration=None, token_length=None, token_refresh=None, token_cleanup_threshold=None, password_hash_algorithm=None, password_hash_iterations=None, password_salt_size=None): # noqa: E501\n \"\"\"OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._token_expiration = None\n self._token_length = None\n 
self._token_refresh = None\n self._token_cleanup_threshold = None\n self._password_hash_algorithm = None\n self._password_hash_iterations = None\n self._password_salt_size = None\n self.discriminator = None\n\n if token_expiration is not None:\n self.token_expiration = token_expiration\n if token_length is not None:\n self.token_length = token_length\n if token_refresh is not None:\n self.token_refresh = token_refresh\n if token_cleanup_threshold is not None:\n self.token_cleanup_threshold = token_cleanup_threshold\n if password_hash_algorithm is not None:\n self.password_hash_algorithm = password_hash_algorithm\n if password_hash_iterations is not None:\n self.password_hash_iterations = password_hash_iterations\n if password_salt_size is not None:\n self.password_salt_size = password_salt_size\n\n @property\n def token_expiration(self):\n \"\"\"Gets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_expiration\n\n @token_expiration.setter\n def token_expiration(self, token_expiration):\n \"\"\"Sets the token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_expiration: The token_expiration of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n\n self._token_expiration = token_expiration\n\n @property\n def token_length(self):\n \"\"\"Gets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._token_length\n\n @token_length.setter\n def token_length(self, token_length):\n \"\"\"Sets the token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_length: The token_length of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n\n self._token_length = token_length\n\n @property\n def token_refresh(self):\n \"\"\"Gets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyBoolean\n \"\"\"\n return self._token_refresh\n\n @token_refresh.setter\n def token_refresh(self, token_refresh):\n \"\"\"Sets the token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_refresh: The token_refresh of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyBoolean\n \"\"\"\n\n self._token_refresh = token_refresh\n\n @property\n def token_cleanup_threshold(self):\n \"\"\"Gets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._token_cleanup_threshold\n\n @token_cleanup_threshold.setter\n def token_cleanup_threshold(self, token_cleanup_threshold):\n \"\"\"Sets the token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param token_cleanup_threshold: The token_cleanup_threshold of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n\n self._token_cleanup_threshold = token_cleanup_threshold\n\n @property\n def password_hash_algorithm(self):\n \"\"\"Gets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyString\n \"\"\"\n return self._password_hash_algorithm\n\n @password_hash_algorithm.setter\n def password_hash_algorithm(self, password_hash_algorithm):\n \"\"\"Sets the password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_algorithm: The password_hash_algorithm of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyString\n \"\"\"\n\n self._password_hash_algorithm = password_hash_algorithm\n\n @property\n def password_hash_iterations(self):\n \"\"\"Gets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_hash_iterations\n\n @password_hash_iterations.setter\n def password_hash_iterations(self, password_hash_iterations):\n \"\"\"Sets the password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_hash_iterations: The password_hash_iterations of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n\n self._password_hash_iterations = password_hash_iterations\n\n @property\n def password_salt_size(self):\n \"\"\"Gets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n\n\n :return: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. # noqa: E501\n :rtype: ConfigNodePropertyInteger\n \"\"\"\n return self._password_salt_size\n\n @password_salt_size.setter\n def password_salt_size(self, password_salt_size):\n \"\"\"Sets the password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties.\n\n\n :param password_salt_size: The password_salt_size of this OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties. 
# noqa: E501\n :type: ConfigNodePropertyInteger\n \"\"\"\n\n self._password_salt_size = password_salt_size\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, OrgApacheJackrabbitOakSecurityAuthenticationTokenTokenConfiguraProperties):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"step-ids": [
12,
18,
19,
22,
25
]
}
|
[
12,
18,
19,
22,
25
] |
from solution import find_days
import pudb
def test():
    """find_days should return, for each day, the wait until a warmer day."""
    temperatures = [1, 2, 3, 1, 0, 4]
    # pudb.set_trace()  # uncomment to step through interactively
    expected = [1, 1, 3, 2, 1, 0]
    assert find_days(temperatures) == expected
|
normal
|
{
"blob_id": "db36c82717aa0bacffce7a3e2724ed2bb586c7fb",
"index": 7862,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test():\n T = [1, 2, 3, 1, 0, 4]\n res = find_days(T)\n assert res == [1, 1, 3, 2, 1, 0]\n",
"step-3": "from solution import find_days\nimport pudb\n\n\ndef test():\n T = [1, 2, 3, 1, 0, 4]\n res = find_days(T)\n assert res == [1, 1, 3, 2, 1, 0]\n",
"step-4": "from solution import find_days\nimport pudb\n\n\ndef test():\n T = [1, 2, 3, 1, 0, 4]\n # pudb.set_trace()\n res = find_days(T)\n assert res == [1, 1, 3, 2, 1, 0]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
# coding: utf-8

# Databricks notebook export: builds sliding 3-day windows of mean temperature,
# trains an SVR to predict the 4th day, reports MAE, and pickles the model.

# In[ ]:


import numpy as np
import pickle
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from pyspark.sql.functions import split, concat,col
from sklearn.svm import SVR

# Toggle for interactive/debug output inside the notebook.
test = True


# In[ ]:


# Read notebook widget parameters: input CSV glob and output model path.
dbutils.widgets.removeAll()

dbutils.widgets.text("input_path", "Not found", "input_path")
input_path = dbutils.widgets.get("input_path")

dbutils.widgets.text("model_path", "Not found", "model_path")
model_path = dbutils.widgets.get("model_path")

if test:
    print(dbutils.widgets.get("input_path"))
    print(dbutils.widgets.get("model_path"))

    # NOTE(review): these fallback paths are only applied while test is True --
    # confirm production runs always supply both widgets.
    if input_path == 'Not found':
        input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'
    if model_path == 'Not found':
        model_path = '/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'


# In[ ]:


# Load the raw temperature CSVs with header and inferred schema.
input_df = spark.read.option("inferSchema","true").option("header", "true").csv(input_path)

if test:
    display(input_df)


# In[ ]:


# Reduce to (Year_Month, Day, Mean_Temperature); Year_Month keys the pivot below.
input_df = input_df.withColumn('Year_Month', concat(col('Year'), col('Month')))
cols = ['Year_Month','Day','Mean_Temperature']
input_df = input_df[cols]

if test:
    display(input_df)


# In[ ]:


# One row per month, one column per day of the month.
input_pivot_df = input_df.groupBy("Year_Month").pivot("Day").sum("Mean_Temperature")


# In[ ]:


# Slide a window over the day columns: features = 3 consecutive days,
# target = the following day.
div_data = np.asarray(input_pivot_df.select([c for c in input_pivot_df.columns if c not in {'Year_Month'}]).collect())

X = None; y = None
for i in range(div_data.shape[1]-6):
    if X is None:
        X = div_data[:, i:i+3]
        y = div_data[:, i+3]
    else:
        # Skip windows that contain missing (None) days.
        # NOTE(review): `or` admits a window when only one side is None-free,
        # and the very first window is never checked -- confirm intended.
        if None not in div_data[:, i:i+3] or None not in div_data[:, i+3]:
            X = np.concatenate((X, div_data[:, i:i+3]), axis=0)
            y = np.concatenate((y, div_data[:, i+3]), axis=0)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)


# In[ ]:


if test:
    print(X_train)


# In[ ]:


# Fit the support-vector regressor and evaluate on the hold-out split.
clf = SVR(gamma='auto', C=0.1, epsilon=0.2)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
mean_absolute_error(y_test, y_pred)


# In[ ]:


# Persist the trained model to the configured path.
pickle.dump(clf, open(model_path, 'wb'))
|
normal
|
{
"blob_id": "e48addecdde632607a9c782ff78a769122daab6f",
"index": 1738,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndbutils.widgets.removeAll()\ndbutils.widgets.text('input_path', 'Not found', 'input_path')\n<mask token>\ndbutils.widgets.text('model_path', 'Not found', 'model_path')\n<mask token>\nif test:\n print(dbutils.widgets.get('input_path'))\n print(dbutils.widgets.get('model_path'))\n if input_path == 'Not found':\n input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'\n if model_path == 'Not found':\n model_path = (\n '/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'\n )\n<mask token>\nif test:\n display(input_df)\n<mask token>\nif test:\n display(input_df)\n<mask token>\nfor i in range(div_data.shape[1] - 6):\n if X is None:\n X = div_data[:, i:i + 3]\n y = div_data[:, i + 3]\n elif None not in div_data[:, i:i + 3] or None not in div_data[:, i + 3]:\n X = np.concatenate((X, div_data[:, i:i + 3]), axis=0)\n y = np.concatenate((y, div_data[:, i + 3]), axis=0)\n<mask token>\nif test:\n print(X_train)\n<mask token>\nclf.fit(X_train, y_train)\n<mask token>\nmean_absolute_error(y_test, y_pred)\npickle.dump(clf, open(model_path, 'wb'))\n",
"step-3": "<mask token>\ntest = True\ndbutils.widgets.removeAll()\ndbutils.widgets.text('input_path', 'Not found', 'input_path')\ninput_path = dbutils.widgets.get('input_path')\ndbutils.widgets.text('model_path', 'Not found', 'model_path')\nmodel_path = dbutils.widgets.get('model_path')\nif test:\n print(dbutils.widgets.get('input_path'))\n print(dbutils.widgets.get('model_path'))\n if input_path == 'Not found':\n input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'\n if model_path == 'Not found':\n model_path = (\n '/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'\n )\ninput_df = spark.read.option('inferSchema', 'true').option('header', 'true'\n ).csv(input_path)\nif test:\n display(input_df)\ninput_df = input_df.withColumn('Year_Month', concat(col('Year'), col('Month')))\ncols = ['Year_Month', 'Day', 'Mean_Temperature']\ninput_df = input_df[cols]\nif test:\n display(input_df)\ninput_pivot_df = input_df.groupBy('Year_Month').pivot('Day').sum(\n 'Mean_Temperature')\ndiv_data = np.asarray(input_pivot_df.select([c for c in input_pivot_df.\n columns if c not in {'Year_Month'}]).collect())\nX = None\ny = None\nfor i in range(div_data.shape[1] - 6):\n if X is None:\n X = div_data[:, i:i + 3]\n y = div_data[:, i + 3]\n elif None not in div_data[:, i:i + 3] or None not in div_data[:, i + 3]:\n X = np.concatenate((X, div_data[:, i:i + 3]), axis=0)\n y = np.concatenate((y, div_data[:, i + 3]), axis=0)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=42)\nif test:\n print(X_train)\nclf = SVR(gamma='auto', C=0.1, epsilon=0.2)\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\nmean_absolute_error(y_test, y_pred)\npickle.dump(clf, open(model_path, 'wb'))\n",
"step-4": "import numpy as np\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error\nfrom pyspark.sql.functions import split, concat, col\nfrom sklearn.svm import SVR\ntest = True\ndbutils.widgets.removeAll()\ndbutils.widgets.text('input_path', 'Not found', 'input_path')\ninput_path = dbutils.widgets.get('input_path')\ndbutils.widgets.text('model_path', 'Not found', 'model_path')\nmodel_path = dbutils.widgets.get('model_path')\nif test:\n print(dbutils.widgets.get('input_path'))\n print(dbutils.widgets.get('model_path'))\n if input_path == 'Not found':\n input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'\n if model_path == 'Not found':\n model_path = (\n '/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'\n )\ninput_df = spark.read.option('inferSchema', 'true').option('header', 'true'\n ).csv(input_path)\nif test:\n display(input_df)\ninput_df = input_df.withColumn('Year_Month', concat(col('Year'), col('Month')))\ncols = ['Year_Month', 'Day', 'Mean_Temperature']\ninput_df = input_df[cols]\nif test:\n display(input_df)\ninput_pivot_df = input_df.groupBy('Year_Month').pivot('Day').sum(\n 'Mean_Temperature')\ndiv_data = np.asarray(input_pivot_df.select([c for c in input_pivot_df.\n columns if c not in {'Year_Month'}]).collect())\nX = None\ny = None\nfor i in range(div_data.shape[1] - 6):\n if X is None:\n X = div_data[:, i:i + 3]\n y = div_data[:, i + 3]\n elif None not in div_data[:, i:i + 3] or None not in div_data[:, i + 3]:\n X = np.concatenate((X, div_data[:, i:i + 3]), axis=0)\n y = np.concatenate((y, div_data[:, i + 3]), axis=0)\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,\n random_state=42)\nif test:\n print(X_train)\nclf = SVR(gamma='auto', C=0.1, epsilon=0.2)\nclf.fit(X_train, y_train)\ny_pred = clf.predict(X_test)\nmean_absolute_error(y_test, y_pred)\npickle.dump(clf, open(model_path, 'wb'))\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\nimport numpy as np\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_absolute_error\nfrom pyspark.sql.functions import split, concat,col\nfrom sklearn.svm import SVR\n\ntest = True\n\n\n# In[ ]:\n\n\ndbutils.widgets.removeAll()\n\ndbutils.widgets.text(\"input_path\", \"Not found\", \"input_path\")\ninput_path = dbutils.widgets.get(\"input_path\")\n\ndbutils.widgets.text(\"model_path\", \"Not found\", \"model_path\")\nmodel_path = dbutils.widgets.get(\"model_path\")\n\nif test:\n print(dbutils.widgets.get(\"input_path\"))\n print(dbutils.widgets.get(\"model_path\"))\n \n if input_path == 'Not found':\n input_path = '/mnt/<mount-name>/<path>/temperature/data/*.csv'\n if model_path == 'Not found':\n model_path = '/dbfs/mnt/<mount-name>/<path>/temperature/model/temperature-model.pkl'\n\n\n# In[ ]:\n\n\ninput_df = spark.read.option(\"inferSchema\",\"true\").option(\"header\", \"true\").csv(input_path)\n\nif test:\n display(input_df)\n\n\n# In[ ]:\n\n\ninput_df = input_df.withColumn('Year_Month', concat(col('Year'), col('Month')))\ncols = ['Year_Month','Day','Mean_Temperature']\ninput_df = input_df[cols]\n\nif test:\n display(input_df)\n\n\n# In[ ]:\n\n\ninput_pivot_df = input_df.groupBy(\"Year_Month\").pivot(\"Day\").sum(\"Mean_Temperature\")\n\n\n# In[ ]:\n\n\ndiv_data = np.asarray(input_pivot_df.select([c for c in input_pivot_df.columns if c not in {'Year_Month'}]).collect())\n\nX = None; y = None\nfor i in range(div_data.shape[1]-6):\n if X is None:\n X = div_data[:, i:i+3]\n y = div_data[:, i+3]\n else:\n if None not in div_data[:, i:i+3] or None not in div_data[:, i+3]:\n X = np.concatenate((X, div_data[:, i:i+3]), axis=0)\n y = np.concatenate((y, div_data[:, i+3]), axis=0)\n \nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n\n# In[ ]:\n\n\nif test:\n print(X_train)\n\n\n# In[ ]:\n\n\nclf = 
SVR(gamma='auto', C=0.1, epsilon=0.2)\nclf.fit(X_train, y_train) \ny_pred = clf.predict(X_test)\nmean_absolute_error(y_test, y_pred)\n\n\n# In[ ]:\n\n\npickle.dump(clf, open(model_path, 'wb'))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import hive
from ..bind import Instantiator as _Instantiator
from ..event import bind_info as event_bind_info
bind_infos = (event_bind_info,)
def build_scene_instantiator(i, ex, args, meta_args):
    """Extend the bind-environment base class with every enabled bind info's environment hive."""
    enabled_environments = tuple(
        info.environment_hive for info in bind_infos if info.is_enabled(meta_args)
    )

    # Rebase the bind environment meta class on the enabled environment hives.
    base_environment = i.bind_meta_class.start_value
    i.bind_meta_class.start_value = base_environment.extend(
        "SceneBindEnvironment", bases=enabled_environments
    )
# Instantiator hive specialised for scenes: gains every bind info's bind hive
# as a base and rebuilds its bind environment via build_scene_instantiator.
Instantiator = _Instantiator.extend("Instantiator", build_scene_instantiator,
                                    bases=tuple(b_i.bind_hive for b_i in bind_infos))
class SceneClass:
    """Drone class for the Scene hive.

    Tracks entities spawned into the bound BGE scene (by identifier) and
    exposes position/orientation queries used as entity plugins.
    """

    def __init__(self):
        self._entities = {}  # identifier -> spawned game object
        self.scene = None  # bound by the hive property in build_scene

    def get_entity_id(self, identifier):
        """Return the entity previously registered under *identifier*.

        :raises KeyError: if no entity was spawned with this identifier.
        """
        return self._entities[identifier]

    def get_position_absolute(self, entity):
        """World-space position of *entity* as a plain tuple."""
        return tuple(entity.worldPosition)

    def get_orientation_absolute(self, entity):
        """World-space orientation of *entity* as a quaternion tuple."""
        return tuple(entity.worldOrientation.to_quaternion())

    def get_position_relative(self, entity, other):
        """Position of *entity* relative to *other* as a tuple."""
        return tuple(entity.worldPosition - other.worldPosition)

    def get_orientation_relative(self, entity, other):
        """Rotation difference between the orientations of *entity* and *other*.

        Fixed: the original compared against ``other.worldPosition.to_quaternion()``,
        but worldPosition is a position Vector (no ``to_quaternion`` method);
        the orientation matrix is the intended operand, as in
        :meth:`get_orientation_absolute`.
        """
        return tuple(entity.worldOrientation.to_quaternion().rotation_difference(other.worldOrientation.to_quaternion()))

    def spawn_entity(self, class_name, identifier):
        """Add an object of *class_name* to the scene and register it under *identifier*."""
        entity = self.scene.addObject(class_name, 'Empty')
        # entity.worldTransform = entity.worldTransform.inverted() * entity.worldTransform

        self._entities[identifier] = entity
        return entity

    def get_scene(self):
        """Return the currently bound scene (None until the hive binds it)."""
        return self.scene
def build_scene(cls, i, ex, args):
    """Builder for the Scene hive: publishes entity plugins backed by SceneClass."""
    # Mirror the drone's `scene` attribute as a hive property.
    i.bge_scene = hive.property(cls, "scene")

    # Plugins resolved by identifier from other hives.
    ex.get_entity_id = hive.plugin(cls.get_entity_id, identifier="entity.get")
    ex.get_position_absolute = hive.plugin(cls.get_position_absolute, identifier="entity.position.absolute.get")
    ex.get_position_relative = hive.plugin(cls.get_position_relative, identifier="entity.position.relative.get")
    ex.get_orientation_absolute = hive.plugin(cls.get_orientation_absolute, identifier="entity.orientation.absolute.get")
    ex.get_orientation_relative = hive.plugin(cls.get_orientation_relative, identifier="entity.orientation.relative.get")
    ex.spawn_entity = hive.plugin(cls.spawn_entity, identifier="entity.spawn")
    ex.get_scene = hive.plugin(cls.get_scene, identifier="entity.get_current")

    import dragonfly
    ex.on_tick = dragonfly.event.Tick()

    def f(self):
        # NOTE(review): looks like debug leftovers -- prints every tick and
        # spawns a single "Cube" entity on the first tick; confirm this
        # should remain in the shipped builder.
        print("I")
        if not hasattr(self, 'a'):
            self.a = 1

            self.spawn_entity.plugin()("Cube", "c1")

    # Run f on every tick event.
    i.mod_tick = hive.modifier(f)
    hive.trigger(ex.on_tick, i.mod_tick)


# The public Scene hive, built from SceneClass via build_scene.
Scene = hive.hive("Scene", build_scene, builder_cls=SceneClass)
|
normal
|
{
"blob_id": "23d4619527b5fce7fed0b0a66d834e26bb984129",
"index": 6443,
"step-1": "<mask token>\n\n\nclass SceneClass:\n\n def __init__(self):\n self._entities = {}\n self.scene = None\n\n def get_entity_id(self, identifier):\n return self._entities[identifier]\n\n def get_position_absolute(self, entity):\n return tuple(entity.worldPosition)\n\n def get_orientation_absolute(self, entity):\n return tuple(entity.worldOrientation.to_quaternion())\n\n def get_position_relative(self, entity, other):\n return tuple(entity.worldPosition - other.worldPosition)\n\n def get_orientation_relative(self, entity, other):\n return tuple(entity.worldOrientation.to_quaternion().\n rotation_difference(other.worldPosition.to_quaternion()))\n\n def spawn_entity(self, class_name, identifier):\n entity = self.scene.addObject(class_name, 'Empty')\n self._entities[identifier] = entity\n return entity\n\n def get_scene(self):\n return self.scene\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef build_scene_instantiator(i, ex, args, meta_args):\n bind_bases = tuple(b_i.environment_hive for b_i in bind_infos if b_i.\n is_enabled(meta_args))\n environment_class = i.bind_meta_class.start_value\n i.bind_meta_class.start_value = environment_class.extend(\n 'SceneBindEnvironment', bases=tuple(bind_bases))\n\n\n<mask token>\n\n\nclass SceneClass:\n\n def __init__(self):\n self._entities = {}\n self.scene = None\n\n def get_entity_id(self, identifier):\n return self._entities[identifier]\n\n def get_position_absolute(self, entity):\n return tuple(entity.worldPosition)\n\n def get_orientation_absolute(self, entity):\n return tuple(entity.worldOrientation.to_quaternion())\n\n def get_position_relative(self, entity, other):\n return tuple(entity.worldPosition - other.worldPosition)\n\n def get_orientation_relative(self, entity, other):\n return tuple(entity.worldOrientation.to_quaternion().\n rotation_difference(other.worldPosition.to_quaternion()))\n\n def spawn_entity(self, class_name, identifier):\n entity = self.scene.addObject(class_name, 'Empty')\n self._entities[identifier] = entity\n return entity\n\n def get_scene(self):\n return self.scene\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef build_scene_instantiator(i, ex, args, meta_args):\n bind_bases = tuple(b_i.environment_hive for b_i in bind_infos if b_i.\n is_enabled(meta_args))\n environment_class = i.bind_meta_class.start_value\n i.bind_meta_class.start_value = environment_class.extend(\n 'SceneBindEnvironment', bases=tuple(bind_bases))\n\n\n<mask token>\n\n\nclass SceneClass:\n\n def __init__(self):\n self._entities = {}\n self.scene = None\n\n def get_entity_id(self, identifier):\n return self._entities[identifier]\n\n def get_position_absolute(self, entity):\n return tuple(entity.worldPosition)\n\n def get_orientation_absolute(self, entity):\n return tuple(entity.worldOrientation.to_quaternion())\n\n def get_position_relative(self, entity, other):\n return tuple(entity.worldPosition - other.worldPosition)\n\n def get_orientation_relative(self, entity, other):\n return tuple(entity.worldOrientation.to_quaternion().\n rotation_difference(other.worldPosition.to_quaternion()))\n\n def spawn_entity(self, class_name, identifier):\n entity = self.scene.addObject(class_name, 'Empty')\n self._entities[identifier] = entity\n return entity\n\n def get_scene(self):\n return self.scene\n\n\ndef build_scene(cls, i, ex, args):\n i.bge_scene = hive.property(cls, 'scene')\n ex.get_entity_id = hive.plugin(cls.get_entity_id, identifier='entity.get')\n ex.get_position_absolute = hive.plugin(cls.get_position_absolute,\n identifier='entity.position.absolute.get')\n ex.get_position_relative = hive.plugin(cls.get_position_relative,\n identifier='entity.position.relative.get')\n ex.get_orientation_absolute = hive.plugin(cls.get_orientation_absolute,\n identifier='entity.orientation.absolute.get')\n ex.get_orientation_relative = hive.plugin(cls.get_orientation_relative,\n identifier='entity.orientation.relative.get')\n ex.spawn_entity = hive.plugin(cls.spawn_entity, identifier='entity.spawn')\n ex.get_scene = hive.plugin(cls.get_scene, identifier='entity.get_current')\n import 
dragonfly\n ex.on_tick = dragonfly.event.Tick()\n\n def f(self):\n print('I')\n if not hasattr(self, 'a'):\n self.a = 1\n self.spawn_entity.plugin()('Cube', 'c1')\n i.mod_tick = hive.modifier(f)\n hive.trigger(ex.on_tick, i.mod_tick)\n\n\n<mask token>\n",
"step-4": "<mask token>\nbind_infos = event_bind_info,\n\n\ndef build_scene_instantiator(i, ex, args, meta_args):\n bind_bases = tuple(b_i.environment_hive for b_i in bind_infos if b_i.\n is_enabled(meta_args))\n environment_class = i.bind_meta_class.start_value\n i.bind_meta_class.start_value = environment_class.extend(\n 'SceneBindEnvironment', bases=tuple(bind_bases))\n\n\nInstantiator = _Instantiator.extend('Instantiator',\n build_scene_instantiator, bases=tuple(b_i.bind_hive for b_i in bind_infos))\n\n\nclass SceneClass:\n\n def __init__(self):\n self._entities = {}\n self.scene = None\n\n def get_entity_id(self, identifier):\n return self._entities[identifier]\n\n def get_position_absolute(self, entity):\n return tuple(entity.worldPosition)\n\n def get_orientation_absolute(self, entity):\n return tuple(entity.worldOrientation.to_quaternion())\n\n def get_position_relative(self, entity, other):\n return tuple(entity.worldPosition - other.worldPosition)\n\n def get_orientation_relative(self, entity, other):\n return tuple(entity.worldOrientation.to_quaternion().\n rotation_difference(other.worldPosition.to_quaternion()))\n\n def spawn_entity(self, class_name, identifier):\n entity = self.scene.addObject(class_name, 'Empty')\n self._entities[identifier] = entity\n return entity\n\n def get_scene(self):\n return self.scene\n\n\ndef build_scene(cls, i, ex, args):\n i.bge_scene = hive.property(cls, 'scene')\n ex.get_entity_id = hive.plugin(cls.get_entity_id, identifier='entity.get')\n ex.get_position_absolute = hive.plugin(cls.get_position_absolute,\n identifier='entity.position.absolute.get')\n ex.get_position_relative = hive.plugin(cls.get_position_relative,\n identifier='entity.position.relative.get')\n ex.get_orientation_absolute = hive.plugin(cls.get_orientation_absolute,\n identifier='entity.orientation.absolute.get')\n ex.get_orientation_relative = hive.plugin(cls.get_orientation_relative,\n identifier='entity.orientation.relative.get')\n ex.spawn_entity = 
hive.plugin(cls.spawn_entity, identifier='entity.spawn')\n ex.get_scene = hive.plugin(cls.get_scene, identifier='entity.get_current')\n import dragonfly\n ex.on_tick = dragonfly.event.Tick()\n\n def f(self):\n print('I')\n if not hasattr(self, 'a'):\n self.a = 1\n self.spawn_entity.plugin()('Cube', 'c1')\n i.mod_tick = hive.modifier(f)\n hive.trigger(ex.on_tick, i.mod_tick)\n\n\nScene = hive.hive('Scene', build_scene, builder_cls=SceneClass)\n",
"step-5": "import hive\n\nfrom ..bind import Instantiator as _Instantiator\nfrom ..event import bind_info as event_bind_info\n\nbind_infos = (event_bind_info,)\n\n\ndef build_scene_instantiator(i, ex, args, meta_args):\n bind_bases = tuple((b_i.environment_hive for b_i in bind_infos if b_i.is_enabled(meta_args)))\n\n # Update bind environment to use new bases\n environment_class = i.bind_meta_class.start_value\n i.bind_meta_class.start_value = environment_class.extend(\"SceneBindEnvironment\", bases=tuple(bind_bases))\n\n\nInstantiator = _Instantiator.extend(\"Instantiator\", build_scene_instantiator,\n bases=tuple(b_i.bind_hive for b_i in bind_infos))\n\n\nclass SceneClass:\n\n def __init__(self):\n self._entities = {}\n self.scene = None\n\n def get_entity_id(self, identifier):\n return self._entities[identifier]\n\n def get_position_absolute(self, entity):\n return tuple(entity.worldPosition)\n\n def get_orientation_absolute(self, entity):\n return tuple(entity.worldOrientation.to_quaternion())\n\n def get_position_relative(self, entity, other):\n return tuple(entity.worldPosition - other.worldPosition)\n\n def get_orientation_relative(self, entity, other):\n return tuple(entity.worldOrientation.to_quaternion().rotation_difference(other.worldPosition.to_quaternion()))\n\n def spawn_entity(self, class_name, identifier):\n entity = self.scene.addObject(class_name, 'Empty')\n # entity.worldTransform = entity.worldTransform.inverted() * entity.worldTransform\n\n self._entities[identifier] = entity\n return entity\n\n def get_scene(self):\n return self.scene\n\n\ndef build_scene(cls, i, ex, args):\n i.bge_scene = hive.property(cls, \"scene\")\n\n ex.get_entity_id = hive.plugin(cls.get_entity_id, identifier=\"entity.get\")\n ex.get_position_absolute = hive.plugin(cls.get_position_absolute, identifier=\"entity.position.absolute.get\")\n ex.get_position_relative = hive.plugin(cls.get_position_relative, identifier=\"entity.position.relative.get\")\n 
ex.get_orientation_absolute = hive.plugin(cls.get_orientation_absolute, identifier=\"entity.orientation.absolute.get\")\n ex.get_orientation_relative = hive.plugin(cls.get_orientation_relative, identifier=\"entity.orientation.relative.get\")\n ex.spawn_entity = hive.plugin(cls.spawn_entity, identifier=\"entity.spawn\")\n ex.get_scene = hive.plugin(cls.get_scene, identifier=\"entity.get_current\")\n\n import dragonfly\n ex.on_tick = dragonfly.event.Tick()\n\n def f(self):\n print(\"I\")\n if not hasattr(self, 'a'):\n self.a = 1\n\n self.spawn_entity.plugin()(\"Cube\", \"c1\")\n\n i.mod_tick = hive.modifier(f)\n hive.trigger(ex.on_tick, i.mod_tick)\n\n\nScene = hive.hive(\"Scene\", build_scene, builder_cls=SceneClass)\n",
"step-ids": [
9,
10,
11,
12,
14
]
}
|
[
9,
10,
11,
12,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python3
"""Test telegram_menu package."""
|
flexible
|
{
"blob_id": "8d4ffed90e103e61a85a54d6163770966fb2e5c9",
"index": 5049,
"step-1": "<mask token>\n",
"step-2": "#!/usr/bin/env python3\n\n\"\"\"Test telegram_menu package.\"\"\"\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-16 12:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11.6, 2017-10-16).

    Re-declares ``Sponsor.email_text_markdown`` as
    ``CharField(default='', max_length=1000)``.
    """

    dependencies = [
        ('blog', '0033_auto_20171016_1334'),
    ]

    operations = [
        migrations.AlterField(
            model_name='sponsor',
            name='email_text_markdown',
            field=models.CharField(default='', max_length=1000),
        ),
    ]
|
normal
|
{
"blob_id": "d0dfea27128ca6966c85da6529ead5c95c86c4cf",
"index": 1183,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0033_auto_20171016_1334')]\n operations = [migrations.AlterField(model_name='sponsor', name=\n 'email_text_markdown', field=models.CharField(default='',\n max_length=1000))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0033_auto_20171016_1334')]\n operations = [migrations.AlterField(model_name='sponsor', name=\n 'email_text_markdown', field=models.CharField(default='',\n max_length=1000))]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-10-16 12:35\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0033_auto_20171016_1334'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='sponsor',\n name='email_text_markdown',\n field=models.CharField(default='', max_length=1000),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(s_stemmer.stem('writing'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
p_stemmer = PorterStemmer()
s_stemmer = SnowballStemmer(language='english')
print(s_stemmer.stem('writing'))
<|reserved_special_token_1|>
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer
p_stemmer = PorterStemmer()
s_stemmer = SnowballStemmer(language='english')
print(s_stemmer.stem('writing'))
<|reserved_special_token_1|>
from nltk.stem.porter import PorterStemmer
from nltk.stem.snowball import SnowballStemmer

# Instantiate both NLTK stemmers; only the Snowball stemmer is used below.
p_stemmer = PorterStemmer()  # NOTE(review): unused -- presumably kept for comparison
s_stemmer = SnowballStemmer(language="english")


# Demonstrate stemming: prints the Snowball stem of "writing".
print(s_stemmer.stem("writing"))
|
flexible
|
{
"blob_id": "67e6d39ef291e4bb30c0b6bab7b71d97c86b0ef1",
"index": 4108,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(s_stemmer.stem('writing'))\n",
"step-3": "<mask token>\np_stemmer = PorterStemmer()\ns_stemmer = SnowballStemmer(language='english')\nprint(s_stemmer.stem('writing'))\n",
"step-4": "from nltk.stem.porter import PorterStemmer\nfrom nltk.stem.snowball import SnowballStemmer\np_stemmer = PorterStemmer()\ns_stemmer = SnowballStemmer(language='english')\nprint(s_stemmer.stem('writing'))\n",
"step-5": "from nltk.stem.porter import PorterStemmer\r\nfrom nltk.stem.snowball import SnowballStemmer\r\n\r\np_stemmer = PorterStemmer()\r\ns_stemmer = SnowballStemmer(language=\"english\")\r\n\r\n\r\nprint(s_stemmer.stem(\"writing\"))\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
cursor.execute(
'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC'
)
<|reserved_special_token_0|>
del data2['tweet_date']
del data2['tweet_date_with_time']
del data2['weekday']
print(data2.groupby('week_number')['count'].apply(list))
<|reserved_special_token_0|>
ax1.bar(bar_l, data4['Montag'], width=bar_width, label='Montag', alpha=0.5,
color='#1858ef')
ax1.bar(bar_l, data4['Dienstag'], width=bar_width, bottom=data4['Montag'],
label='Dienstag', alpha=0.5, color='#6618ef')
ax1.bar(bar_l, data4['Mittwoch'], width=bar_width, bottom=[(i + j) for i, j in
zip(data4['Montag'], data4['Dienstag'])], label='Mittwoch', alpha=0.5,
color='#ef1829')
ax1.bar(bar_l, data4['Donnerstag'], width=bar_width, bottom=[(i + j + k) for
i, j, k in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'])],
label='Donnerstag', alpha=0.5, color='#ef7c18')
ax1.bar(bar_l, data4['Freitag'], width=bar_width, bottom=[(i + j + k + l) for
i, j, k, l in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'],
data4['Donnerstag'])], label='Freitag', alpha=0.5, color='#efc718')
ax1.bar(bar_l, data4['Samstag'], width=bar_width, bottom=[(i + j + k + l +
m) for i, j, k, l, m in zip(data4['Montag'], data4['Dienstag'], data4[
'Mittwoch'], data4['Donnerstag'], data4['Freitag'])], label='Samstag',
alpha=0.5, color='#63ef18')
ax1.bar(bar_l, data4['Sonntag'], width=bar_width, bottom=[(i + j + k + l +
m + n) for i, j, k, l, m, n in zip(data4['Montag'], data4['Dienstag'],
data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'], data4[
'Samstag'])], label='Sonntag', alpha=0.5, color='#18efa3')
plt.xticks(tick_pos, data4['Kalendarwoche'])
ax1.set_ylabel('Häufigkeit')
ax1.set_xlabel('Kalendarwoche')
plt.legend(loc='upper left')
plt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])
<|reserved_special_token_0|>
grouped.set_index('calendar week').plot.bar(rot=45, title=
'Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15, 10),
fontsize=10)
<|reserved_special_token_0|>
data5.set_index('tweet_date').plot.bar(rot=90, title=
'Häufigkeit aller Hashtag in Tagen', figsize=(50, 25), color='#ef6618',
fontsize=14)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
database = psycopg2.connect(database='TeamYellow_election', user='student',
password='password', host='agdbs-edu01.imp.fu-berlin.de', port='5432')
cursor = database.cursor()
cursor.execute(
'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC'
)
result = cursor.fetchall()
data = DataFrame(result, columns=['tweet_date', 'count'])
data['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')
data['week_number'] = data['tweet_date_with_time'].dt.week
data['weekday'] = data['tweet_date_with_time'].dt.dayofweek
data2 = data.copy()
del data2['tweet_date']
del data2['tweet_date_with_time']
del data2['weekday']
print(data2.groupby('week_number')['count'].apply(list))
data3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0], 'KW02': [3, 1, 7, 1, 0,
1, 0], 'KW03': [0, 2, 6, 1, 11, 3, 2], 'KW04': [13, 5, 1, 3, 6, 2, 1],
'KW05': [0, 1, 2, 0, 4, 3, 4], 'KW06': [2, 6, 1, 2, 1, 5, 0], 'KW07': [
1, 3, 5, 2, 5, 2, 1], 'KW08': [2, 7, 1, 3, 5, 1, 3], 'KW09': [3, 10, 9,
3, 3, 6, 2], 'KW10': [0, 1, 2, 0, 2, 4, 0], 'KW11': [2, 3, 8, 0, 3, 10,
5], 'KW12': [0, 11, 4, 1, 0, 0, 0], 'KW13': [1, 0, 3, 2, 1, 6, 5],
'KW14': [4, 5, 0, 0, 1, 1, 2], 'KW15': [2, 4, 1, 2, 0, 4, 2], 'KW16': [
0, 11, 4, 2, 3, 4, 1], 'KW17': [2, 6, 0, 1, 1, 0, 0], 'KW18': [4, 8, 0,
1, 1, 0, 0], 'KW19': [2, 8, 3, 0, 0, 0, 0], 'KW20': [1, 1, 1, 0, 5, 0,
1], 'KW21': [0, 0, 2, 1, 1, 0, 0], 'KW22': [0, 0, 1, 4, 2, 3, 0],
'KW23': [0, 0, 1, 0, 1, 2, 0], 'KW24': [0, 0, 3, 0, 1, 4, 1], 'KW25': [
0, 0, 1, 10, 0, 0, 0], 'KW26': [1, 1, 0, 0, 2, 3, 0], 'KW27': [1, 0, 0,
2, 0, 0, 0], 'KW28': [1, 2, 2, 1, 0, 1, 0], 'KW29': [0, 1, 2, 7, 2, 1,
0], 'KW30': [1, 3, 3, 4, 0, 1, 1], 'KW31': [3, 2, 2, 0, 1, 4, 1],
'KW32': [1, 6, 0, 0, 0, 1, 0], 'KW33': [0, 0, 4, 0, 1, 1, 0], 'KW34': [
1, 0, 1, 2, 1, 2, 1], 'KW35': [2, 0, 1, 3, 1, 0, 0], 'KW36': [1, 1, 2,
2, 2, 0, 0], 'KW37': [0, 1, 1, 2, 4, 0, 0], 'KW38': [0, 3, 0, 2, 1, 1,
0], 'KW39': [3, 18, 0, 0, 0, 0, 0]})
data4 = data3.transpose()
data4.columns = ['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag',
'Samstag', 'Sonntag']
data4['Kalendarwoche'] = data4.index
f, ax1 = plt.subplots(1, figsize=(25, 20))
bar_width = 0.75
bar_l = [(i + 1) for i in range(len(data4['Montag']))]
tick_pos = [(i + bar_width / 2) for i in bar_l]
ax1.bar(bar_l, data4['Montag'], width=bar_width, label='Montag', alpha=0.5,
color='#1858ef')
ax1.bar(bar_l, data4['Dienstag'], width=bar_width, bottom=data4['Montag'],
label='Dienstag', alpha=0.5, color='#6618ef')
ax1.bar(bar_l, data4['Mittwoch'], width=bar_width, bottom=[(i + j) for i, j in
zip(data4['Montag'], data4['Dienstag'])], label='Mittwoch', alpha=0.5,
color='#ef1829')
ax1.bar(bar_l, data4['Donnerstag'], width=bar_width, bottom=[(i + j + k) for
i, j, k in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'])],
label='Donnerstag', alpha=0.5, color='#ef7c18')
ax1.bar(bar_l, data4['Freitag'], width=bar_width, bottom=[(i + j + k + l) for
i, j, k, l in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'],
data4['Donnerstag'])], label='Freitag', alpha=0.5, color='#efc718')
ax1.bar(bar_l, data4['Samstag'], width=bar_width, bottom=[(i + j + k + l +
m) for i, j, k, l, m in zip(data4['Montag'], data4['Dienstag'], data4[
'Mittwoch'], data4['Donnerstag'], data4['Freitag'])], label='Samstag',
alpha=0.5, color='#63ef18')
ax1.bar(bar_l, data4['Sonntag'], width=bar_width, bottom=[(i + j + k + l +
m + n) for i, j, k, l, m, n in zip(data4['Montag'], data4['Dienstag'],
data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'], data4[
'Samstag'])], label='Sonntag', alpha=0.5, color='#18efa3')
plt.xticks(tick_pos, data4['Kalendarwoche'])
ax1.set_ylabel('Häufigkeit')
ax1.set_xlabel('Kalendarwoche')
plt.legend(loc='upper left')
plt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])
kw = lambda x: x.isocalendar()[1]
grouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg(
{'count': 'sum'})
grouped['calendar week'] = ('KW1', 'KW2', 'KW3', 'KW4', 'KW5', 'KW6', 'KW7',
'KW8', 'KW9', 'KW10', 'KW11', 'KW12', 'KW13', 'KW14', 'KW15', 'KW16',
'KW17', 'KW18', 'KW19', 'KW20', 'KW21', 'KW22', 'KW23', 'KW24', 'KW25',
'KW26', 'KW27', 'KW28', 'KW29', 'KW30', 'KW31', 'KW32', 'KW33', 'KW34',
'KW35', 'KW36', 'KW37', 'KW38', 'KW39')
grouped.set_index('calendar week').plot.bar(rot=45, title=
'Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15, 10),
fontsize=10)
data5 = data[['tweet_date', 'count']].copy()
data5.set_index('tweet_date').plot.bar(rot=90, title=
'Häufigkeit aller Hashtag in Tagen', figsize=(50, 25), color='#ef6618',
fontsize=14)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2
from pandas.core.frame import DataFrame
database = psycopg2.connect(database='TeamYellow_election', user='student',
password='password', host='agdbs-edu01.imp.fu-berlin.de', port='5432')
cursor = database.cursor()
cursor.execute(
'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC'
)
result = cursor.fetchall()
data = DataFrame(result, columns=['tweet_date', 'count'])
data['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')
data['week_number'] = data['tweet_date_with_time'].dt.week
data['weekday'] = data['tweet_date_with_time'].dt.dayofweek
data2 = data.copy()
del data2['tweet_date']
del data2['tweet_date_with_time']
del data2['weekday']
print(data2.groupby('week_number')['count'].apply(list))
data3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0], 'KW02': [3, 1, 7, 1, 0,
1, 0], 'KW03': [0, 2, 6, 1, 11, 3, 2], 'KW04': [13, 5, 1, 3, 6, 2, 1],
'KW05': [0, 1, 2, 0, 4, 3, 4], 'KW06': [2, 6, 1, 2, 1, 5, 0], 'KW07': [
1, 3, 5, 2, 5, 2, 1], 'KW08': [2, 7, 1, 3, 5, 1, 3], 'KW09': [3, 10, 9,
3, 3, 6, 2], 'KW10': [0, 1, 2, 0, 2, 4, 0], 'KW11': [2, 3, 8, 0, 3, 10,
5], 'KW12': [0, 11, 4, 1, 0, 0, 0], 'KW13': [1, 0, 3, 2, 1, 6, 5],
'KW14': [4, 5, 0, 0, 1, 1, 2], 'KW15': [2, 4, 1, 2, 0, 4, 2], 'KW16': [
0, 11, 4, 2, 3, 4, 1], 'KW17': [2, 6, 0, 1, 1, 0, 0], 'KW18': [4, 8, 0,
1, 1, 0, 0], 'KW19': [2, 8, 3, 0, 0, 0, 0], 'KW20': [1, 1, 1, 0, 5, 0,
1], 'KW21': [0, 0, 2, 1, 1, 0, 0], 'KW22': [0, 0, 1, 4, 2, 3, 0],
'KW23': [0, 0, 1, 0, 1, 2, 0], 'KW24': [0, 0, 3, 0, 1, 4, 1], 'KW25': [
0, 0, 1, 10, 0, 0, 0], 'KW26': [1, 1, 0, 0, 2, 3, 0], 'KW27': [1, 0, 0,
2, 0, 0, 0], 'KW28': [1, 2, 2, 1, 0, 1, 0], 'KW29': [0, 1, 2, 7, 2, 1,
0], 'KW30': [1, 3, 3, 4, 0, 1, 1], 'KW31': [3, 2, 2, 0, 1, 4, 1],
'KW32': [1, 6, 0, 0, 0, 1, 0], 'KW33': [0, 0, 4, 0, 1, 1, 0], 'KW34': [
1, 0, 1, 2, 1, 2, 1], 'KW35': [2, 0, 1, 3, 1, 0, 0], 'KW36': [1, 1, 2,
2, 2, 0, 0], 'KW37': [0, 1, 1, 2, 4, 0, 0], 'KW38': [0, 3, 0, 2, 1, 1,
0], 'KW39': [3, 18, 0, 0, 0, 0, 0]})
data4 = data3.transpose()
data4.columns = ['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag',
'Samstag', 'Sonntag']
data4['Kalendarwoche'] = data4.index
f, ax1 = plt.subplots(1, figsize=(25, 20))
bar_width = 0.75
bar_l = [(i + 1) for i in range(len(data4['Montag']))]
tick_pos = [(i + bar_width / 2) for i in bar_l]
ax1.bar(bar_l, data4['Montag'], width=bar_width, label='Montag', alpha=0.5,
color='#1858ef')
ax1.bar(bar_l, data4['Dienstag'], width=bar_width, bottom=data4['Montag'],
label='Dienstag', alpha=0.5, color='#6618ef')
ax1.bar(bar_l, data4['Mittwoch'], width=bar_width, bottom=[(i + j) for i, j in
zip(data4['Montag'], data4['Dienstag'])], label='Mittwoch', alpha=0.5,
color='#ef1829')
ax1.bar(bar_l, data4['Donnerstag'], width=bar_width, bottom=[(i + j + k) for
i, j, k in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'])],
label='Donnerstag', alpha=0.5, color='#ef7c18')
ax1.bar(bar_l, data4['Freitag'], width=bar_width, bottom=[(i + j + k + l) for
i, j, k, l in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'],
data4['Donnerstag'])], label='Freitag', alpha=0.5, color='#efc718')
ax1.bar(bar_l, data4['Samstag'], width=bar_width, bottom=[(i + j + k + l +
m) for i, j, k, l, m in zip(data4['Montag'], data4['Dienstag'], data4[
'Mittwoch'], data4['Donnerstag'], data4['Freitag'])], label='Samstag',
alpha=0.5, color='#63ef18')
ax1.bar(bar_l, data4['Sonntag'], width=bar_width, bottom=[(i + j + k + l +
m + n) for i, j, k, l, m, n in zip(data4['Montag'], data4['Dienstag'],
data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'], data4[
'Samstag'])], label='Sonntag', alpha=0.5, color='#18efa3')
plt.xticks(tick_pos, data4['Kalendarwoche'])
ax1.set_ylabel('Häufigkeit')
ax1.set_xlabel('Kalendarwoche')
plt.legend(loc='upper left')
plt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])
kw = lambda x: x.isocalendar()[1]
grouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg(
{'count': 'sum'})
grouped['calendar week'] = ('KW1', 'KW2', 'KW3', 'KW4', 'KW5', 'KW6', 'KW7',
'KW8', 'KW9', 'KW10', 'KW11', 'KW12', 'KW13', 'KW14', 'KW15', 'KW16',
'KW17', 'KW18', 'KW19', 'KW20', 'KW21', 'KW22', 'KW23', 'KW24', 'KW25',
'KW26', 'KW27', 'KW28', 'KW29', 'KW30', 'KW31', 'KW32', 'KW33', 'KW34',
'KW35', 'KW36', 'KW37', 'KW38', 'KW39')
grouped.set_index('calendar week').plot.bar(rot=45, title=
'Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15, 10),
fontsize=10)
data5 = data[['tweet_date', 'count']].copy()
data5.set_index('tweet_date').plot.bar(rot=90, title=
'Häufigkeit aller Hashtag in Tagen', figsize=(50, 25), color='#ef6618',
fontsize=14)
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 1 10:18:11 2017
@author: Duong
"""
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2
from pandas.core.frame import DataFrame
# DBS verbinden
database = psycopg2.connect(database="TeamYellow_election", user="student", password="password", host="agdbs-edu01.imp.fu-berlin.de", port="5432")
# SQl-Abfrage
cursor = database.cursor()
cursor.execute(
'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC')
result = cursor.fetchall()
# Dataframe erstellen
data=DataFrame(result, columns=['tweet_date', 'count'])
#Umwandlung des Datentyp der Spalte tweet_date
data['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')
data['week_number'] = data['tweet_date_with_time'].dt.week
data['weekday']= data['tweet_date_with_time'].dt.dayofweek
# Gruppierung der Kalendarwochen mit einzelnen Counts
data2=data.copy()
del data2['tweet_date']
del data2['tweet_date_with_time']
del data2['weekday']
print(data2.groupby('week_number')['count'].apply(list))
# Aufbau Dataframe auf Erkenntnisse aus data2-Prints
data3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0],
'KW02': [3, 1, 7, 1, 0, 1, 0],
'KW03': [0, 2, 6, 1, 11, 3, 2],
'KW04': [13, 5, 1, 3, 6, 2, 1],
'KW05': [0, 1, 2, 0, 4, 3, 4],
'KW06': [2, 6, 1, 2, 1, 5, 0],
'KW07': [1, 3, 5, 2, 5, 2, 1],
'KW08': [2, 7, 1, 3, 5, 1, 3],
'KW09': [3, 10, 9, 3, 3, 6, 2],
'KW10': [0, 1, 2, 0, 2, 4, 0],
'KW11': [2, 3, 8, 0, 3, 10, 5],
'KW12': [0, 11, 4, 1, 0, 0, 0],
'KW13': [1, 0, 3, 2, 1, 6, 5],
'KW14': [4, 5, 0, 0, 1, 1, 2],
'KW15': [2, 4, 1, 2, 0, 4, 2],
'KW16': [0, 11, 4, 2, 3, 4, 1],
'KW17': [2, 6, 0, 1, 1, 0, 0],
'KW18': [4, 8, 0, 1, 1, 0, 0],
'KW19': [2, 8, 3, 0, 0, 0, 0],
'KW20': [1, 1, 1, 0, 5, 0, 1],
'KW21': [0, 0, 2, 1, 1, 0, 0],
'KW22': [0, 0, 1, 4, 2, 3, 0],
'KW23': [0, 0, 1, 0, 1, 2, 0],
'KW24': [0, 0, 3, 0, 1, 4, 1],
'KW25': [0, 0, 1, 10, 0, 0, 0],
'KW26': [1, 1, 0, 0, 2, 3, 0],
'KW27': [1, 0, 0, 2, 0, 0, 0],
'KW28': [1, 2, 2, 1, 0, 1, 0],
'KW29': [0, 1, 2, 7, 2, 1, 0],
'KW30': [1, 3, 3, 4, 0, 1, 1],
'KW31': [3, 2, 2, 0, 1, 4, 1],
'KW32': [1, 6, 0, 0, 0, 1, 0],
'KW33': [0, 0, 4, 0, 1, 1, 0],
'KW34': [1, 0, 1, 2, 1, 2, 1],
'KW35': [2, 0, 1, 3, 1, 0, 0],
'KW36': [1, 1, 2, 2, 2, 0, 0],
'KW37': [0, 1, 1, 2, 4, 0, 0],
'KW38': [0, 3, 0, 2, 1, 1, 0],
'KW39': [3, 18, 0, 0, 0, 0, 0]})
data4= data3.transpose()
data4.columns =['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag', 'Sonntag']
data4['Kalendarwoche']=data4.index
############################# Bau eines Stacked Bar Chart ############################################
#Grundgerüst des Balkendiagramms
f, ax1 = plt.subplots(1, figsize=(25,20))
# Balkengröße
bar_width = 0.75
# Balken fangen von links an
bar_l = [i+1 for i in range(len(data4['Montag']))]
# Position der X-Achsen Werte
tick_pos = [i+(bar_width/2) for i in bar_l]
# Beginn der Erstellung der Balken nach Wochentagen
ax1.bar(bar_l,
data4['Montag'],
width=bar_width,
label='Montag',
alpha=0.5,
color='#1858ef')
ax1.bar(bar_l,
data4['Dienstag'],
width=bar_width,
bottom=data4['Montag'],
label='Dienstag',
alpha=0.5,
color='#6618ef')
ax1.bar(bar_l,
data4['Mittwoch'],
width=bar_width,
bottom=[i+j for i,j in zip(data4['Montag'],data4['Dienstag'])],
label='Mittwoch',
alpha=0.5,
color='#ef1829')
# --- Stacked bar chart (continued): one bar per calendar week, one stacked
# --- segment per weekday. ax1, bar_l, bar_width, tick_pos, data and data4
# --- are defined earlier in this script.
ax1.bar(bar_l,
data4['Donnerstag'],
width=bar_width,
# Stack Thursday on top of the Mon+Tue+Wed totals.
bottom=[i+j+k for i,j,k in zip(data4['Montag'],data4['Dienstag'], data4['Mittwoch'])],
label='Donnerstag',
alpha=0.5,
color='#ef7c18')
ax1.bar(bar_l,
data4['Freitag'],
width=bar_width,
# Stack Friday on top of Mon..Thu.
bottom=[i+j+k+l for i,j,k,l in zip(data4['Montag'],data4['Dienstag'],
data4['Mittwoch'], data4['Donnerstag'])],
label='Freitag',
alpha=0.5,
color='#efc718')
ax1.bar(bar_l,
data4['Samstag'],
width=bar_width,
# Stack Saturday on top of Mon..Fri.
bottom=[i+j+k+l+m for i,j,k,l,m in zip(data4['Montag'],data4['Dienstag'],
data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'])],
label='Samstag',
alpha=0.5,
color='#63ef18')
ax1.bar(bar_l,
data4['Sonntag'],
width=bar_width,
# Stack Sunday on top of Mon..Sat.
bottom=[i+j+k+l+m+n for i,j,k,l,m,n in zip(data4['Montag'],data4['Dienstag'],
data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'],
data4['Samstag'])],
label='Sonntag',
alpha=0.5,
color='#18efa3')
# Label the x-axis ticks with the calendar-week names.
plt.xticks(tick_pos, data4['Kalendarwoche'])
# Axis labels and legend.
ax1.set_ylabel("Häufigkeit")
ax1.set_xlabel("Kalendarwoche")
plt.legend(loc='upper left')
# Leave some horizontal space on both sides of the bars.
plt.xlim([min(tick_pos)-bar_width, max(tick_pos)+bar_width])
############### Bar chart by calendar week #########################################
# Map each timestamp to its ISO calendar-week number and sum counts per week.
kw = lambda x: x.isocalendar()[1]
grouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg({'count': 'sum'})
# Human-readable week labels; assumes grouped has exactly 39 rows (KW1..KW39) — TODO confirm.
grouped['calendar week']= ('KW1','KW2','KW3','KW4','KW5','KW6','KW7','KW8','KW9','KW10','KW11','KW12','KW13',
'KW14','KW15','KW16','KW17','KW18','KW19','KW20','KW21','KW22','KW23','KW24','KW25','KW26', 'KW27','KW28','KW29',
'KW30','KW31','KW32','KW33','KW34','KW35','KW36','KW37','KW38','KW39')
# Bar chart of all hashtag uses per calendar week.
grouped.set_index('calendar week').plot.bar(rot=45, title='Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15,10), fontsize=10)
############## Bar chart of all hashtag uses per day #####################################
data5=data[['tweet_date','count']].copy()
# Bar chart of all hashtag uses per day.
data5.set_index('tweet_date').plot.bar(rot=90, title='Häufigkeit aller Hashtag in Tagen', figsize=(50,25), color ='#ef6618', fontsize=14)
|
flexible
|
{
"blob_id": "076b852010ddcea69a294f9f2a653bb2fa2f2676",
"index": 3531,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncursor.execute(\n 'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC'\n )\n<mask token>\ndel data2['tweet_date']\ndel data2['tweet_date_with_time']\ndel data2['weekday']\nprint(data2.groupby('week_number')['count'].apply(list))\n<mask token>\nax1.bar(bar_l, data4['Montag'], width=bar_width, label='Montag', alpha=0.5,\n color='#1858ef')\nax1.bar(bar_l, data4['Dienstag'], width=bar_width, bottom=data4['Montag'],\n label='Dienstag', alpha=0.5, color='#6618ef')\nax1.bar(bar_l, data4['Mittwoch'], width=bar_width, bottom=[(i + j) for i, j in\n zip(data4['Montag'], data4['Dienstag'])], label='Mittwoch', alpha=0.5,\n color='#ef1829')\nax1.bar(bar_l, data4['Donnerstag'], width=bar_width, bottom=[(i + j + k) for\n i, j, k in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'])],\n label='Donnerstag', alpha=0.5, color='#ef7c18')\nax1.bar(bar_l, data4['Freitag'], width=bar_width, bottom=[(i + j + k + l) for\n i, j, k, l in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'],\n data4['Donnerstag'])], label='Freitag', alpha=0.5, color='#efc718')\nax1.bar(bar_l, data4['Samstag'], width=bar_width, bottom=[(i + j + k + l +\n m) for i, j, k, l, m in zip(data4['Montag'], data4['Dienstag'], data4[\n 'Mittwoch'], data4['Donnerstag'], data4['Freitag'])], label='Samstag',\n alpha=0.5, color='#63ef18')\nax1.bar(bar_l, data4['Sonntag'], width=bar_width, bottom=[(i + j + k + l +\n m + n) for i, j, k, l, m, n in zip(data4['Montag'], data4['Dienstag'],\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'], data4[\n 'Samstag'])], label='Sonntag', alpha=0.5, color='#18efa3')\nplt.xticks(tick_pos, data4['Kalendarwoche'])\nax1.set_ylabel('Häufigkeit')\nax1.set_xlabel('Kalendarwoche')\nplt.legend(loc='upper left')\nplt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])\n<mask token>\ngrouped.set_index('calendar 
week').plot.bar(rot=45, title=\n 'Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15, 10),\n fontsize=10)\n<mask token>\ndata5.set_index('tweet_date').plot.bar(rot=90, title=\n 'Häufigkeit aller Hashtag in Tagen', figsize=(50, 25), color='#ef6618',\n fontsize=14)\n",
"step-3": "<mask token>\ndatabase = psycopg2.connect(database='TeamYellow_election', user='student',\n password='password', host='agdbs-edu01.imp.fu-berlin.de', port='5432')\ncursor = database.cursor()\ncursor.execute(\n 'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC'\n )\nresult = cursor.fetchall()\ndata = DataFrame(result, columns=['tweet_date', 'count'])\ndata['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')\ndata['week_number'] = data['tweet_date_with_time'].dt.week\ndata['weekday'] = data['tweet_date_with_time'].dt.dayofweek\ndata2 = data.copy()\ndel data2['tweet_date']\ndel data2['tweet_date_with_time']\ndel data2['weekday']\nprint(data2.groupby('week_number')['count'].apply(list))\ndata3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0], 'KW02': [3, 1, 7, 1, 0,\n 1, 0], 'KW03': [0, 2, 6, 1, 11, 3, 2], 'KW04': [13, 5, 1, 3, 6, 2, 1],\n 'KW05': [0, 1, 2, 0, 4, 3, 4], 'KW06': [2, 6, 1, 2, 1, 5, 0], 'KW07': [\n 1, 3, 5, 2, 5, 2, 1], 'KW08': [2, 7, 1, 3, 5, 1, 3], 'KW09': [3, 10, 9,\n 3, 3, 6, 2], 'KW10': [0, 1, 2, 0, 2, 4, 0], 'KW11': [2, 3, 8, 0, 3, 10,\n 5], 'KW12': [0, 11, 4, 1, 0, 0, 0], 'KW13': [1, 0, 3, 2, 1, 6, 5],\n 'KW14': [4, 5, 0, 0, 1, 1, 2], 'KW15': [2, 4, 1, 2, 0, 4, 2], 'KW16': [\n 0, 11, 4, 2, 3, 4, 1], 'KW17': [2, 6, 0, 1, 1, 0, 0], 'KW18': [4, 8, 0,\n 1, 1, 0, 0], 'KW19': [2, 8, 3, 0, 0, 0, 0], 'KW20': [1, 1, 1, 0, 5, 0, \n 1], 'KW21': [0, 0, 2, 1, 1, 0, 0], 'KW22': [0, 0, 1, 4, 2, 3, 0],\n 'KW23': [0, 0, 1, 0, 1, 2, 0], 'KW24': [0, 0, 3, 0, 1, 4, 1], 'KW25': [\n 0, 0, 1, 10, 0, 0, 0], 'KW26': [1, 1, 0, 0, 2, 3, 0], 'KW27': [1, 0, 0,\n 2, 0, 0, 0], 'KW28': [1, 2, 2, 1, 0, 1, 0], 'KW29': [0, 1, 2, 7, 2, 1, \n 0], 'KW30': [1, 3, 3, 4, 0, 1, 1], 'KW31': [3, 2, 2, 0, 1, 4, 1],\n 'KW32': [1, 6, 0, 0, 0, 1, 0], 'KW33': [0, 0, 4, 0, 1, 1, 0], 'KW34': [\n 1, 0, 1, 2, 1, 2, 1], 'KW35': [2, 0, 1, 3, 1, 
0, 0], 'KW36': [1, 1, 2, \n 2, 2, 0, 0], 'KW37': [0, 1, 1, 2, 4, 0, 0], 'KW38': [0, 3, 0, 2, 1, 1, \n 0], 'KW39': [3, 18, 0, 0, 0, 0, 0]})\ndata4 = data3.transpose()\ndata4.columns = ['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag',\n 'Samstag', 'Sonntag']\ndata4['Kalendarwoche'] = data4.index\nf, ax1 = plt.subplots(1, figsize=(25, 20))\nbar_width = 0.75\nbar_l = [(i + 1) for i in range(len(data4['Montag']))]\ntick_pos = [(i + bar_width / 2) for i in bar_l]\nax1.bar(bar_l, data4['Montag'], width=bar_width, label='Montag', alpha=0.5,\n color='#1858ef')\nax1.bar(bar_l, data4['Dienstag'], width=bar_width, bottom=data4['Montag'],\n label='Dienstag', alpha=0.5, color='#6618ef')\nax1.bar(bar_l, data4['Mittwoch'], width=bar_width, bottom=[(i + j) for i, j in\n zip(data4['Montag'], data4['Dienstag'])], label='Mittwoch', alpha=0.5,\n color='#ef1829')\nax1.bar(bar_l, data4['Donnerstag'], width=bar_width, bottom=[(i + j + k) for\n i, j, k in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'])],\n label='Donnerstag', alpha=0.5, color='#ef7c18')\nax1.bar(bar_l, data4['Freitag'], width=bar_width, bottom=[(i + j + k + l) for\n i, j, k, l in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'],\n data4['Donnerstag'])], label='Freitag', alpha=0.5, color='#efc718')\nax1.bar(bar_l, data4['Samstag'], width=bar_width, bottom=[(i + j + k + l +\n m) for i, j, k, l, m in zip(data4['Montag'], data4['Dienstag'], data4[\n 'Mittwoch'], data4['Donnerstag'], data4['Freitag'])], label='Samstag',\n alpha=0.5, color='#63ef18')\nax1.bar(bar_l, data4['Sonntag'], width=bar_width, bottom=[(i + j + k + l +\n m + n) for i, j, k, l, m, n in zip(data4['Montag'], data4['Dienstag'],\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'], data4[\n 'Samstag'])], label='Sonntag', alpha=0.5, color='#18efa3')\nplt.xticks(tick_pos, data4['Kalendarwoche'])\nax1.set_ylabel('Häufigkeit')\nax1.set_xlabel('Kalendarwoche')\nplt.legend(loc='upper left')\nplt.xlim([min(tick_pos) - bar_width, 
max(tick_pos) + bar_width])\nkw = lambda x: x.isocalendar()[1]\ngrouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg(\n {'count': 'sum'})\ngrouped['calendar week'] = ('KW1', 'KW2', 'KW3', 'KW4', 'KW5', 'KW6', 'KW7',\n 'KW8', 'KW9', 'KW10', 'KW11', 'KW12', 'KW13', 'KW14', 'KW15', 'KW16',\n 'KW17', 'KW18', 'KW19', 'KW20', 'KW21', 'KW22', 'KW23', 'KW24', 'KW25',\n 'KW26', 'KW27', 'KW28', 'KW29', 'KW30', 'KW31', 'KW32', 'KW33', 'KW34',\n 'KW35', 'KW36', 'KW37', 'KW38', 'KW39')\ngrouped.set_index('calendar week').plot.bar(rot=45, title=\n 'Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15, 10),\n fontsize=10)\ndata5 = data[['tweet_date', 'count']].copy()\ndata5.set_index('tweet_date').plot.bar(rot=90, title=\n 'Häufigkeit aller Hashtag in Tagen', figsize=(50, 25), color='#ef6618',\n fontsize=14)\n",
"step-4": "<mask token>\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport psycopg2\nfrom pandas.core.frame import DataFrame\ndatabase = psycopg2.connect(database='TeamYellow_election', user='student',\n password='password', host='agdbs-edu01.imp.fu-berlin.de', port='5432')\ncursor = database.cursor()\ncursor.execute(\n 'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC'\n )\nresult = cursor.fetchall()\ndata = DataFrame(result, columns=['tweet_date', 'count'])\ndata['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')\ndata['week_number'] = data['tweet_date_with_time'].dt.week\ndata['weekday'] = data['tweet_date_with_time'].dt.dayofweek\ndata2 = data.copy()\ndel data2['tweet_date']\ndel data2['tweet_date_with_time']\ndel data2['weekday']\nprint(data2.groupby('week_number')['count'].apply(list))\ndata3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0], 'KW02': [3, 1, 7, 1, 0,\n 1, 0], 'KW03': [0, 2, 6, 1, 11, 3, 2], 'KW04': [13, 5, 1, 3, 6, 2, 1],\n 'KW05': [0, 1, 2, 0, 4, 3, 4], 'KW06': [2, 6, 1, 2, 1, 5, 0], 'KW07': [\n 1, 3, 5, 2, 5, 2, 1], 'KW08': [2, 7, 1, 3, 5, 1, 3], 'KW09': [3, 10, 9,\n 3, 3, 6, 2], 'KW10': [0, 1, 2, 0, 2, 4, 0], 'KW11': [2, 3, 8, 0, 3, 10,\n 5], 'KW12': [0, 11, 4, 1, 0, 0, 0], 'KW13': [1, 0, 3, 2, 1, 6, 5],\n 'KW14': [4, 5, 0, 0, 1, 1, 2], 'KW15': [2, 4, 1, 2, 0, 4, 2], 'KW16': [\n 0, 11, 4, 2, 3, 4, 1], 'KW17': [2, 6, 0, 1, 1, 0, 0], 'KW18': [4, 8, 0,\n 1, 1, 0, 0], 'KW19': [2, 8, 3, 0, 0, 0, 0], 'KW20': [1, 1, 1, 0, 5, 0, \n 1], 'KW21': [0, 0, 2, 1, 1, 0, 0], 'KW22': [0, 0, 1, 4, 2, 3, 0],\n 'KW23': [0, 0, 1, 0, 1, 2, 0], 'KW24': [0, 0, 3, 0, 1, 4, 1], 'KW25': [\n 0, 0, 1, 10, 0, 0, 0], 'KW26': [1, 1, 0, 0, 2, 3, 0], 'KW27': [1, 0, 0,\n 2, 0, 0, 0], 'KW28': [1, 2, 2, 1, 0, 1, 0], 'KW29': [0, 1, 2, 7, 2, 1, \n 0], 'KW30': [1, 3, 3, 4, 0, 1, 1], 'KW31': [3, 2, 2, 0, 1, 4, 1],\n 'KW32': 
[1, 6, 0, 0, 0, 1, 0], 'KW33': [0, 0, 4, 0, 1, 1, 0], 'KW34': [\n 1, 0, 1, 2, 1, 2, 1], 'KW35': [2, 0, 1, 3, 1, 0, 0], 'KW36': [1, 1, 2, \n 2, 2, 0, 0], 'KW37': [0, 1, 1, 2, 4, 0, 0], 'KW38': [0, 3, 0, 2, 1, 1, \n 0], 'KW39': [3, 18, 0, 0, 0, 0, 0]})\ndata4 = data3.transpose()\ndata4.columns = ['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag',\n 'Samstag', 'Sonntag']\ndata4['Kalendarwoche'] = data4.index\nf, ax1 = plt.subplots(1, figsize=(25, 20))\nbar_width = 0.75\nbar_l = [(i + 1) for i in range(len(data4['Montag']))]\ntick_pos = [(i + bar_width / 2) for i in bar_l]\nax1.bar(bar_l, data4['Montag'], width=bar_width, label='Montag', alpha=0.5,\n color='#1858ef')\nax1.bar(bar_l, data4['Dienstag'], width=bar_width, bottom=data4['Montag'],\n label='Dienstag', alpha=0.5, color='#6618ef')\nax1.bar(bar_l, data4['Mittwoch'], width=bar_width, bottom=[(i + j) for i, j in\n zip(data4['Montag'], data4['Dienstag'])], label='Mittwoch', alpha=0.5,\n color='#ef1829')\nax1.bar(bar_l, data4['Donnerstag'], width=bar_width, bottom=[(i + j + k) for\n i, j, k in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'])],\n label='Donnerstag', alpha=0.5, color='#ef7c18')\nax1.bar(bar_l, data4['Freitag'], width=bar_width, bottom=[(i + j + k + l) for\n i, j, k, l in zip(data4['Montag'], data4['Dienstag'], data4['Mittwoch'],\n data4['Donnerstag'])], label='Freitag', alpha=0.5, color='#efc718')\nax1.bar(bar_l, data4['Samstag'], width=bar_width, bottom=[(i + j + k + l +\n m) for i, j, k, l, m in zip(data4['Montag'], data4['Dienstag'], data4[\n 'Mittwoch'], data4['Donnerstag'], data4['Freitag'])], label='Samstag',\n alpha=0.5, color='#63ef18')\nax1.bar(bar_l, data4['Sonntag'], width=bar_width, bottom=[(i + j + k + l +\n m + n) for i, j, k, l, m, n in zip(data4['Montag'], data4['Dienstag'],\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'], data4[\n 'Samstag'])], label='Sonntag', alpha=0.5, color='#18efa3')\nplt.xticks(tick_pos, 
data4['Kalendarwoche'])\nax1.set_ylabel('Häufigkeit')\nax1.set_xlabel('Kalendarwoche')\nplt.legend(loc='upper left')\nplt.xlim([min(tick_pos) - bar_width, max(tick_pos) + bar_width])\nkw = lambda x: x.isocalendar()[1]\ngrouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg(\n {'count': 'sum'})\ngrouped['calendar week'] = ('KW1', 'KW2', 'KW3', 'KW4', 'KW5', 'KW6', 'KW7',\n 'KW8', 'KW9', 'KW10', 'KW11', 'KW12', 'KW13', 'KW14', 'KW15', 'KW16',\n 'KW17', 'KW18', 'KW19', 'KW20', 'KW21', 'KW22', 'KW23', 'KW24', 'KW25',\n 'KW26', 'KW27', 'KW28', 'KW29', 'KW30', 'KW31', 'KW32', 'KW33', 'KW34',\n 'KW35', 'KW36', 'KW37', 'KW38', 'KW39')\ngrouped.set_index('calendar week').plot.bar(rot=45, title=\n 'Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15, 10),\n fontsize=10)\ndata5 = data[['tweet_date', 'count']].copy()\ndata5.set_index('tweet_date').plot.bar(rot=90, title=\n 'Häufigkeit aller Hashtag in Tagen', figsize=(50, 25), color='#ef6618',\n fontsize=14)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jul 1 10:18:11 2017\r\n\r\n@author: Duong\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport psycopg2\r\nfrom pandas.core.frame import DataFrame\r\n\r\n\r\n\r\n\r\n\r\n# DBS verbinden\r\ndatabase = psycopg2.connect(database=\"TeamYellow_election\", user=\"student\", password=\"password\", host=\"agdbs-edu01.imp.fu-berlin.de\", port=\"5432\")\r\n\r\n# SQl-Abfrage\r\ncursor = database.cursor()\r\ncursor.execute(\r\n 'SELECT tweet_date, COUNT(*) FROM projekt_election.tweet as tweet , projekt_election.hashtag_use as use WHERE tweet.tweet_id = use.tweet_id GROUP BY tweet_date ORDER BY tweet_date ASC')\r\nresult = cursor.fetchall()\r\n\r\n# Dataframe erstellen\r\ndata=DataFrame(result, columns=['tweet_date', 'count'])\r\n\r\n\r\n#Umwandlung des Datentyp der Spalte tweet_date\r\ndata['tweet_date_with_time'] = data['tweet_date'].astype('datetime64[ns]')\r\ndata['week_number'] = data['tweet_date_with_time'].dt.week\r\ndata['weekday']= data['tweet_date_with_time'].dt.dayofweek\r\n\r\n\r\n# Gruppierung der Kalendarwochen mit einzelnen Counts\r\ndata2=data.copy()\r\ndel data2['tweet_date']\r\ndel data2['tweet_date_with_time']\r\ndel data2['weekday']\r\n\r\nprint(data2.groupby('week_number')['count'].apply(list))\r\n\r\n# Aufbau Dataframe auf Erkenntnisse aus data2-Prints\r\ndata3 = pd.DataFrame({'KW01': [0, 0, 1, 0, 3, 0, 0],\r\n 'KW02': [3, 1, 7, 1, 0, 1, 0],\r\n 'KW03': [0, 2, 6, 1, 11, 3, 2],\r\n 'KW04': [13, 5, 1, 3, 6, 2, 1],\r\n 'KW05': [0, 1, 2, 0, 4, 3, 4],\r\n 'KW06': [2, 6, 1, 2, 1, 5, 0],\r\n 'KW07': [1, 3, 5, 2, 5, 2, 1],\r\n 'KW08': [2, 7, 1, 3, 5, 1, 3],\r\n 'KW09': [3, 10, 9, 3, 3, 6, 2],\r\n 'KW10': [0, 1, 2, 0, 2, 4, 0],\r\n 'KW11': [2, 3, 8, 0, 3, 10, 5],\r\n 'KW12': [0, 11, 4, 1, 0, 0, 0],\r\n 'KW13': [1, 0, 3, 2, 1, 6, 5],\r\n 'KW14': [4, 5, 0, 0, 1, 1, 2],\r\n 'KW15': [2, 4, 1, 2, 0, 4, 2],\r\n 'KW16': [0, 11, 4, 2, 3, 4, 1],\r\n 'KW17': [2, 6, 0, 1, 1, 0, 0],\r\n 
'KW18': [4, 8, 0, 1, 1, 0, 0],\r\n 'KW19': [2, 8, 3, 0, 0, 0, 0],\r\n 'KW20': [1, 1, 1, 0, 5, 0, 1],\r\n 'KW21': [0, 0, 2, 1, 1, 0, 0],\r\n 'KW22': [0, 0, 1, 4, 2, 3, 0],\r\n 'KW23': [0, 0, 1, 0, 1, 2, 0],\r\n 'KW24': [0, 0, 3, 0, 1, 4, 1],\r\n 'KW25': [0, 0, 1, 10, 0, 0, 0],\r\n 'KW26': [1, 1, 0, 0, 2, 3, 0],\r\n 'KW27': [1, 0, 0, 2, 0, 0, 0],\r\n 'KW28': [1, 2, 2, 1, 0, 1, 0],\r\n 'KW29': [0, 1, 2, 7, 2, 1, 0],\r\n 'KW30': [1, 3, 3, 4, 0, 1, 1],\r\n 'KW31': [3, 2, 2, 0, 1, 4, 1],\r\n 'KW32': [1, 6, 0, 0, 0, 1, 0],\r\n 'KW33': [0, 0, 4, 0, 1, 1, 0],\r\n 'KW34': [1, 0, 1, 2, 1, 2, 1],\r\n 'KW35': [2, 0, 1, 3, 1, 0, 0],\r\n 'KW36': [1, 1, 2, 2, 2, 0, 0],\r\n 'KW37': [0, 1, 1, 2, 4, 0, 0],\r\n 'KW38': [0, 3, 0, 2, 1, 1, 0],\r\n 'KW39': [3, 18, 0, 0, 0, 0, 0]})\r\n\r\n\r\ndata4= data3.transpose()\r\ndata4.columns =['Montag', 'Dienstag', 'Mittwoch', 'Donnerstag', 'Freitag', 'Samstag', 'Sonntag']\r\ndata4['Kalendarwoche']=data4.index\r\n\r\n############################# Bau eines Stacked Bar Chart ############################################\r\n\r\n#Grundgerüst des Balkendiagramms\r\nf, ax1 = plt.subplots(1, figsize=(25,20))\r\n\r\n# Balkengröße\r\nbar_width = 0.75\r\n\r\n# Balken fangen von links an\r\nbar_l = [i+1 for i in range(len(data4['Montag']))]\r\n\r\n# Position der X-Achsen Werte\r\ntick_pos = [i+(bar_width/2) for i in bar_l]\r\n\r\n# Beginn der Erstellung der Balken nach Wochentagen\r\nax1.bar(bar_l,\r\n data4['Montag'],\r\n width=bar_width,\r\n label='Montag',\r\n alpha=0.5,\r\n color='#1858ef')\r\n\r\n\r\nax1.bar(bar_l,\r\n data4['Dienstag'],\r\n width=bar_width,\r\n bottom=data4['Montag'],\r\n label='Dienstag',\r\n alpha=0.5,\r\n color='#6618ef')\r\n\r\nax1.bar(bar_l,\r\n data4['Mittwoch'],\r\n width=bar_width,\r\n bottom=[i+j for i,j in zip(data4['Montag'],data4['Dienstag'])],\r\n label='Mittwoch',\r\n alpha=0.5,\r\n color='#ef1829')\r\n\r\nax1.bar(bar_l,\r\n data4['Donnerstag'],\r\n width=bar_width,\r\n bottom=[i+j+k for i,j,k in 
zip(data4['Montag'],data4['Dienstag'], data4['Mittwoch'])],\r\n label='Donnerstag',\r\n alpha=0.5,\r\n color='#ef7c18')\r\n\r\nax1.bar(bar_l,\r\n data4['Freitag'],\r\n width=bar_width,\r\n bottom=[i+j+k+l for i,j,k,l in zip(data4['Montag'],data4['Dienstag'], \r\n data4['Mittwoch'], data4['Donnerstag'])],\r\n label='Freitag',\r\n alpha=0.5,\r\n color='#efc718')\r\n\r\nax1.bar(bar_l,\r\n data4['Samstag'],\r\n width=bar_width,\r\n bottom=[i+j+k+l+m for i,j,k,l,m in zip(data4['Montag'],data4['Dienstag'], \r\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'])],\r\n label='Samstag',\r\n alpha=0.5,\r\n color='#63ef18')\r\n\r\n\r\nax1.bar(bar_l,\r\n data4['Sonntag'],\r\n width=bar_width,\r\n bottom=[i+j+k+l+m+n for i,j,k,l,m,n in zip(data4['Montag'],data4['Dienstag'], \r\n data4['Mittwoch'], data4['Donnerstag'], data4['Freitag'],\r\n data4['Samstag'])],\r\n label='Sonntag',\r\n alpha=0.5,\r\n color='#18efa3')\r\n\r\n# X-Achse mit Werte versehen\r\nplt.xticks(tick_pos, data4['Kalendarwoche'])\r\n\r\n#Legende\r\nax1.set_ylabel(\"Häufigkeit\")\r\nax1.set_xlabel(\"Kalendarwoche\")\r\nplt.legend(loc='upper left')\r\n\r\n# Zwischen den Diagrammen Platz lassen\r\nplt.xlim([min(tick_pos)-bar_width, max(tick_pos)+bar_width])\r\n\r\n############### Balkendiagramm nach Kalendarwoche#########################################\r\n\r\nkw = lambda x: x.isocalendar()[1]\r\ngrouped = data.groupby([data['tweet_date_with_time'].map(kw)], sort=False).agg({'count': 'sum'})\r\n\r\ngrouped['calendar week']= ('KW1','KW2','KW3','KW4','KW5','KW6','KW7','KW8','KW9','KW10','KW11','KW12','KW13',\r\n 'KW14','KW15','KW16','KW17','KW18','KW19','KW20','KW21','KW22','KW23','KW24','KW25','KW26', 'KW27','KW28','KW29',\r\n 'KW30','KW31','KW32','KW33','KW34','KW35','KW36','KW37','KW38','KW39')\r\n\r\n\r\n\r\n#Balkendiagramm für alle Hashtag in Kalendarwoche\r\ngrouped.set_index('calendar week').plot.bar(rot=45, title='Nutzung von #makeamericagreatagain in Kalendarwoche', figsize=(15,10), 
fontsize=10)\r\n\r\n############## Balkendiagramm für alle Hashtag pro Tag #####################################\r\ndata5=data[['tweet_date','count']].copy()\r\n#Balkendiagramm für alle Hashtag in Tagen\r\ndata5.set_index('tweet_date').plot.bar(rot=90, title='Häufigkeit aller Hashtag in Tagen', figsize=(50,25), color ='#ef6618', fontsize=14)\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@register(NewsModel)
class ProjectTranslationOptions(TranslationOptions):
fields = 'name', 'text'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@register(PageTitleModel)
class TitleTranslationOptions(TranslationOptions):
<|reserved_special_token_0|>
@register(NewsModel)
class ProjectTranslationOptions(TranslationOptions):
fields = 'name', 'text'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@register(PageTitleModel)
class TitleTranslationOptions(TranslationOptions):
fields = 'name',
@register(NewsModel)
class ProjectTranslationOptions(TranslationOptions):
fields = 'name', 'text'
<|reserved_special_token_1|>
from modeltranslation.translator import register, TranslationOptions
from .models import *
@register(PageTitleModel)
class TitleTranslationOptions(TranslationOptions):
fields = 'name',
@register(NewsModel)
class ProjectTranslationOptions(TranslationOptions):
fields = 'name', 'text'
<|reserved_special_token_1|>
from modeltranslation.translator import register, TranslationOptions

from .models import *


@register(PageTitleModel)
class TitleTranslationOptions(TranslationOptions):
    """Register the ``name`` field of ``PageTitleModel`` for translation."""

    fields = ('name',)


@register(NewsModel)
class ProjectTranslationOptions(TranslationOptions):
    """Register the ``name`` and ``text`` fields of ``NewsModel`` for translation."""

    fields = ('name', 'text')
|
flexible
|
{
"blob_id": "9c29f04746de6847ad1bbdf08964d14e6c3766db",
"index": 8700,
"step-1": "<mask token>\n\n\n@register(NewsModel)\nclass ProjectTranslationOptions(TranslationOptions):\n fields = 'name', 'text'\n",
"step-2": "<mask token>\n\n\n@register(PageTitleModel)\nclass TitleTranslationOptions(TranslationOptions):\n <mask token>\n\n\n@register(NewsModel)\nclass ProjectTranslationOptions(TranslationOptions):\n fields = 'name', 'text'\n",
"step-3": "<mask token>\n\n\n@register(PageTitleModel)\nclass TitleTranslationOptions(TranslationOptions):\n fields = 'name',\n\n\n@register(NewsModel)\nclass ProjectTranslationOptions(TranslationOptions):\n fields = 'name', 'text'\n",
"step-4": "from modeltranslation.translator import register, TranslationOptions\nfrom .models import *\n\n\n@register(PageTitleModel)\nclass TitleTranslationOptions(TranslationOptions):\n fields = 'name',\n\n\n@register(NewsModel)\nclass ProjectTranslationOptions(TranslationOptions):\n fields = 'name', 'text'\n",
"step-5": "from modeltranslation.translator import register, TranslationOptions\nfrom .models import *\n\n\n@register(PageTitleModel)\nclass TitleTranslationOptions(TranslationOptions):\n fields = (\n 'name',\n )\n\n\n@register(NewsModel)\nclass ProjectTranslationOptions(TranslationOptions):\n fields = (\n 'name',\n 'text',\n )\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BackendSerializer(serializers.ModelSerializer):
class Meta:
model = Backend
fields = '__all__'
<|reserved_special_token_1|>
from rest_framework import serializers

from .models import Backend


class BackendSerializer(serializers.ModelSerializer):
    """Serialize every field of the ``Backend`` model."""

    class Meta:
        model = Backend
        fields = "__all__"
|
flexible
|
{
"blob_id": "b4787d65fb8adf5dc6a99c1a13922c8f9acc2087",
"index": 1971,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass BackendSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Backend\n fields = '__all__'\n",
"step-3": "from rest_framework import serializers\nfrom .models import Backend\n\n\nclass BackendSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Backend\n fields = '__all__'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-18 07:31
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Initial migration: creates the custom ``Member`` user table."""

    # First migration of this app.
    initial = True

    # auth is required because Member links to auth.Group / auth.Permission below.
    dependencies = [
        ('auth', '0008_alter_user_username_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='Member',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # The escaped verbose_names below are Chinese labels
                # (username / password / e-mail).
                ('username', models.CharField(max_length=50, unique=True, verbose_name='\u7528\u6237\u540d')),
                ('password', models.CharField(max_length=200, verbose_name='\u5bc6\u7801')),
                ('email', models.EmailField(blank=True, max_length=254, null=True, unique=True, verbose_name='\u7535\u5b50\u90ae\u4ef6')),
                ('phone', models.CharField(blank=True, max_length=20, null=True, unique=True, verbose_name='phone')),
                # Gender is encoded as 0 = unset, 1 = male, 2 = female.
                ('gender', models.SmallIntegerField(choices=[(0, 'unset'), (1, 'male'), (2, 'female')], default=0, null=True, verbose_name='gender')),
                ('real_name', models.CharField(blank=True, max_length=100, null=True, verbose_name='real name')),
                ('birth_of_date', models.DateField(null=True, verbose_name='birth of date')),
                ('is_superuser', models.BooleanField(default=False, verbose_name='whether super user or not')),
                ('is_staff', models.BooleanField(default=False, verbose_name='whether enter backend or not')),
                ('last_login', models.DateTimeField(null=True, verbose_name='last login datetime')),
                # create/modify timestamps are filled in automatically
                # (auto_now_add on insert, auto_now on every save).
                ('create', models.DateTimeField(auto_now_add=True, verbose_name='create datetime')),
                ('modify', models.DateTimeField(auto_now=True, verbose_name='modify datetime')),
                # Many-to-many links into Django's auth Group/Permission tables.
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'db_table': 'member',
            },
        ),
    ]
|
normal
|
{
"blob_id": "ab343f88c84d45cf90bddd52623362f047c72d3c",
"index": 5754,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('auth', '0008_alter_user_username_max_length')]\n operations = [migrations.CreateModel(name='Member', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('username', models.CharField(\n max_length=50, unique=True, verbose_name='用户名')), ('password',\n models.CharField(max_length=200, verbose_name='密码')), ('email',\n models.EmailField(blank=True, max_length=254, null=True, unique=\n True, verbose_name='电子邮件')), ('phone', models.CharField(blank=True,\n max_length=20, null=True, unique=True, verbose_name='phone')), (\n 'gender', models.SmallIntegerField(choices=[(0, 'unset'), (1,\n 'male'), (2, 'female')], default=0, null=True, verbose_name=\n 'gender')), ('real_name', models.CharField(blank=True, max_length=\n 100, null=True, verbose_name='real name')), ('birth_of_date',\n models.DateField(null=True, verbose_name='birth of date')), (\n 'is_superuser', models.BooleanField(default=False, verbose_name=\n 'whether super user or not')), ('is_staff', models.BooleanField(\n default=False, verbose_name='whether enter backend or not')), (\n 'last_login', models.DateTimeField(null=True, verbose_name=\n 'last login datetime')), ('create', models.DateTimeField(\n auto_now_add=True, verbose_name='create datetime')), ('modify',\n models.DateTimeField(auto_now=True, verbose_name='modify datetime')\n ), ('groups', models.ManyToManyField(blank=True, help_text=\n 'The groups this user belongs to. A user will get all permissions granted to each of their groups.'\n , related_name='user_set', related_query_name='user', to=\n 'auth.Group', verbose_name='groups')), ('user_permissions', models.\n ManyToManyField(blank=True, help_text=\n 'Specific permissions for this user.', related_name='user_set',\n related_query_name='user', to='auth.Permission', verbose_name=\n 'user permissions'))], options={'db_table': 'member'})]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('auth', '0008_alter_user_username_max_length')]\n operations = [migrations.CreateModel(name='Member', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('username', models.CharField(\n max_length=50, unique=True, verbose_name='用户名')), ('password',\n models.CharField(max_length=200, verbose_name='密码')), ('email',\n models.EmailField(blank=True, max_length=254, null=True, unique=\n True, verbose_name='电子邮件')), ('phone', models.CharField(blank=True,\n max_length=20, null=True, unique=True, verbose_name='phone')), (\n 'gender', models.SmallIntegerField(choices=[(0, 'unset'), (1,\n 'male'), (2, 'female')], default=0, null=True, verbose_name=\n 'gender')), ('real_name', models.CharField(blank=True, max_length=\n 100, null=True, verbose_name='real name')), ('birth_of_date',\n models.DateField(null=True, verbose_name='birth of date')), (\n 'is_superuser', models.BooleanField(default=False, verbose_name=\n 'whether super user or not')), ('is_staff', models.BooleanField(\n default=False, verbose_name='whether enter backend or not')), (\n 'last_login', models.DateTimeField(null=True, verbose_name=\n 'last login datetime')), ('create', models.DateTimeField(\n auto_now_add=True, verbose_name='create datetime')), ('modify',\n models.DateTimeField(auto_now=True, verbose_name='modify datetime')\n ), ('groups', models.ManyToManyField(blank=True, help_text=\n 'The groups this user belongs to. 
A user will get all permissions granted to each of their groups.'\n , related_name='user_set', related_query_name='user', to=\n 'auth.Group', verbose_name='groups')), ('user_permissions', models.\n ManyToManyField(blank=True, help_text=\n 'Specific permissions for this user.', related_name='user_set',\n related_query_name='user', to='auth.Permission', verbose_name=\n 'user permissions'))], options={'db_table': 'member'})]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.6 on 2017-10-18 07:31\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('auth', '0008_alter_user_username_max_length'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Member',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('username', models.CharField(max_length=50, unique=True, verbose_name='\\u7528\\u6237\\u540d')),\n ('password', models.CharField(max_length=200, verbose_name='\\u5bc6\\u7801')),\n ('email', models.EmailField(blank=True, max_length=254, null=True, unique=True, verbose_name='\\u7535\\u5b50\\u90ae\\u4ef6')),\n ('phone', models.CharField(blank=True, max_length=20, null=True, unique=True, verbose_name='phone')),\n ('gender', models.SmallIntegerField(choices=[(0, 'unset'), (1, 'male'), (2, 'female')], default=0, null=True, verbose_name='gender')),\n ('real_name', models.CharField(blank=True, max_length=100, null=True, verbose_name='real name')),\n ('birth_of_date', models.DateField(null=True, verbose_name='birth of date')),\n ('is_superuser', models.BooleanField(default=False, verbose_name='whether super user or not')),\n ('is_staff', models.BooleanField(default=False, verbose_name='whether enter backend or not')),\n ('last_login', models.DateTimeField(null=True, verbose_name='last login datetime')),\n ('create', models.DateTimeField(auto_now_add=True, verbose_name='create datetime')),\n ('modify', models.DateTimeField(auto_now=True, verbose_name='modify datetime')),\n ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. 
A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),\n ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),\n ],\n options={\n 'db_table': 'member',\n },\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Selenium IDE
import pytest
import time
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class TestSTCHANGE():
def setup_method(self, method):
self.driver = webdriver.Chrome()
self.vars = {}
  def teardown_method(self, method):
    # Quit the browser, ending the WebDriver session, after every test.
    self.driver.quit()
def test_sTCHANGE(self):
# Test name: ST CHANGE
# Step # | name | target | value
# 1 | open | /main/desktop-login.html |
self.driver.get("http://10.51.30.52:8090/main/desktop-login.html")
# 2 | setWindowSize | 976x696 |
self.driver.set_window_size(976, 696)
# 3 | click | id=idInputUsername |
self.driver.find_element(By.ID, "idInputUsername").click()
# 4 | type | id=idInputUsername | SUPERVISOR
self.driver.find_element(By.ID, "idInputUsername").send_keys("SUPERVISOR")
# 5 | click | id=login-panel |
self.driver.find_element(By.ID, "login-panel").click()
# 6 | click | id=idInputPassword |
self.driver.find_element(By.ID, "idInputPassword").click()
# 7 | type | id=idInputPassword | **
self.driver.find_element(By.ID, "idInputPassword").send_keys("**")
# 8 | click | id=submit.button |
self.driver.find_element(By.ID, "submit.button").click()
# 9 | click | id=BVMAPS |
self.driver.find_element(By.ID, "BVMAPS").click()
# 10 | click | css=#UI_BADGES_GRID\.gridView\.row\#22_Tcell\#0 > div > div |
self.driver.find_element(By.CSS_SELECTOR, "#UI_BADGES_GRID\\.gridView\\.row\\#22_Tcell\\#0 > div > div").click()
# 11 | click | id=badge.html.ribbon.properties |
self.driver.find_element(By.ID, "badge.html.ribbon.properties").click()
# 12 | click | id=__selection_4 |
self.driver.find_element(By.ID, "__selection_4").click()
# 13 | mouseDown | css=#\__pan_4 > .listItemNormal:nth-child(2) |
element = self.driver.find_element(By.CSS_SELECTOR, "#\\__pan_4 > .listItemNormal:nth-child(2)")
actions = ActionChains(self.driver)
actions.move_to_element(element).click_and_hold().perform()
# 14 | mouseUp | id=__selection_5 |
element = self.driver.find_element(By.ID, "__selection_5")
actions = ActionChains(self.driver)
actions.move_to_element(element).release().perform()
# 15 | click | css=#PROPERTIES_CONTROLS td:nth-child(2) .middlePart |
self.driver.find_element(By.CSS_SELECTOR, "#PROPERTIES_CONTROLS td:nth-child(2) .middlePart").click()
# 16 | click | id=badge.html.ribbon.properties.apply |
self.driver.find_element(By.ID, "badge.html.ribbon.properties.apply").click()
# 17 | click | css=body > img |
self.driver.find_element(By.CSS_SELECTOR, "body > img").click()
# 18 | click | css=a > img |
self.driver.find_element(By.CSS_SELECTOR, "a > img").click()
# 19 | click | id=main.html.btn_logout |
self.driver.find_element(By.ID, "main.html.btn_logout").click()
|
normal
|
{
"blob_id": "87f8cc65cf7d0ea932de79a6daf5b29ad387ec6f",
"index": 7103,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestSTCHANGE:\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestSTCHANGE:\n\n def setup_method(self, method):\n self.driver = webdriver.Chrome()\n self.vars = {}\n\n def teardown_method(self, method):\n self.driver.quit()\n\n def test_sTCHANGE(self):\n self.driver.get('http://10.51.30.52:8090/main/desktop-login.html')\n self.driver.set_window_size(976, 696)\n self.driver.find_element(By.ID, 'idInputUsername').click()\n self.driver.find_element(By.ID, 'idInputUsername').send_keys(\n 'SUPERVISOR')\n self.driver.find_element(By.ID, 'login-panel').click()\n self.driver.find_element(By.ID, 'idInputPassword').click()\n self.driver.find_element(By.ID, 'idInputPassword').send_keys('**')\n self.driver.find_element(By.ID, 'submit.button').click()\n self.driver.find_element(By.ID, 'BVMAPS').click()\n self.driver.find_element(By.CSS_SELECTOR,\n '#UI_BADGES_GRID\\\\.gridView\\\\.row\\\\#22_Tcell\\\\#0 > div > div'\n ).click()\n self.driver.find_element(By.ID, 'badge.html.ribbon.properties').click()\n self.driver.find_element(By.ID, '__selection_4').click()\n element = self.driver.find_element(By.CSS_SELECTOR,\n '#\\\\__pan_4 > .listItemNormal:nth-child(2)')\n actions = ActionChains(self.driver)\n actions.move_to_element(element).click_and_hold().perform()\n element = self.driver.find_element(By.ID, '__selection_5')\n actions = ActionChains(self.driver)\n actions.move_to_element(element).release().perform()\n self.driver.find_element(By.CSS_SELECTOR,\n '#PROPERTIES_CONTROLS td:nth-child(2) .middlePart').click()\n self.driver.find_element(By.ID, 'badge.html.ribbon.properties.apply'\n ).click()\n self.driver.find_element(By.CSS_SELECTOR, 'body > img').click()\n self.driver.find_element(By.CSS_SELECTOR, 'a > img').click()\n self.driver.find_element(By.ID, 'main.html.btn_logout').click()\n",
"step-4": "import pytest\nimport time\nimport json\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\n\nclass TestSTCHANGE:\n\n def setup_method(self, method):\n self.driver = webdriver.Chrome()\n self.vars = {}\n\n def teardown_method(self, method):\n self.driver.quit()\n\n def test_sTCHANGE(self):\n self.driver.get('http://10.51.30.52:8090/main/desktop-login.html')\n self.driver.set_window_size(976, 696)\n self.driver.find_element(By.ID, 'idInputUsername').click()\n self.driver.find_element(By.ID, 'idInputUsername').send_keys(\n 'SUPERVISOR')\n self.driver.find_element(By.ID, 'login-panel').click()\n self.driver.find_element(By.ID, 'idInputPassword').click()\n self.driver.find_element(By.ID, 'idInputPassword').send_keys('**')\n self.driver.find_element(By.ID, 'submit.button').click()\n self.driver.find_element(By.ID, 'BVMAPS').click()\n self.driver.find_element(By.CSS_SELECTOR,\n '#UI_BADGES_GRID\\\\.gridView\\\\.row\\\\#22_Tcell\\\\#0 > div > div'\n ).click()\n self.driver.find_element(By.ID, 'badge.html.ribbon.properties').click()\n self.driver.find_element(By.ID, '__selection_4').click()\n element = self.driver.find_element(By.CSS_SELECTOR,\n '#\\\\__pan_4 > .listItemNormal:nth-child(2)')\n actions = ActionChains(self.driver)\n actions.move_to_element(element).click_and_hold().perform()\n element = self.driver.find_element(By.ID, '__selection_5')\n actions = ActionChains(self.driver)\n actions.move_to_element(element).release().perform()\n self.driver.find_element(By.CSS_SELECTOR,\n '#PROPERTIES_CONTROLS td:nth-child(2) .middlePart').click()\n self.driver.find_element(By.ID, 'badge.html.ribbon.properties.apply'\n 
).click()\n self.driver.find_element(By.CSS_SELECTOR, 'body > img').click()\n self.driver.find_element(By.CSS_SELECTOR, 'a > img').click()\n self.driver.find_element(By.ID, 'main.html.btn_logout').click()\n",
"step-5": "# Generated by Selenium IDE\nimport pytest\nimport time\nimport json\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\n\nclass TestSTCHANGE():\n def setup_method(self, method):\n self.driver = webdriver.Chrome()\n self.vars = {}\n\n def teardown_method(self, method):\n self.driver.quit()\n\n def test_sTCHANGE(self):\n # Test name: ST CHANGE\n # Step # | name | target | value\n # 1 | open | /main/desktop-login.html |\n self.driver.get(\"http://10.51.30.52:8090/main/desktop-login.html\")\n # 2 | setWindowSize | 976x696 |\n self.driver.set_window_size(976, 696)\n # 3 | click | id=idInputUsername |\n self.driver.find_element(By.ID, \"idInputUsername\").click()\n # 4 | type | id=idInputUsername | SUPERVISOR\n self.driver.find_element(By.ID, \"idInputUsername\").send_keys(\"SUPERVISOR\")\n # 5 | click | id=login-panel |\n self.driver.find_element(By.ID, \"login-panel\").click()\n # 6 | click | id=idInputPassword |\n self.driver.find_element(By.ID, \"idInputPassword\").click()\n # 7 | type | id=idInputPassword | **\n self.driver.find_element(By.ID, \"idInputPassword\").send_keys(\"**\")\n # 8 | click | id=submit.button |\n self.driver.find_element(By.ID, \"submit.button\").click()\n # 9 | click | id=BVMAPS |\n self.driver.find_element(By.ID, \"BVMAPS\").click()\n # 10 | click | css=#UI_BADGES_GRID\\.gridView\\.row\\#22_Tcell\\#0 > div > div |\n self.driver.find_element(By.CSS_SELECTOR, \"#UI_BADGES_GRID\\\\.gridView\\\\.row\\\\#22_Tcell\\\\#0 > div > div\").click()\n # 11 | click | id=badge.html.ribbon.properties |\n self.driver.find_element(By.ID, \"badge.html.ribbon.properties\").click()\n # 12 | click | id=__selection_4 
|\n self.driver.find_element(By.ID, \"__selection_4\").click()\n # 13 | mouseDown | css=#\\__pan_4 > .listItemNormal:nth-child(2) |\n element = self.driver.find_element(By.CSS_SELECTOR, \"#\\\\__pan_4 > .listItemNormal:nth-child(2)\")\n actions = ActionChains(self.driver)\n actions.move_to_element(element).click_and_hold().perform()\n # 14 | mouseUp | id=__selection_5 |\n element = self.driver.find_element(By.ID, \"__selection_5\")\n actions = ActionChains(self.driver)\n actions.move_to_element(element).release().perform()\n # 15 | click | css=#PROPERTIES_CONTROLS td:nth-child(2) .middlePart |\n self.driver.find_element(By.CSS_SELECTOR, \"#PROPERTIES_CONTROLS td:nth-child(2) .middlePart\").click()\n # 16 | click | id=badge.html.ribbon.properties.apply |\n self.driver.find_element(By.ID, \"badge.html.ribbon.properties.apply\").click()\n # 17 | click | css=body > img |\n self.driver.find_element(By.CSS_SELECTOR, \"body > img\").click()\n # 18 | click | css=a > img |\n self.driver.find_element(By.CSS_SELECTOR, \"a > img\").click()\n # 19 | click | id=main.html.btn_logout |\n self.driver.find_element(By.ID, \"main.html.btn_logout\").click()\n",
"step-ids": [
0,
1,
4,
5,
6
]
}
|
[
0,
1,
4,
5,
6
] |
<|reserved_special_token_0|>
@app.route('/hello/')
def hello():
return render_template('index.html', greeting='here we are')
<|reserved_special_token_0|>
@app.route('/api/1.0/create_playlists', methods=['POST'])
def do_create_playlists():
create_playlists(ALL_DBS)
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_playlists', methods=['POST'])
def get_playlists():
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
<|reserved_special_token_0|>
@app.route('/api/1.0/get_all_topics', methods=['POST'])
def get_topics():
retval = get_all_topics(ALL_DBS)
return jsonify({'all_topics': retval})
<|reserved_special_token_0|>
@app.route('/api/1.0/add_topic', methods=['POST'])
def add_topic():
retval = add_new_topic(request.json, ALL_DBS)
return jsonify({'return_code': retval})
@app.route('/api/1.0/<string:api_call>', methods=['POST'])
def generic_api_call(api_call):
if not request.json:
abort(400)
param1 = request.json.get('param1', 'no param 1')
param2 = request.json.get('param2', 'no param 2')
retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}
return jsonify(retval)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def index():
return 'index'
@app.route('/hello/')
def hello():
return render_template('index.html', greeting='here we are')
<|reserved_special_token_0|>
@app.route('/api/1.0/create_playlists', methods=['POST'])
def do_create_playlists():
create_playlists(ALL_DBS)
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_playlists', methods=['POST'])
def get_playlists():
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_all_categories', methods=['POST'])
def get_categories():
retval = get_all_categories(ALL_DBS)
return jsonify({'all_categories': retval})
@app.route('/api/1.0/get_all_topics', methods=['POST'])
def get_topics():
retval = get_all_topics(ALL_DBS)
return jsonify({'all_topics': retval})
@app.route('/api/1.0/add_category', methods=['POST'])
def add_category():
retval = add_new_category(request.json, ALL_DBS)
return retval
@app.route('/api/1.0/add_topic', methods=['POST'])
def add_topic():
retval = add_new_topic(request.json, ALL_DBS)
return jsonify({'return_code': retval})
@app.route('/api/1.0/<string:api_call>', methods=['POST'])
def generic_api_call(api_call):
if not request.json:
abort(400)
param1 = request.json.get('param1', 'no param 1')
param2 = request.json.get('param2', 'no param 2')
retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}
return jsonify(retval)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/')
def index():
return 'index'
@app.route('/hello/')
def hello():
return render_template('index.html', greeting='here we are')
@app.route('/tools/')
def tools():
return render_template('tools.html')
@app.route('/api/1.0/create_playlists', methods=['POST'])
def do_create_playlists():
create_playlists(ALL_DBS)
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_playlists', methods=['POST'])
def get_playlists():
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_all_categories', methods=['POST'])
def get_categories():
retval = get_all_categories(ALL_DBS)
return jsonify({'all_categories': retval})
@app.route('/api/1.0/get_all_topics', methods=['POST'])
def get_topics():
retval = get_all_topics(ALL_DBS)
return jsonify({'all_topics': retval})
@app.route('/api/1.0/add_category', methods=['POST'])
def add_category():
retval = add_new_category(request.json, ALL_DBS)
return retval
@app.route('/api/1.0/add_topic', methods=['POST'])
def add_topic():
retval = add_new_topic(request.json, ALL_DBS)
return jsonify({'return_code': retval})
@app.route('/api/1.0/<string:api_call>', methods=['POST'])
def generic_api_call(api_call):
if not request.json:
abort(400)
param1 = request.json.get('param1', 'no param 1')
param2 = request.json.get('param2', 'no param 2')
retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}
return jsonify(retval)
if __name__ == '__main__':
ALL_DBS = connect_to_db()
app.run(debug=True)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from flask import Flask
from flask import render_template
from flask import jsonify
from flask import request
from playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics
from db import connect_to_db
ALL_DBS = None
app = Flask(__name__)
@app.route('/')
def index():
return 'index'
@app.route('/hello/')
def hello():
return render_template('index.html', greeting='here we are')
@app.route('/tools/')
def tools():
return render_template('tools.html')
@app.route('/api/1.0/create_playlists', methods=['POST'])
def do_create_playlists():
create_playlists(ALL_DBS)
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_playlists', methods=['POST'])
def get_playlists():
retval = get_all_playlists(ALL_DBS)
return jsonify({'all_playlists': retval})
@app.route('/api/1.0/get_all_categories', methods=['POST'])
def get_categories():
retval = get_all_categories(ALL_DBS)
return jsonify({'all_categories': retval})
@app.route('/api/1.0/get_all_topics', methods=['POST'])
def get_topics():
retval = get_all_topics(ALL_DBS)
return jsonify({'all_topics': retval})
@app.route('/api/1.0/add_category', methods=['POST'])
def add_category():
retval = add_new_category(request.json, ALL_DBS)
return retval
@app.route('/api/1.0/add_topic', methods=['POST'])
def add_topic():
retval = add_new_topic(request.json, ALL_DBS)
return jsonify({'return_code': retval})
@app.route('/api/1.0/<string:api_call>', methods=['POST'])
def generic_api_call(api_call):
if not request.json:
abort(400)
param1 = request.json.get('param1', 'no param 1')
param2 = request.json.get('param2', 'no param 2')
retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}
return jsonify(retval)
if __name__ == '__main__':
ALL_DBS = connect_to_db()
app.run(debug=True)
<|reserved_special_token_1|>
"""
Copyright (C) 2014, Jill Huchital
"""
# test comment
from flask import Flask
from flask import abort
from flask import jsonify
from flask import render_template
from flask import request

from playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics
from db import connect_to_db
ALL_DBS = None
app = Flask(__name__)
@app.route('/')
def index():
    """Serve the site root with a plain placeholder body."""
    body = "index"
    return body
@app.route('/hello/')
def hello():
    """Render the index template with a fixed greeting."""
    context = {'greeting': 'here we are'}
    return render_template('index.html', **context)
@app.route('/tools/')
def tools():
    """Render the static tools page."""
    page = render_template('tools.html')
    return page
@app.route('/api/1.0/create_playlists', methods=['POST'])
def do_create_playlists():
    """(Re)build the playlists in the databases, then return the full list."""
    create_playlists(ALL_DBS)
    playlists = get_all_playlists(ALL_DBS)
    return jsonify({'all_playlists': playlists})
@app.route('/api/1.0/get_playlists', methods=['POST'])
def get_playlists():
    """Return every playlist as a JSON object."""
    return jsonify({'all_playlists': get_all_playlists(ALL_DBS)})
@app.route('/api/1.0/get_all_categories', methods=['POST'])
def get_categories():
    """Return every category as a JSON object."""
    return jsonify({'all_categories': get_all_categories(ALL_DBS)})
@app.route('/api/1.0/get_all_topics', methods=['POST'])
def get_topics():
    """Return every topic as a JSON object."""
    return jsonify({'all_topics': get_all_topics(ALL_DBS)})
@app.route('/api/1.0/add_category', methods = ['POST'])
def add_category():
    # Create a new category from the POSTed JSON body.
    # NOTE(review): unlike the sibling routes this returns add_new_category's
    # raw value instead of a jsonify(...) response -- confirm callers expect
    # that before normalizing it.
    retval = add_new_category(request.json, ALL_DBS)
    return retval
@app.route('/api/1.0/add_topic', methods=['POST'])
def add_topic():
    """Create a new topic from the POSTed JSON body; report the outcome code."""
    outcome = add_new_topic(request.json, ALL_DBS)
    return jsonify({'return_code': outcome})
@app.route('/api/1.0/<string:api_call>', methods=['POST'])
def generic_api_call(api_call):
    """Echo endpoint for ad-hoc API experiments.

    Requires a JSON request body; non-JSON requests are rejected with
    HTTP 400 via flask.abort. Replies with the call name plus the optional
    'param1'/'param2' fields from the body (placeholder strings when absent).
    """
    if not request.json:
        abort(400)
    param1 = request.json.get('param1', 'no param 1')
    param2 = request.json.get('param2', 'no param 2')
    retval = {'param_1': param1,
              'api_call': api_call,
              'param_2': param2}
    return jsonify(retval)
if __name__ == '__main__':
    # Connect once at startup; the route handlers read the module-level ALL_DBS.
    ALL_DBS = connect_to_db()
    # create_playlists(ALL_DBS)
    # debug = True makes the server restart when the Python files change. TODO: make it
    # depend on whether we're running locally or in production.
    app.run(debug = True)
|
flexible
|
{
"blob_id": "5193de15052f81460a23d993cfa039fa90c9de5e",
"index": 897,
"step-1": "<mask token>\n\n\n@app.route('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n\n<mask token>\n\n\n@app.route('/api/1.0/create_playlists', methods=['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_playlists', methods=['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n<mask token>\n\n\n@app.route('/api/1.0/get_all_topics', methods=['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n\n<mask token>\n\n\n@app.route('/api/1.0/add_topic', methods=['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n\n@app.route('/api/1.0/<string:api_call>', methods=['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}\n return jsonify(retval)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@app.route('/')\ndef index():\n return 'index'\n\n\n@app.route('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n\n<mask token>\n\n\n@app.route('/api/1.0/create_playlists', methods=['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_playlists', methods=['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_all_categories', methods=['POST'])\ndef get_categories():\n retval = get_all_categories(ALL_DBS)\n return jsonify({'all_categories': retval})\n\n\n@app.route('/api/1.0/get_all_topics', methods=['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n\n@app.route('/api/1.0/add_category', methods=['POST'])\ndef add_category():\n retval = add_new_category(request.json, ALL_DBS)\n return retval\n\n\n@app.route('/api/1.0/add_topic', methods=['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n\n@app.route('/api/1.0/<string:api_call>', methods=['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}\n return jsonify(retval)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@app.route('/')\ndef index():\n return 'index'\n\n\n@app.route('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n\n@app.route('/tools/')\ndef tools():\n return render_template('tools.html')\n\n\n@app.route('/api/1.0/create_playlists', methods=['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_playlists', methods=['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_all_categories', methods=['POST'])\ndef get_categories():\n retval = get_all_categories(ALL_DBS)\n return jsonify({'all_categories': retval})\n\n\n@app.route('/api/1.0/get_all_topics', methods=['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n\n@app.route('/api/1.0/add_category', methods=['POST'])\ndef add_category():\n retval = add_new_category(request.json, ALL_DBS)\n return retval\n\n\n@app.route('/api/1.0/add_topic', methods=['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n\n@app.route('/api/1.0/<string:api_call>', methods=['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}\n return jsonify(retval)\n\n\nif __name__ == '__main__':\n ALL_DBS = connect_to_db()\n app.run(debug=True)\n",
"step-4": "<mask token>\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import jsonify\nfrom flask import request\nfrom playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics\nfrom db import connect_to_db\nALL_DBS = None\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n return 'index'\n\n\n@app.route('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n\n@app.route('/tools/')\ndef tools():\n return render_template('tools.html')\n\n\n@app.route('/api/1.0/create_playlists', methods=['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_playlists', methods=['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n\n@app.route('/api/1.0/get_all_categories', methods=['POST'])\ndef get_categories():\n retval = get_all_categories(ALL_DBS)\n return jsonify({'all_categories': retval})\n\n\n@app.route('/api/1.0/get_all_topics', methods=['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n\n@app.route('/api/1.0/add_category', methods=['POST'])\ndef add_category():\n retval = add_new_category(request.json, ALL_DBS)\n return retval\n\n\n@app.route('/api/1.0/add_topic', methods=['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n\n@app.route('/api/1.0/<string:api_call>', methods=['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1, 'api_call': api_call, 'param_2': param2}\n return jsonify(retval)\n\n\nif __name__ == '__main__':\n ALL_DBS = connect_to_db()\n app.run(debug=True)\n",
"step-5": "\"\"\"\nCopyright (C) 2014, Jill Huchital\n\"\"\"\n\n# test comment\n\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import jsonify\nfrom flask import request\n\nfrom playlists import get_all_playlists, create_playlists, get_all_categories, add_new_category, add_new_topic, get_all_topics\nfrom db import connect_to_db\n\nALL_DBS = None\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n # return render_template('index.html', greeting='here we are then')\n return \"index\"\n\n@app.route('/hello/')\ndef hello():\n return render_template('index.html', greeting='here we are')\n\n@app.route('/tools/')\ndef tools():\n return render_template('tools.html')\n\n@app.route('/api/1.0/create_playlists', methods = ['POST'])\ndef do_create_playlists():\n create_playlists(ALL_DBS)\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n@app.route('/api/1.0/get_playlists', methods = ['POST'])\ndef get_playlists():\n retval = get_all_playlists(ALL_DBS)\n return jsonify({'all_playlists': retval})\n\n@app.route('/api/1.0/get_all_categories', methods = ['POST'])\ndef get_categories():\n retval = get_all_categories(ALL_DBS)\n return jsonify({'all_categories': retval})\n\n@app.route('/api/1.0/get_all_topics', methods = ['POST'])\ndef get_topics():\n retval = get_all_topics(ALL_DBS)\n return jsonify({'all_topics': retval})\n\n@app.route('/api/1.0/add_category', methods = ['POST'])\ndef add_category():\n retval = add_new_category(request.json, ALL_DBS)\n return retval\n\n@app.route('/api/1.0/add_topic', methods = ['POST'])\ndef add_topic():\n retval = add_new_topic(request.json, ALL_DBS)\n return jsonify({'return_code': retval})\n\n@app.route('/api/1.0/<string:api_call>', methods = ['POST'])\ndef generic_api_call(api_call):\n if not request.json:\n abort(400)\n param1 = request.json.get('param1', 'no param 1')\n param2 = request.json.get('param2', 'no param 2')\n retval = {'param_1': param1,\n 'api_call': api_call,\n 
'param_2': param2}\n return jsonify(retval)\n\nif __name__ == '__main__':\n # debug = True makes the server restart when the Python files change. TODO: make it\n # depend on whether we're running locally or in production.\n ALL_DBS = connect_to_db()\n # create_playlists(ALL_DBS)\n app.run(debug = True)\n",
"step-ids": [
6,
9,
11,
13,
14
]
}
|
[
6,
9,
11,
13,
14
] |
<|reserved_special_token_0|>
def send_show_command(device, commands):
OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'
result = open(OutputPath, 'w')
flag = True
try:
with ConnectHandler(**device) as ssh:
ssh.enable()
for command in commands:
output = ssh.send_command(command, strip_command=False,
strip_prompt=False)
result.write(output + '\n' + 30 * '+' + '\n' + '\n')
except Exception as error:
print(error)
flag = False
result.close()
if flag:
print('Data collection on %s is done. \n \n' % i)
else:
print('Data collection for %s is NOT done. \n \n' % i)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('./device_list.txt', 'r') as f:
device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0]
print('Data will be collected on below switches:')
for device in device_list:
print(device)
<|reserved_special_token_0|>
if go != 'y' and go != 'Y':
exit(2)
<|reserved_special_token_0|>
with open('temp.txt', 'r') as f:
cmd_4_IOS = [i.strip() for i in f.readlines()]
def send_show_command(device, commands):
OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'
result = open(OutputPath, 'w')
flag = True
try:
with ConnectHandler(**device) as ssh:
ssh.enable()
for command in commands:
output = ssh.send_command(command, strip_command=False,
strip_prompt=False)
result.write(output + '\n' + 30 * '+' + '\n' + '\n')
except Exception as error:
print(error)
flag = False
result.close()
if flag:
print('Data collection on %s is done. \n \n' % i)
else:
print('Data collection for %s is NOT done. \n \n' % i)
<|reserved_special_token_0|>
for i in device_list:
switch['device_type'] = 'ruckus_fastiron'
switch['host'] = i
switch['username'] = u_id
factor_2 = input('Trying to login to %s, enter DUO Code:' % i)
switch['password'] = str(factor_1) + str(factor_2)
switch['secret'] = '',
switch['port'] = 22
send_show_command(switch, cmd_4_IOS)
print('All collection is done.')
<|reserved_special_token_1|>
__author__ = 'Yong Peng'
__version__ = '1.0'
<|reserved_special_token_0|>
with open('./device_list.txt', 'r') as f:
device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0]
print('Data will be collected on below switches:')
for device in device_list:
print(device)
go = input("""
Press y to continue: """)
if go != 'y' and go != 'Y':
exit(2)
u_id = input('Please input login ID:')
factor_1 = getpass.getpass('ID Password for login:')
with open('temp.txt', 'r') as f:
cmd_4_IOS = [i.strip() for i in f.readlines()]
def send_show_command(device, commands):
OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'
result = open(OutputPath, 'w')
flag = True
try:
with ConnectHandler(**device) as ssh:
ssh.enable()
for command in commands:
output = ssh.send_command(command, strip_command=False,
strip_prompt=False)
result.write(output + '\n' + 30 * '+' + '\n' + '\n')
except Exception as error:
print(error)
flag = False
result.close()
if flag:
print('Data collection on %s is done. \n \n' % i)
else:
print('Data collection for %s is NOT done. \n \n' % i)
switch = {}
for i in device_list:
switch['device_type'] = 'ruckus_fastiron'
switch['host'] = i
switch['username'] = u_id
factor_2 = input('Trying to login to %s, enter DUO Code:' % i)
switch['password'] = str(factor_1) + str(factor_2)
switch['secret'] = '',
switch['port'] = 22
send_show_command(switch, cmd_4_IOS)
print('All collection is done.')
<|reserved_special_token_1|>
__author__ = 'Yong Peng'
__version__ = '1.0'
import time
import re
import getpass
from netmiko import ConnectHandler, NetmikoTimeoutException, NetmikoAuthenticationException
with open('./device_list.txt', 'r') as f:
device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0]
print('Data will be collected on below switches:')
for device in device_list:
print(device)
go = input("""
Press y to continue: """)
if go != 'y' and go != 'Y':
exit(2)
u_id = input('Please input login ID:')
factor_1 = getpass.getpass('ID Password for login:')
with open('temp.txt', 'r') as f:
cmd_4_IOS = [i.strip() for i in f.readlines()]
def send_show_command(device, commands):
OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'
result = open(OutputPath, 'w')
flag = True
try:
with ConnectHandler(**device) as ssh:
ssh.enable()
for command in commands:
output = ssh.send_command(command, strip_command=False,
strip_prompt=False)
result.write(output + '\n' + 30 * '+' + '\n' + '\n')
except Exception as error:
print(error)
flag = False
result.close()
if flag:
print('Data collection on %s is done. \n \n' % i)
else:
print('Data collection for %s is NOT done. \n \n' % i)
switch = {}
for i in device_list:
switch['device_type'] = 'ruckus_fastiron'
switch['host'] = i
switch['username'] = u_id
factor_2 = input('Trying to login to %s, enter DUO Code:' % i)
switch['password'] = str(factor_1) + str(factor_2)
switch['secret'] = '',
switch['port'] = 22
send_show_command(switch, cmd_4_IOS)
print('All collection is done.')
<|reserved_special_token_1|>
__author__ = "Yong Peng"
__version__ = "1.0"
import time
import re
import getpass
from netmiko import (
ConnectHandler,
NetmikoTimeoutException,
NetmikoAuthenticationException,
)
# Load the list of target switch hostnames/IPs, one non-empty line each.
with open('./device_list.txt','r') as f:
    device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0] # read the device list.
print("Data will be collected on below switches:")
for device in device_list:
    print(device)
# Require explicit confirmation before logging in to anything.
go = input("\nPress y to continue: ")
if go != "y" and go != "Y":
    exit(2)
# Credentials: static password now; the per-device DUO code is asked later.
u_id = input("Please input login ID:")
factor_1 = getpass.getpass("ID Password for login:")
# Alternative command sets kept for reference; the active set is read from temp.txt.
# cmd_4_IOS = ['show version | in from','show stack','show flash',\
#              'show license', 'show boot-preference',\
#              'show ip bgp summ', 'show interface brief',\
#              'show ip inter', 'show vlan',\
#              'show vlan brief', 'show lag', 'show lag brief',\
#              'show lldp neighbor', 'show 802-1w', 'show ip route',\
#              'show run']
# cmd_4_IOS = ['show version | in from', 'show flash | in Pri Code|Sec Code']
# cmd_4_IOS = ['show vlan brief', 'show ip interface', 'show version | in from', 'show ip osp inter brief',
#              'show run']
# cmd_4_IOS = ['show vlan id 464']
with open("temp.txt",'r') as f:
    cmd_4_IOS = [i.strip() for i in f.readlines()]
def send_show_command(device, commands):
    """Run *commands* on *device* over SSH and save the output per host.

    device: netmiko connection dict (must contain 'host').
    commands: iterable of show commands to execute.
    Failures are reported and swallowed (best-effort collection).
    """
    host = str(device['host'])
    OutputPath = 'c:/script/output/' + host + '.txt'
    flag = True
    # 'with' guarantees the output file is closed even if the SSH session fails.
    with open(OutputPath, 'w') as result:
        try:
            with ConnectHandler(**device) as ssh:
                ssh.enable()
                for command in commands:
                    output = ssh.send_command(command, strip_command=False,
                        strip_prompt=False)
                    result.write(output + "\n" + 30 * '+' + "\n" + "\n")
        except Exception as error:
            print(error)
            flag = False
    # BUG FIX: the original printed the global loop variable 'i'; use the
    # device's own host so the function also works when called standalone.
    if flag:
        print("Data collection on %s is done. \n \n" % host)
    else:
        print("Data collection for %s is NOT done. \n \n" % host)
# Build a netmiko connection dict per device and collect command output.
switch = {}
for i in device_list:
    switch["device_type"] = "ruckus_fastiron"
    switch["host"] = i
    switch["username"] = u_id
    # DUO two-factor: password is the static password plus a per-device code.
    factor_2 = input("Trying to login to %s, enter DUO Code:" % (i))
    switch["password"] = str(factor_1) + str(factor_2)
    # BUG FIX: the original trailing comma made this a tuple ('',) rather than
    # a string, which netmiko would reject as an enable secret.
    switch['secret'] = ''
    switch['port'] = 22
    send_show_command(switch, cmd_4_IOS)
print("All collection is done.")
|
flexible
|
{
"blob_id": "31a0c9a143a06ac86c8e8616fb273a0af844a352",
"index": 6895,
"step-1": "<mask token>\n\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False,\n strip_prompt=False)\n result.write(output + '\\n' + 30 * '+' + '\\n' + '\\n')\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print('Data collection on %s is done. \\n \\n' % i)\n else:\n print('Data collection for %s is NOT done. \\n \\n' % i)\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('./device_list.txt', 'r') as f:\n device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0]\nprint('Data will be collected on below switches:')\nfor device in device_list:\n print(device)\n<mask token>\nif go != 'y' and go != 'Y':\n exit(2)\n<mask token>\nwith open('temp.txt', 'r') as f:\n cmd_4_IOS = [i.strip() for i in f.readlines()]\n\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False,\n strip_prompt=False)\n result.write(output + '\\n' + 30 * '+' + '\\n' + '\\n')\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print('Data collection on %s is done. \\n \\n' % i)\n else:\n print('Data collection for %s is NOT done. \\n \\n' % i)\n\n\n<mask token>\nfor i in device_list:\n switch['device_type'] = 'ruckus_fastiron'\n switch['host'] = i\n switch['username'] = u_id\n factor_2 = input('Trying to login to %s, enter DUO Code:' % i)\n switch['password'] = str(factor_1) + str(factor_2)\n switch['secret'] = '',\n switch['port'] = 22\n send_show_command(switch, cmd_4_IOS)\nprint('All collection is done.')\n",
"step-3": "__author__ = 'Yong Peng'\n__version__ = '1.0'\n<mask token>\nwith open('./device_list.txt', 'r') as f:\n device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0]\nprint('Data will be collected on below switches:')\nfor device in device_list:\n print(device)\ngo = input(\"\"\"\nPress y to continue: \"\"\")\nif go != 'y' and go != 'Y':\n exit(2)\nu_id = input('Please input login ID:')\nfactor_1 = getpass.getpass('ID Password for login:')\nwith open('temp.txt', 'r') as f:\n cmd_4_IOS = [i.strip() for i in f.readlines()]\n\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False,\n strip_prompt=False)\n result.write(output + '\\n' + 30 * '+' + '\\n' + '\\n')\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print('Data collection on %s is done. \\n \\n' % i)\n else:\n print('Data collection for %s is NOT done. \\n \\n' % i)\n\n\nswitch = {}\nfor i in device_list:\n switch['device_type'] = 'ruckus_fastiron'\n switch['host'] = i\n switch['username'] = u_id\n factor_2 = input('Trying to login to %s, enter DUO Code:' % i)\n switch['password'] = str(factor_1) + str(factor_2)\n switch['secret'] = '',\n switch['port'] = 22\n send_show_command(switch, cmd_4_IOS)\nprint('All collection is done.')\n",
"step-4": "__author__ = 'Yong Peng'\n__version__ = '1.0'\nimport time\nimport re\nimport getpass\nfrom netmiko import ConnectHandler, NetmikoTimeoutException, NetmikoAuthenticationException\nwith open('./device_list.txt', 'r') as f:\n device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0]\nprint('Data will be collected on below switches:')\nfor device in device_list:\n print(device)\ngo = input(\"\"\"\nPress y to continue: \"\"\")\nif go != 'y' and go != 'Y':\n exit(2)\nu_id = input('Please input login ID:')\nfactor_1 = getpass.getpass('ID Password for login:')\nwith open('temp.txt', 'r') as f:\n cmd_4_IOS = [i.strip() for i in f.readlines()]\n\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False,\n strip_prompt=False)\n result.write(output + '\\n' + 30 * '+' + '\\n' + '\\n')\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print('Data collection on %s is done. \\n \\n' % i)\n else:\n print('Data collection for %s is NOT done. \\n \\n' % i)\n\n\nswitch = {}\nfor i in device_list:\n switch['device_type'] = 'ruckus_fastiron'\n switch['host'] = i\n switch['username'] = u_id\n factor_2 = input('Trying to login to %s, enter DUO Code:' % i)\n switch['password'] = str(factor_1) + str(factor_2)\n switch['secret'] = '',\n switch['port'] = 22\n send_show_command(switch, cmd_4_IOS)\nprint('All collection is done.')\n",
"step-5": "\n__author__ = \"Yong Peng\"\n__version__ = \"1.0\"\n\n\nimport time\nimport re\nimport getpass\nfrom netmiko import (\n ConnectHandler,\n NetmikoTimeoutException,\n NetmikoAuthenticationException,\n)\n\nwith open('./device_list.txt','r') as f:\n device_list = [i.strip() for i in f.readlines() if len(i.strip()) != 0] # read the device list.\n\n\nprint(\"Data will be collected on below switches:\")\nfor device in device_list:\n print(device)\n\ngo = input(\"\\nPress y to continue: \")\n\nif go != \"y\" and go != \"Y\":\n exit(2)\n\nu_id = input(\"Please input login ID:\")\nfactor_1 = getpass.getpass(\"ID Password for login:\")\n\n\n# cmd_4_IOS = ['show version | in from','show stack','show flash',\\\n# 'show license', 'show boot-preference',\\\n# 'show ip bgp summ', 'show interface brief',\\\n# 'show ip inter', 'show vlan',\\\n# 'show vlan brief', 'show lag', 'show lag brief',\\\n# 'show lldp neighbor', 'show 802-1w', 'show ip route',\\\n# 'show run']\n# cmd_4_IOS = ['show version | in from', 'show flash | in Pri Code|Sec Code']\n# cmd_4_IOS = ['show vlan brief', 'show ip interface', 'show version | in from', 'show ip osp inter brief',\n# 'show run']n\n# cmd_4_IOS = ['show vlan id 464']\nwith open(\"temp.txt\",'r') as f:\n cmd_4_IOS = [i.strip() for i in f.readlines()]\n\ndef send_show_command(device, commands):\n OutputPath = 'c:/script/output/' + str(device['host']) + '.txt'\n result = open(OutputPath, 'w')\n flag = True\n try:\n with ConnectHandler(**device) as ssh:\n ssh.enable()\n for command in commands:\n output = ssh.send_command(command, strip_command=False, strip_prompt=False)\n result.write(output + \"\\n\" + 30 * '+' + \"\\n\" + \"\\n\")\n\n except Exception as error:\n print(error)\n flag = False\n result.close()\n if flag:\n print(\"Data collection on %s is done. \\n \\n\" % (i))\n else:\n print(\"Data collection for %s is NOT done. 
\\n \\n\" % (i))\n\nswitch = {}\nfor i in device_list:\n switch[\"device_type\"] = \"ruckus_fastiron\"\n switch[\"host\"] = i\n switch[\"username\"] = u_id\n factor_2 = input(\"Trying to login to %s, enter DUO Code:\"%(i))\n switch[\"password\"] = str(factor_1) + str(factor_2)\n switch['secret'] = '',\n switch['port'] = 22\n send_show_command(switch, cmd_4_IOS)\n\nprint(\"All collection is done.\")",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
    """Train an autoencoder on generated data and check adversarial robustness.

    Trains on datasets/sine_curve.csv, saves the model, then verifies an
    adversarial-robustness property with both the SMT and Marabou backends.
    """
    architecture = [10, 5, 10]  # layer sizes: input / latent / output
    print('----------Training autoencoder----------')
    aut = autoencoder(architecture=architecture)
    data = pd.read_csv('datasets/sine_curve.csv', header=None)
    aut.train(data, epochs=20, learning_rate=0.01)
    if not aut.saveflag:
        aut.saveAE()
        print('Saving the autoencoder after training')
    print('------Checking properties of autoencoders-------')
    # Region around the origin within which the properties are checked;
    # previously defined but a literal 1 was passed instead.
    boundingBox = 1
    # Alternative property specifications kept for reference; prop2 is used.
    prop1 = ['adversarial-example', 0.1]
    prop2 = ['adversarial-robustness', [1] * 10, 0.1, 0.1]
    prop3 = ['fairness', 1, 0.1]
    enc = smtEncoding()
    counterExample = enc.checkProperties(autoencoder=aut, prop=prop2,
        boundingBox=boundingBox)
    mara = marabouEncoding()
    mara.checkProperties(autoencoder=aut, prop=prop2,
        boundingBox=boundingBox, folder='Demo-aut/autoencoder.onnx')
    # BUG FIX: identity comparison with None, and the message typo
    # ('is the given region' -> 'in the given region') is corrected.
    if counterExample is None:
        print('Autoencoder satisfies property in the given region')
    else:
        print('Autoencoder does not satisfy property in the given region for',
            counterExample)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
    """Train an autoencoder on generated data and check adversarial robustness.

    Trains on datasets/sine_curve.csv, saves the model, then verifies an
    adversarial-robustness property with both the SMT and Marabou backends.
    """
    architecture = [10, 5, 10]  # layer sizes: input / latent / output
    print('----------Training autoencoder----------')
    aut = autoencoder(architecture=architecture)
    data = pd.read_csv('datasets/sine_curve.csv', header=None)
    aut.train(data, epochs=20, learning_rate=0.01)
    if not aut.saveflag:
        aut.saveAE()
        print('Saving the autoencoder after training')
    print('------Checking properties of autoencoders-------')
    # Region around the origin within which the properties are checked;
    # previously defined but a literal 1 was passed instead.
    boundingBox = 1
    # Alternative property specifications kept for reference; prop2 is used.
    prop1 = ['adversarial-example', 0.1]
    prop2 = ['adversarial-robustness', [1] * 10, 0.1, 0.1]
    prop3 = ['fairness', 1, 0.1]
    enc = smtEncoding()
    counterExample = enc.checkProperties(autoencoder=aut, prop=prop2,
        boundingBox=boundingBox)
    mara = marabouEncoding()
    mara.checkProperties(autoencoder=aut, prop=prop2,
        boundingBox=boundingBox, folder='Demo-aut/autoencoder.onnx')
    # BUG FIX: identity comparison with None, and the message typo
    # ('is the given region' -> 'in the given region') is corrected.
    if counterExample is None:
        print('Autoencoder satisfies property in the given region')
    else:
        print('Autoencoder does not satisfy property in the given region for',
            counterExample)


if __name__ == '__main__':
    # Guarding the entry point keeps importing this module side-effect free.
    main()
<|reserved_special_token_1|>
import pandas as pd
import copy as cp
import numpy as np
from autoencoder import *
from encoding import smtEncoding
import matplotlib
import matplotlib.pyplot as plt
from data_generator import *
from marabou_encoding import marabouEncoding
def main():
    """Train an autoencoder on generated data and check adversarial robustness.

    Trains on datasets/sine_curve.csv, saves the model, then verifies an
    adversarial-robustness property with both the SMT and Marabou backends.
    """
    architecture = [10, 5, 10]  # layer sizes: input / latent / output
    print('----------Training autoencoder----------')
    aut = autoencoder(architecture=architecture)
    data = pd.read_csv('datasets/sine_curve.csv', header=None)
    aut.train(data, epochs=20, learning_rate=0.01)
    if not aut.saveflag:
        aut.saveAE()
        print('Saving the autoencoder after training')
    print('------Checking properties of autoencoders-------')
    # Region around the origin within which the properties are checked;
    # previously defined but a literal 1 was passed instead.
    boundingBox = 1
    # Alternative property specifications kept for reference; prop2 is used.
    prop1 = ['adversarial-example', 0.1]
    prop2 = ['adversarial-robustness', [1] * 10, 0.1, 0.1]
    prop3 = ['fairness', 1, 0.1]
    enc = smtEncoding()
    counterExample = enc.checkProperties(autoencoder=aut, prop=prop2,
        boundingBox=boundingBox)
    mara = marabouEncoding()
    mara.checkProperties(autoencoder=aut, prop=prop2,
        boundingBox=boundingBox, folder='Demo-aut/autoencoder.onnx')
    # BUG FIX: identity comparison with None, and the message typo
    # ('is the given region' -> 'in the given region') is corrected.
    if counterExample is None:
        print('Autoencoder satisfies property in the given region')
    else:
        print('Autoencoder does not satisfy property in the given region for',
            counterExample)


if __name__ == '__main__':
    # Guarding the entry point keeps importing this module side-effect free.
    main()
<|reserved_special_token_1|>
import pandas as pd
import copy as cp
import numpy as np
from autoencoder import *
from encoding import smtEncoding
import matplotlib
import matplotlib.pyplot as plt
from data_generator import *
from marabou_encoding import marabouEncoding
def main():
    """Train an autoencoder on generated data and check adversarial robustness.

    Trains on datasets/sine_curve.csv, saves the model, then verifies an
    adversarial-robustness property with both the SMT and Marabou backends.
    """
    architecture = [10, 5, 10]  # layer sizes: input / latent / output
    print('----------Training autoencoder----------')
    aut = autoencoder(architecture=architecture)
    data = pd.read_csv('datasets/sine_curve.csv', header=None)
    aut.train(data, epochs=20, learning_rate=0.01)
    if not aut.saveflag:
        aut.saveAE()
        print("Saving the autoencoder after training")
    # plot_output([data, aut.predict(data)], ['Original', 'Reconstructed'])
    print("------Checking properties of autoencoders-------")
    # Region around the origin within which the properties are checked;
    # previously defined but a literal 1 was passed instead.
    boundingBox = 1
    # Alternative property specifications kept for reference; prop2 is used.
    prop1 = ['adversarial-example', 0.1]
    prop2 = ['adversarial-robustness', [1] * 10, 0.1, 0.1]
    prop3 = ['fairness', 1, 0.1]
    enc = smtEncoding()
    counterExample = enc.checkProperties(autoencoder=aut, prop=prop2,
        boundingBox=boundingBox)
    mara = marabouEncoding()
    mara.checkProperties(autoencoder=aut, prop=prop2,
        boundingBox=boundingBox, folder="Demo-aut/autoencoder.onnx")
    # BUG FIX: identity comparison with None, and the message typo
    # ('is the given region' -> 'in the given region') is corrected.
    if counterExample is None:
        print("Autoencoder satisfies property in the given region")
    else:
        print("Autoencoder does not satisfy property in the given region for",
            counterExample)


if __name__ == '__main__':
    # Guarding the entry point keeps importing this module side-effect free.
    main()
|
flexible
|
{
"blob_id": "44e1208a2165fe68f71d0aa49baa29b26c961e02",
"index": 5681,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n \"\"\"\n\tTrains an autoencoder on (generated) data and checks adversarial robustness\n\t\"\"\"\n architecture = [10, 5, 10]\n print('----------Training autoencoder----------')\n aut = autoencoder(architecture=architecture)\n data = pd.read_csv('datasets/sine_curve.csv', header=None)\n aut.train(data, epochs=20, learning_rate=0.01)\n if not aut.saveflag:\n aut.saveAE()\n print('Saving the autoencoder after training')\n print('------Checking properties of autoencoders-------')\n boundingBox = 1\n prop1 = ['adversarial-example', 0.1]\n prop2 = ['adversarial-robustness', [1] * 10, 0.1, 0.1]\n prop3 = ['fairness', 1, 0.1]\n enc = smtEncoding()\n counterExample = enc.checkProperties(autoencoder=aut, prop=prop2,\n boundingBox=1)\n mara = marabouEncoding()\n mara.checkProperties(autoencoder=aut, prop=prop2, boundingBox=1, folder\n ='Demo-aut/autoencoder.onnx')\n if counterExample == None:\n print('Autoencoder satisfies property is the given region')\n else:\n print('Autoencoder does not satisfy property in the given region for',\n counterExample)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n \"\"\"\n\tTrains an autoencoder on (generated) data and checks adversarial robustness\n\t\"\"\"\n architecture = [10, 5, 10]\n print('----------Training autoencoder----------')\n aut = autoencoder(architecture=architecture)\n data = pd.read_csv('datasets/sine_curve.csv', header=None)\n aut.train(data, epochs=20, learning_rate=0.01)\n if not aut.saveflag:\n aut.saveAE()\n print('Saving the autoencoder after training')\n print('------Checking properties of autoencoders-------')\n boundingBox = 1\n prop1 = ['adversarial-example', 0.1]\n prop2 = ['adversarial-robustness', [1] * 10, 0.1, 0.1]\n prop3 = ['fairness', 1, 0.1]\n enc = smtEncoding()\n counterExample = enc.checkProperties(autoencoder=aut, prop=prop2,\n boundingBox=1)\n mara = marabouEncoding()\n mara.checkProperties(autoencoder=aut, prop=prop2, boundingBox=1, folder\n ='Demo-aut/autoencoder.onnx')\n if counterExample == None:\n print('Autoencoder satisfies property is the given region')\n else:\n print('Autoencoder does not satisfy property in the given region for',\n counterExample)\n\n\nmain()\n",
"step-4": "import pandas as pd\nimport copy as cp\nimport numpy as np\nfrom autoencoder import *\nfrom encoding import smtEncoding\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom data_generator import *\nfrom marabou_encoding import marabouEncoding\n\n\ndef main():\n \"\"\"\n\tTrains an autoencoder on (generated) data and checks adversarial robustness\n\t\"\"\"\n architecture = [10, 5, 10]\n print('----------Training autoencoder----------')\n aut = autoencoder(architecture=architecture)\n data = pd.read_csv('datasets/sine_curve.csv', header=None)\n aut.train(data, epochs=20, learning_rate=0.01)\n if not aut.saveflag:\n aut.saveAE()\n print('Saving the autoencoder after training')\n print('------Checking properties of autoencoders-------')\n boundingBox = 1\n prop1 = ['adversarial-example', 0.1]\n prop2 = ['adversarial-robustness', [1] * 10, 0.1, 0.1]\n prop3 = ['fairness', 1, 0.1]\n enc = smtEncoding()\n counterExample = enc.checkProperties(autoencoder=aut, prop=prop2,\n boundingBox=1)\n mara = marabouEncoding()\n mara.checkProperties(autoencoder=aut, prop=prop2, boundingBox=1, folder\n ='Demo-aut/autoencoder.onnx')\n if counterExample == None:\n print('Autoencoder satisfies property is the given region')\n else:\n print('Autoencoder does not satisfy property in the given region for',\n counterExample)\n\n\nmain()\n",
"step-5": "import pandas as pd\nimport copy as cp\nimport numpy as np\nfrom autoencoder import *\nfrom encoding import smtEncoding\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom data_generator import *\nfrom marabou_encoding import marabouEncoding\n\ndef main():\n\t\n\t'''\n\tTrains an autoencoder on (generated) data and checks adversarial robustness\n\t'''\n\t\n\tarchitecture = [10,5,10] # Change the architecture of the autoencoder according to requirement\n\t\n\n\n\tprint('----------Training autoencoder----------')\n\taut = autoencoder(architecture=architecture)\n\tdata = pd.read_csv('datasets/sine_curve.csv', header=None)\n\t\n\taut.train(data, epochs=20, learning_rate=0.01)\n\t\n\tif not aut.saveflag:\n\t\taut.saveAE()\n\t\tprint(\"Saving the autoencoder after training\")\n\t\n\n\t#plot_output([data, aut.predict(data)], ['Original', 'Reconstructed'])\t\n\t\n\n\tprint(\"------Checking properties of autoencoders-------\")\n\n\n\t# Parameters that can be modified\n\tboundingBox = 1 # Region around origin where the properties need to checked\n\tprop1 = ['adversarial-example', 0.1]\n\tprop2 = ['adversarial-robustness', [1]*10, 0.1, 0.1]\n\tprop3 = ['fairness', 1, 0.1]\n\n\tenc = smtEncoding()\n\tcounterExample = enc.checkProperties(autoencoder=aut, prop=prop2, boundingBox=1)\n\n\t# For marabou\n\tmara = marabouEncoding()\n\tmara.checkProperties(autoencoder=aut, prop=prop2, boundingBox=1, folder = \"Demo-aut/autoencoder.onnx\")\n\t\n\n\tif counterExample == None:\n\t\tprint(\"Autoencoder satisfies property is the given region\")\n\telse:\n\t\tprint(\"Autoencoder does not satisfy property in the given region for\", counterExample)\n\nmain()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class UserInfo(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
verbose_name = '用户'
verbose_name_plural = verbose_name
class Address(models.Model):
    """Shipping address belonging to a UserInfo account."""

    aname = models.CharField('收货人', max_length=50, null=False)  # recipient name
    ads = models.CharField('地址', max_length=300, null=False)  # street address
    phone = models.CharField('电话', max_length=20, null=False)  # contact phone
    # Explicit on_delete matches the pre-Django-2.0 default (CASCADE) and is
    # mandatory on Django >= 2.0.
    user = models.ForeignKey(UserInfo, on_delete=models.CASCADE)

    def __str__(self):
        return self.aname

    class Meta:
        verbose_name = '收货地址'
        verbose_name_plural = verbose_name
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserInfo(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
return self.uname
class Meta:
verbose_name = '用户'
verbose_name_plural = verbose_name
class Address(models.Model):
    """Shipping address belonging to a UserInfo account."""

    aname = models.CharField('收货人', max_length=50, null=False)  # recipient name
    ads = models.CharField('地址', max_length=300, null=False)  # street address
    phone = models.CharField('电话', max_length=20, null=False)  # contact phone
    # Explicit on_delete matches the pre-Django-2.0 default (CASCADE) and is
    # mandatory on Django >= 2.0.
    user = models.ForeignKey(UserInfo, on_delete=models.CASCADE)

    def __str__(self):
        return self.aname

    class Meta:
        verbose_name = '收货地址'
        verbose_name_plural = verbose_name
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserInfo(models.Model):
    """Registered user account."""

    uname = models.CharField('用户名', max_length=50, null=False)  # login name
    upassword = models.CharField('密码', max_length=200, null=False)  # password (storage format not shown here)
    email = models.CharField('邮箱', max_length=50, null=True)  # optional e-mail
    phone = models.CharField('手机号', max_length=20, null=False)  # mobile number
    # NOTE(review): auto_now refreshes on every save; for a one-time
    # registration timestamp auto_now_add may be intended — confirm.
    time = models.DateTimeField('注册时间', auto_now=True)
    isban = models.BooleanField('禁用', default=False)  # account disabled flag
    isdelete = models.BooleanField('删除', default=False)  # soft-delete flag

    def __str__(self):
        return self.uname

    class Meta:
        verbose_name = '用户'
        verbose_name_plural = verbose_name
class Address(models.Model):
    """Shipping address belonging to a UserInfo account."""

    aname = models.CharField('收货人', max_length=50, null=False)  # recipient name
    ads = models.CharField('地址', max_length=300, null=False)  # street address
    phone = models.CharField('电话', max_length=20, null=False)  # contact phone
    # Explicit on_delete matches the pre-Django-2.0 default (CASCADE) and is
    # mandatory on Django >= 2.0.
    user = models.ForeignKey(UserInfo, on_delete=models.CASCADE)

    def __str__(self):
        return self.aname

    class Meta:
        verbose_name = '收货地址'
        verbose_name_plural = verbose_name
<|reserved_special_token_1|>
from django.db import models
class UserInfo(models.Model):
    """Registered user account."""

    uname = models.CharField('用户名', max_length=50, null=False)  # login name
    upassword = models.CharField('密码', max_length=200, null=False)  # password (storage format not shown here)
    email = models.CharField('邮箱', max_length=50, null=True)  # optional e-mail
    phone = models.CharField('手机号', max_length=20, null=False)  # mobile number
    # NOTE(review): auto_now refreshes on every save; for a one-time
    # registration timestamp auto_now_add may be intended — confirm.
    time = models.DateTimeField('注册时间', auto_now=True)
    isban = models.BooleanField('禁用', default=False)  # account disabled flag
    isdelete = models.BooleanField('删除', default=False)  # soft-delete flag

    def __str__(self):
        return self.uname

    class Meta:
        verbose_name = '用户'
        verbose_name_plural = verbose_name
class Address(models.Model):
    """Shipping address belonging to a UserInfo account."""

    aname = models.CharField('收货人', max_length=50, null=False)  # recipient name
    ads = models.CharField('地址', max_length=300, null=False)  # street address
    phone = models.CharField('电话', max_length=20, null=False)  # contact phone
    # Explicit on_delete matches the pre-Django-2.0 default (CASCADE) and is
    # mandatory on Django >= 2.0.
    user = models.ForeignKey(UserInfo, on_delete=models.CASCADE)

    def __str__(self):
        return self.aname

    class Meta:
        verbose_name = '收货地址'
        verbose_name_plural = verbose_name
<|reserved_special_token_1|>
from django.db import models
# Create your models here.
class UserInfo(models.Model):
    """Registered user account."""

    uname = models.CharField('用户名', max_length=50, null=False)  # login name
    upassword = models.CharField('密码', max_length=200, null=False)  # password (storage format not shown here)
    email = models.CharField('邮箱', max_length=50, null=True)  # optional e-mail
    phone = models.CharField('手机号', max_length=20, null=False)  # mobile number
    # NOTE(review): auto_now refreshes on every save; for a one-time
    # registration timestamp auto_now_add may be intended — confirm.
    time = models.DateTimeField('注册时间', auto_now=True)
    isban = models.BooleanField('禁用', default=False)  # account disabled flag
    isdelete = models.BooleanField('删除', default=False)  # soft-delete flag

    def __str__(self):
        return self.uname

    class Meta:
        verbose_name = '用户'
        verbose_name_plural = verbose_name
class Address(models.Model):
    """Shipping address belonging to a UserInfo account."""

    aname = models.CharField('收货人', max_length=50, null=False)  # recipient name
    ads = models.CharField('地址', max_length=300, null=False)  # street address
    phone = models.CharField('电话', max_length=20, null=False)  # contact phone
    # Explicit on_delete matches the pre-Django-2.0 default (CASCADE) and is
    # mandatory on Django >= 2.0.
    user = models.ForeignKey(UserInfo, on_delete=models.CASCADE)

    def __str__(self):
        return self.aname

    class Meta:
        verbose_name = '收货地址'
        verbose_name_plural = verbose_name
|
flexible
|
{
"blob_id": "dbec74ecf488ca98f3f441e252f79bc2bc0959c1",
"index": 4068,
"step-1": "<mask token>\n\n\nclass UserInfo(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n verbose_name = '用户'\n verbose_name_plural = verbose_name\n\n\nclass Address(models.Model):\n aname = models.CharField('收货人', max_length=50, null=False)\n ads = models.CharField('地址', max_length=300, null=False)\n phone = models.CharField('电话', max_length=20, null=False)\n user = models.ForeignKey(UserInfo)\n\n def __str__(self):\n return self.aname\n\n\n class Meta:\n verbose_name = '收货地址'\n verbose_name_plural = verbose_name\n",
"step-2": "<mask token>\n\n\nclass UserInfo(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.uname\n\n\n class Meta:\n verbose_name = '用户'\n verbose_name_plural = verbose_name\n\n\nclass Address(models.Model):\n aname = models.CharField('收货人', max_length=50, null=False)\n ads = models.CharField('地址', max_length=300, null=False)\n phone = models.CharField('电话', max_length=20, null=False)\n user = models.ForeignKey(UserInfo)\n\n def __str__(self):\n return self.aname\n\n\n class Meta:\n verbose_name = '收货地址'\n verbose_name_plural = verbose_name\n",
"step-3": "<mask token>\n\n\nclass UserInfo(models.Model):\n uname = models.CharField('用户名', max_length=50, null=False)\n upassword = models.CharField('密码', max_length=200, null=False)\n email = models.CharField('邮箱', max_length=50, null=True)\n phone = models.CharField('手机号', max_length=20, null=False)\n time = models.DateTimeField('注册时间', auto_now=True)\n isban = models.BooleanField('禁用', default=False)\n isdelete = models.BooleanField('删除', default=False)\n\n def __str__(self):\n return self.uname\n\n\n class Meta:\n verbose_name = '用户'\n verbose_name_plural = verbose_name\n\n\nclass Address(models.Model):\n aname = models.CharField('收货人', max_length=50, null=False)\n ads = models.CharField('地址', max_length=300, null=False)\n phone = models.CharField('电话', max_length=20, null=False)\n user = models.ForeignKey(UserInfo)\n\n def __str__(self):\n return self.aname\n\n\n class Meta:\n verbose_name = '收货地址'\n verbose_name_plural = verbose_name\n",
"step-4": "from django.db import models\n\n\nclass UserInfo(models.Model):\n uname = models.CharField('用户名', max_length=50, null=False)\n upassword = models.CharField('密码', max_length=200, null=False)\n email = models.CharField('邮箱', max_length=50, null=True)\n phone = models.CharField('手机号', max_length=20, null=False)\n time = models.DateTimeField('注册时间', auto_now=True)\n isban = models.BooleanField('禁用', default=False)\n isdelete = models.BooleanField('删除', default=False)\n\n def __str__(self):\n return self.uname\n\n\n class Meta:\n verbose_name = '用户'\n verbose_name_plural = verbose_name\n\n\nclass Address(models.Model):\n aname = models.CharField('收货人', max_length=50, null=False)\n ads = models.CharField('地址', max_length=300, null=False)\n phone = models.CharField('电话', max_length=20, null=False)\n user = models.ForeignKey(UserInfo)\n\n def __str__(self):\n return self.aname\n\n\n class Meta:\n verbose_name = '收货地址'\n verbose_name_plural = verbose_name\n",
"step-5": "from django.db import models\n\n# Create your models here.\nclass UserInfo(models.Model):\n uname = models.CharField('用户名', max_length=50, null=False)\n upassword = models.CharField('密码', max_length=200, null=False)\n email = models.CharField('邮箱', max_length=50, null=True)\n phone = models.CharField('手机号', max_length=20, null=False)\n time = models.DateTimeField('注册时间', auto_now=True)\n isban = models.BooleanField('禁用', default=False)\n isdelete = models.BooleanField('删除', default=False)\n\n def __str__(self):\n return self.uname\n\n class Meta:\n verbose_name = '用户'\n verbose_name_plural = verbose_name\n\n\nclass Address(models.Model):\n aname = models.CharField('收货人', max_length=50, null=False)\n ads = models.CharField('地址', max_length=300, null=False)\n phone = models.CharField('电话', max_length=20, null=False)\n user = models.ForeignKey(UserInfo)\n\n def __str__(self):\n return self.aname\n\n class Meta:\n verbose_name = '收货地址'\n verbose_name_plural = verbose_name\n\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
async def main(URL, buy_time):
    """Log in to mi.com, wait until *buy_time*, then add the item to the cart.

    URL: product page URL.
    buy_time: sale start time string, e.g. '2020-02-06 12:55:50'.
    """
    browser, page = await get_window()
    # 30 s window for the user to complete the QR-code login.
    await page.goto(
        'https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180'
        )
    await asyncio.sleep(30)
    # 10 s to pick the product variant.
    await page.goto(URL)
    await asyncio.sleep(10)
    await sleep_time(buy_time)
    old_url = page.url
    # Add to cart: retry until the buy button becomes clickable.
    # BUG FIX: the retry counter was reset to 0 on every loop iteration, so
    # the printed count never advanced; initialise it once before the loop.
    index = 0
    while True:
        try:
            print(f'重试 {index}')
            await page.click('[class="btn btn-primary"]')
            break
        except Exception:
            # 'except Exception' instead of a bare except so Ctrl-C still works.
            index += 1
            await asyncio.sleep(CLICK_FREQUENCY)
    # Wait for navigation away from the product page.
    while True:
        if page.url != old_url:
            break
        await asyncio.sleep(CLICK_FREQUENCY)
    # Click through to the shopping cart.
    while True:
        try:
            await page.click('[class="btn btn-primary"]')
            break
        except Exception:
            await asyncio.sleep(CLICK_FREQUENCY)
    # 100 s for the user to complete payment manually.
    await asyncio.sleep(100)
    await close_window(browser)
if __name__ == '__main__':
    # Script entry: prompt for the product URL and the sale start time.
    URL = input('宝贝链接:\n')
    buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\n')
    asyncio.run(main(URL, buy_time))
<|reserved_special_token_1|>
from helper import *
async def main(URL, buy_time):
    """Log in to mi.com, wait until *buy_time*, then add the item to the cart.

    URL: product page URL.
    buy_time: sale start time string, e.g. '2020-02-06 12:55:50'.
    """
    browser, page = await get_window()
    # 30 s window for the user to complete the QR-code login.
    await page.goto(
        'https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180'
        )
    await asyncio.sleep(30)
    # 10 s to pick the product variant.
    await page.goto(URL)
    await asyncio.sleep(10)
    await sleep_time(buy_time)
    old_url = page.url
    # Add to cart: retry until the buy button becomes clickable.
    # BUG FIX: the retry counter was reset to 0 on every loop iteration, so
    # the printed count never advanced; initialise it once before the loop.
    index = 0
    while True:
        try:
            print(f'重试 {index}')
            await page.click('[class="btn btn-primary"]')
            break
        except Exception:
            # 'except Exception' instead of a bare except so Ctrl-C still works.
            index += 1
            await asyncio.sleep(CLICK_FREQUENCY)
    # Wait for navigation away from the product page.
    while True:
        if page.url != old_url:
            break
        await asyncio.sleep(CLICK_FREQUENCY)
    # Click through to the shopping cart.
    while True:
        try:
            await page.click('[class="btn btn-primary"]')
            break
        except Exception:
            await asyncio.sleep(CLICK_FREQUENCY)
    # 100 s for the user to complete payment manually.
    await asyncio.sleep(100)
    await close_window(browser)
if __name__ == '__main__':
    # Script entry: prompt for the product URL and the sale start time.
    URL = input('宝贝链接:\n')
    buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\n')
    asyncio.run(main(URL, buy_time))
<|reserved_special_token_1|>
from helper import *
async def main(URL, buy_time):
    """Log in to mi.com, wait until *buy_time*, then add the item to the cart.

    URL: product page URL.
    buy_time: sale start time string, e.g. '2020-02-06 12:55:50'.
    """
    browser, page = await get_window()
    # 30 s window for the user to complete the QR-code login.
    await page.goto('https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180')
    await asyncio.sleep(30)

    # 10 s to pick the product variant.
    await page.goto(URL)
    await asyncio.sleep(10)

    await sleep_time(buy_time)
    old_url = page.url

    # Add to cart: retry until the buy button becomes clickable.
    # BUG FIX: the retry counter was reset to 0 on every loop iteration, so
    # the printed count never advanced; initialise it once before the loop.
    index = 0
    while True:
        try:
            print(f'重试 {index}')
            # Click the "add to cart" button.
            await page.click('[class="btn btn-primary"]')
            break
        except Exception:
            # 'except Exception' instead of a bare except so Ctrl-C still works.
            index += 1
            await asyncio.sleep(CLICK_FREQUENCY)

    # Wait for navigation away from the product page.
    while True:
        if page.url != old_url:
            break
        await asyncio.sleep(CLICK_FREQUENCY)

    while True:
        try:
            # Click through to the shopping cart.
            await page.click('[class="btn btn-primary"]')
            break
        except Exception:
            await asyncio.sleep(CLICK_FREQUENCY)
    # 100 s for the user to complete payment manually.
    await asyncio.sleep(100)
    await close_window(browser)
if __name__ == '__main__':
    # Script entry: prompt for the product URL and the sale start time.
    URL = input('宝贝链接:\n')
    buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\n')
    asyncio.run(main(URL, buy_time))
|
flexible
|
{
"blob_id": "1e87f625fb7bd9f9bf4233229332c909702954a5",
"index": 4334,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nasync def main(URL, buy_time):\n browser, page = await get_window()\n await page.goto(\n 'https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180'\n )\n await asyncio.sleep(30)\n await page.goto(URL)\n await asyncio.sleep(10)\n await sleep_time(buy_time)\n old_url = page.url\n while True:\n index = 0\n try:\n print(f'重试 {index}')\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n index += 1\n await asyncio.sleep(CLICK_FREQUENCY)\n while True:\n if page.url != old_url:\n break\n await asyncio.sleep(CLICK_FREQUENCY)\n while True:\n try:\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n await asyncio.sleep(CLICK_FREQUENCY)\n await asyncio.sleep(100)\n await close_window(browser)\n\n\nif __name__ == '__main__':\n URL = input('宝贝链接:\\n')\n buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\\n')\n asyncio.run(main(URL, buy_time))\n",
"step-3": "from helper import *\n\n\nasync def main(URL, buy_time):\n browser, page = await get_window()\n await page.goto(\n 'https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180'\n )\n await asyncio.sleep(30)\n await page.goto(URL)\n await asyncio.sleep(10)\n await sleep_time(buy_time)\n old_url = page.url\n while True:\n index = 0\n try:\n print(f'重试 {index}')\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n index += 1\n await asyncio.sleep(CLICK_FREQUENCY)\n while True:\n if page.url != old_url:\n break\n await asyncio.sleep(CLICK_FREQUENCY)\n while True:\n try:\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n await asyncio.sleep(CLICK_FREQUENCY)\n await asyncio.sleep(100)\n await close_window(browser)\n\n\nif __name__ == '__main__':\n URL = input('宝贝链接:\\n')\n buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\\n')\n asyncio.run(main(URL, buy_time))\n",
"step-4": "from helper import *\n\n\nasync def main(URL, buy_time):\n browser, page = await get_window()\n # 30s登陆时间\n await page.goto('https://account.xiaomi.com/pass/serviceLogin?callback=http%3A%2F%2Forder.mi.com%2Flogin%2Fcallback%3Ffollowup%3Dhttps%253A%252F%252Fwww.mi.com%252F%26sign%3DNzY3MDk1YzczNmUwMGM4ODAxOWE0NjRiNTU5ZGQyMzFhYjFmOGU0Nw%2C%2C&sid=mi_eshop&_bannerBiz=mistore&_qrsize=180')\n await asyncio.sleep(30)\n\n # 选款式时间10s\n await page.goto(URL)\n await asyncio.sleep(10)\n\n await sleep_time(buy_time)\n old_url = page.url\n\n #加入购物车\n while True:\n index = 0\n try:\n print(f'重试 {index}')\n # 找到“加入购物车”,点击\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n index += 1\n await asyncio.sleep(CLICK_FREQUENCY)\n\n # 等待页面跳转\n while True:\n if page.url != old_url:\n break\n await asyncio.sleep(CLICK_FREQUENCY)\n\n while True:\n try:\n # 找到“进入购物车”,点击\n await page.click('[class=\"btn btn-primary\"]')\n break\n except:\n await asyncio.sleep(CLICK_FREQUENCY)\n # 付款\n await asyncio.sleep(100)\n await close_window(browser)\n\nif __name__ == '__main__':\n URL = input('宝贝链接:\\n')\n buy_time = input('请输入开售时间 【2020-02-06(空格)12:55:50】\\n')\n asyncio.run(main(URL, buy_time))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Python 3 program - Currency Sum Validator
# def bill_count
def bill_count(amount_user, list_of_money_bills):
n = len(list_of_money_bills)
# Initialize Result
ans = []
# Traverse through all the list
i = n - 1
while (i >= 0):
# Find list
while (amount_user >= list_of_money_bills[i]):
amount_user -= list_of_money_bills[i]
ans.append(list_of_money_bills[i])
i -= 1
# Print result
a = dict({i: ans.count(i) for i in ans})
values = a.values()
total = sum(values)
print("The minimum count of money bills required to equal the user money amount is:" + str(total))
# Driver Code
if __name__ == '__main__':
amount_user = int(input("Enter the total amount that the user has: "))
list_of_money_bills = [int(x) for x in input("Enter the list of available money bills:").split()]
bill_count(amount_user, list_of_money_bills)
# This code is contributed by
# Akanksha Bothe
|
normal
|
{
"blob_id": "53c5f298dbfb21d7688fef8f0312858e2fd73d79",
"index": 4423,
"step-1": "<mask token>\n",
"step-2": "def bill_count(amount_user, list_of_money_bills):\n n = len(list_of_money_bills)\n ans = []\n i = n - 1\n while i >= 0:\n while amount_user >= list_of_money_bills[i]:\n amount_user -= list_of_money_bills[i]\n ans.append(list_of_money_bills[i])\n i -= 1\n a = dict({i: ans.count(i) for i in ans})\n values = a.values()\n total = sum(values)\n print(\n 'The minimum count of money bills required to equal the user money amount is:'\n + str(total))\n\n\n<mask token>\n",
"step-3": "def bill_count(amount_user, list_of_money_bills):\n n = len(list_of_money_bills)\n ans = []\n i = n - 1\n while i >= 0:\n while amount_user >= list_of_money_bills[i]:\n amount_user -= list_of_money_bills[i]\n ans.append(list_of_money_bills[i])\n i -= 1\n a = dict({i: ans.count(i) for i in ans})\n values = a.values()\n total = sum(values)\n print(\n 'The minimum count of money bills required to equal the user money amount is:'\n + str(total))\n\n\nif __name__ == '__main__':\n amount_user = int(input('Enter the total amount that the user has: '))\n list_of_money_bills = [int(x) for x in input(\n 'Enter the list of available money bills:').split()]\n bill_count(amount_user, list_of_money_bills)\n",
"step-4": "# Python 3 program - Currency Sum Validator\n\n\n# def bill_count\ndef bill_count(amount_user, list_of_money_bills):\n\n\n\n n = len(list_of_money_bills)\n\n # Initialize Result\n ans = []\n\n # Traverse through all the list\n i = n - 1\n\n while (i >= 0):\n\n # Find list\n while (amount_user >= list_of_money_bills[i]):\n amount_user -= list_of_money_bills[i]\n ans.append(list_of_money_bills[i])\n\n i -= 1\n\n # Print result\n\n\n a = dict({i: ans.count(i) for i in ans})\n\n\n\n values = a.values()\n total = sum(values)\n print(\"The minimum count of money bills required to equal the user money amount is:\" + str(total))\n\n\n# Driver Code\nif __name__ == '__main__':\n\n\n\n amount_user = int(input(\"Enter the total amount that the user has: \"))\n list_of_money_bills = [int(x) for x in input(\"Enter the list of available money bills:\").split()]\n bill_count(amount_user, list_of_money_bills)\n# This code is contributed by\n# Akanksha Bothe\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_sweeps(ref_params_d, n_writers):
params_d = copy.deepcopy(ref_params_d)
params_d['writer']['nprocs'].values = [n_writers]
params_d['writer']['decomposition'].values = [n_writers]
all_dicts = []
all_sweeps = []
for r in [8]:
par_r = copy.deepcopy(params_d)
par_r['reader']['nprocs'].values = [n_writers // r]
par_r['reader']['decomposition'].values = [n_writers // r]
for d in ['512MB']:
par_r_d = copy.deepcopy(par_r)
par_r_d['writer']['configfile'].values = [
'staging-perf-test-{}-{}to1.txt'.format(d, r)]
par_r_d['reader']['configfile'].values = [
'staging-perf-test-{}-{}to1.txt'.format(d, r)]
for e in ['bp4', 'sst-rdma', 'sst-tcp', 'ssc', 'insitumpi']:
par_r_d_e = copy.deepcopy(par_r_d)
par_r_d_e['writer']['xmlfile'].values = [
'staging-perf-test-{}.xml'.format(e)]
par_r_d_e['reader']['xmlfile'].values = [
'staging-perf-test-{}.xml'.format(e)]
all_dicts.append(par_r_d_e)
for d in all_dicts:
sweep_params = []
sweep_params.extend(list(d['writer'].values()))
sweep_params.extend(list(d['reader'].values()))
sep_node_layout = get_separate_node_layout(32, 32)
shared_node_layout = None
if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0
] == 8:
shared_node_layout = get_shared_node_layout(32, 4)
elif n_writers // 32 < 4096:
shared_node_layout = get_shared_node_layout(16, 16)
rc_dependency = None
if 'bp4' in d['writer']['xmlfile'].values[0]:
rc_dependency = {'reader': 'writer'}
sweep_sep = p.Sweep(parameters=sweep_params, node_layout={'summit':
sep_node_layout}, rc_dependency=rc_dependency)
if 'insitumpi' in d['writer']['xmlfile'].values[0]:
sweep_sep.launch_mode = 'mpmd'
if 'ssc' in d['writer']['xmlfile'].values[0]:
sweep_sep.launch_mode = 'mpmd'
sweep_shared = None
if shared_node_layout:
sweep_shared = p.Sweep(parameters=sweep_params, node_layout={
'summit': shared_node_layout}, rc_dependency=rc_dependency)
if n_writers // 32 < 4096:
all_sweeps.append(sweep_sep)
if sweep_shared:
all_sweeps.append(sweep_shared)
return all_sweeps
class Adios_iotest(Campaign):
name = 'ADIOS_IOTEST'
codes = [('writer', dict(exe='adios_iotest')), ('reader', dict(exe=
'adios_iotest'))]
supported_machines = ['local', 'theta', 'summit']
kill_on_partial_failure = True
run_dir_setup_script = None
run_post_process_script = 'cleanup.sh'
umask = '027'
scheduler_options = {'theta': {'project': 'CSC249ADCD01', 'queue':
'batch'}, 'summit': {'project': 'csc303'}}
app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh',
'summit': 'env_setup.sh'}
input_files = ['staging-perf-test-16MB-2to1.txt',
'staging-perf-test-16MB-8to1.txt', 'staging-perf-test-1MB-2to1.txt',
'staging-perf-test-1MB-8to1.txt',
'staging-perf-test-512MB-2to1.txt',
'staging-perf-test-512MB-8to1.txt', 'staging-perf-test-bp4.xml',
'staging-perf-test-insitumpi.xml', 'staging-perf-test-ssc.xml',
'staging-perf-test-sst-rdma.xml', 'staging-perf-test-sst-tcp.xml']
params = {}
params['writer'] = {}
params['reader'] = {}
params['writer']['nprocs'] = p.ParamRunner('writer', 'nprocs', [])
params['writer']['appid'] = p.ParamCmdLineOption('writer', 'appid',
'-a', [1])
params['writer']['configfile'] = p.ParamCmdLineOption('writer',
'configFile', '-c', [])
params['writer']['scaling'] = p.ParamCmdLineOption('writer', 'scaling',
'-w', [None])
params['writer']['xmlfile'] = p.ParamCmdLineOption('writer', 'xmlfile',
'-x', [])
params['writer']['decomposition'] = p.ParamCmdLineOption('writer',
'decomposition', '-d', [])
params['reader']['nprocs'] = p.ParamRunner('reader', 'nprocs', [])
params['reader']['appid'] = p.ParamCmdLineOption('reader', 'appid',
'-a', [2])
params['reader']['configfile'] = p.ParamCmdLineOption('reader',
'configFile', '-c', [])
params['reader']['scaling'] = p.ParamCmdLineOption('reader', 'scaling',
'-w', [None])
params['reader']['xmlfile'] = p.ParamCmdLineOption('reader', 'xmlfile',
'-x', [])
params['reader']['decomposition'] = p.ParamCmdLineOption('reader',
'decomposition', '-d', [])
sweeps = []
for n in [8]:
group_sweeps = get_sweeps(params, n * 32)
s_group = p.SweepGroup('{}-nodes'.format(n), walltime=7200,
per_run_timeout=600, component_inputs={'writer': input_files},
parameter_groups=group_sweeps)
sweeps.append(s_group)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_shared_node_layout(n_writers, n_readers):
nc = SummitNode()
for i in range(n_writers):
nc.cpu[i] = 'writer:{}'.format(i)
for i in range(n_readers):
nc.cpu[i + n_writers] = 'reader:{}'.format(i)
return [nc]
<|reserved_special_token_0|>
def get_sweeps(ref_params_d, n_writers):
params_d = copy.deepcopy(ref_params_d)
params_d['writer']['nprocs'].values = [n_writers]
params_d['writer']['decomposition'].values = [n_writers]
all_dicts = []
all_sweeps = []
for r in [8]:
par_r = copy.deepcopy(params_d)
par_r['reader']['nprocs'].values = [n_writers // r]
par_r['reader']['decomposition'].values = [n_writers // r]
for d in ['512MB']:
par_r_d = copy.deepcopy(par_r)
par_r_d['writer']['configfile'].values = [
'staging-perf-test-{}-{}to1.txt'.format(d, r)]
par_r_d['reader']['configfile'].values = [
'staging-perf-test-{}-{}to1.txt'.format(d, r)]
for e in ['bp4', 'sst-rdma', 'sst-tcp', 'ssc', 'insitumpi']:
par_r_d_e = copy.deepcopy(par_r_d)
par_r_d_e['writer']['xmlfile'].values = [
'staging-perf-test-{}.xml'.format(e)]
par_r_d_e['reader']['xmlfile'].values = [
'staging-perf-test-{}.xml'.format(e)]
all_dicts.append(par_r_d_e)
for d in all_dicts:
sweep_params = []
sweep_params.extend(list(d['writer'].values()))
sweep_params.extend(list(d['reader'].values()))
sep_node_layout = get_separate_node_layout(32, 32)
shared_node_layout = None
if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0
] == 8:
shared_node_layout = get_shared_node_layout(32, 4)
elif n_writers // 32 < 4096:
shared_node_layout = get_shared_node_layout(16, 16)
rc_dependency = None
if 'bp4' in d['writer']['xmlfile'].values[0]:
rc_dependency = {'reader': 'writer'}
sweep_sep = p.Sweep(parameters=sweep_params, node_layout={'summit':
sep_node_layout}, rc_dependency=rc_dependency)
if 'insitumpi' in d['writer']['xmlfile'].values[0]:
sweep_sep.launch_mode = 'mpmd'
if 'ssc' in d['writer']['xmlfile'].values[0]:
sweep_sep.launch_mode = 'mpmd'
sweep_shared = None
if shared_node_layout:
sweep_shared = p.Sweep(parameters=sweep_params, node_layout={
'summit': shared_node_layout}, rc_dependency=rc_dependency)
if n_writers // 32 < 4096:
all_sweeps.append(sweep_sep)
if sweep_shared:
all_sweeps.append(sweep_shared)
return all_sweeps
class Adios_iotest(Campaign):
name = 'ADIOS_IOTEST'
codes = [('writer', dict(exe='adios_iotest')), ('reader', dict(exe=
'adios_iotest'))]
supported_machines = ['local', 'theta', 'summit']
kill_on_partial_failure = True
run_dir_setup_script = None
run_post_process_script = 'cleanup.sh'
umask = '027'
scheduler_options = {'theta': {'project': 'CSC249ADCD01', 'queue':
'batch'}, 'summit': {'project': 'csc303'}}
app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh',
'summit': 'env_setup.sh'}
input_files = ['staging-perf-test-16MB-2to1.txt',
'staging-perf-test-16MB-8to1.txt', 'staging-perf-test-1MB-2to1.txt',
'staging-perf-test-1MB-8to1.txt',
'staging-perf-test-512MB-2to1.txt',
'staging-perf-test-512MB-8to1.txt', 'staging-perf-test-bp4.xml',
'staging-perf-test-insitumpi.xml', 'staging-perf-test-ssc.xml',
'staging-perf-test-sst-rdma.xml', 'staging-perf-test-sst-tcp.xml']
params = {}
params['writer'] = {}
params['reader'] = {}
params['writer']['nprocs'] = p.ParamRunner('writer', 'nprocs', [])
params['writer']['appid'] = p.ParamCmdLineOption('writer', 'appid',
'-a', [1])
params['writer']['configfile'] = p.ParamCmdLineOption('writer',
'configFile', '-c', [])
params['writer']['scaling'] = p.ParamCmdLineOption('writer', 'scaling',
'-w', [None])
params['writer']['xmlfile'] = p.ParamCmdLineOption('writer', 'xmlfile',
'-x', [])
params['writer']['decomposition'] = p.ParamCmdLineOption('writer',
'decomposition', '-d', [])
params['reader']['nprocs'] = p.ParamRunner('reader', 'nprocs', [])
params['reader']['appid'] = p.ParamCmdLineOption('reader', 'appid',
'-a', [2])
params['reader']['configfile'] = p.ParamCmdLineOption('reader',
'configFile', '-c', [])
params['reader']['scaling'] = p.ParamCmdLineOption('reader', 'scaling',
'-w', [None])
params['reader']['xmlfile'] = p.ParamCmdLineOption('reader', 'xmlfile',
'-x', [])
params['reader']['decomposition'] = p.ParamCmdLineOption('reader',
'decomposition', '-d', [])
sweeps = []
for n in [8]:
group_sweeps = get_sweeps(params, n * 32)
s_group = p.SweepGroup('{}-nodes'.format(n), walltime=7200,
per_run_timeout=600, component_inputs={'writer': input_files},
parameter_groups=group_sweeps)
sweeps.append(s_group)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_shared_node_layout(n_writers, n_readers):
nc = SummitNode()
for i in range(n_writers):
nc.cpu[i] = 'writer:{}'.format(i)
for i in range(n_readers):
nc.cpu[i + n_writers] = 'reader:{}'.format(i)
return [nc]
def get_separate_node_layout(n_writers, n_readers):
nc_w = SummitNode()
for i in range(n_writers):
nc_w.cpu[i] = 'writer:{}'.format(i)
nc_r = SummitNode()
for i in range(n_readers):
nc_r.cpu[i] = 'reader:{}'.format(i)
return [nc_w, nc_r]
def get_sweeps(ref_params_d, n_writers):
params_d = copy.deepcopy(ref_params_d)
params_d['writer']['nprocs'].values = [n_writers]
params_d['writer']['decomposition'].values = [n_writers]
all_dicts = []
all_sweeps = []
for r in [8]:
par_r = copy.deepcopy(params_d)
par_r['reader']['nprocs'].values = [n_writers // r]
par_r['reader']['decomposition'].values = [n_writers // r]
for d in ['512MB']:
par_r_d = copy.deepcopy(par_r)
par_r_d['writer']['configfile'].values = [
'staging-perf-test-{}-{}to1.txt'.format(d, r)]
par_r_d['reader']['configfile'].values = [
'staging-perf-test-{}-{}to1.txt'.format(d, r)]
for e in ['bp4', 'sst-rdma', 'sst-tcp', 'ssc', 'insitumpi']:
par_r_d_e = copy.deepcopy(par_r_d)
par_r_d_e['writer']['xmlfile'].values = [
'staging-perf-test-{}.xml'.format(e)]
par_r_d_e['reader']['xmlfile'].values = [
'staging-perf-test-{}.xml'.format(e)]
all_dicts.append(par_r_d_e)
for d in all_dicts:
sweep_params = []
sweep_params.extend(list(d['writer'].values()))
sweep_params.extend(list(d['reader'].values()))
sep_node_layout = get_separate_node_layout(32, 32)
shared_node_layout = None
if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0
] == 8:
shared_node_layout = get_shared_node_layout(32, 4)
elif n_writers // 32 < 4096:
shared_node_layout = get_shared_node_layout(16, 16)
rc_dependency = None
if 'bp4' in d['writer']['xmlfile'].values[0]:
rc_dependency = {'reader': 'writer'}
sweep_sep = p.Sweep(parameters=sweep_params, node_layout={'summit':
sep_node_layout}, rc_dependency=rc_dependency)
if 'insitumpi' in d['writer']['xmlfile'].values[0]:
sweep_sep.launch_mode = 'mpmd'
if 'ssc' in d['writer']['xmlfile'].values[0]:
sweep_sep.launch_mode = 'mpmd'
sweep_shared = None
if shared_node_layout:
sweep_shared = p.Sweep(parameters=sweep_params, node_layout={
'summit': shared_node_layout}, rc_dependency=rc_dependency)
if n_writers // 32 < 4096:
all_sweeps.append(sweep_sep)
if sweep_shared:
all_sweeps.append(sweep_shared)
return all_sweeps
class Adios_iotest(Campaign):
name = 'ADIOS_IOTEST'
codes = [('writer', dict(exe='adios_iotest')), ('reader', dict(exe=
'adios_iotest'))]
supported_machines = ['local', 'theta', 'summit']
kill_on_partial_failure = True
run_dir_setup_script = None
run_post_process_script = 'cleanup.sh'
umask = '027'
scheduler_options = {'theta': {'project': 'CSC249ADCD01', 'queue':
'batch'}, 'summit': {'project': 'csc303'}}
app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh',
'summit': 'env_setup.sh'}
input_files = ['staging-perf-test-16MB-2to1.txt',
'staging-perf-test-16MB-8to1.txt', 'staging-perf-test-1MB-2to1.txt',
'staging-perf-test-1MB-8to1.txt',
'staging-perf-test-512MB-2to1.txt',
'staging-perf-test-512MB-8to1.txt', 'staging-perf-test-bp4.xml',
'staging-perf-test-insitumpi.xml', 'staging-perf-test-ssc.xml',
'staging-perf-test-sst-rdma.xml', 'staging-perf-test-sst-tcp.xml']
params = {}
params['writer'] = {}
params['reader'] = {}
params['writer']['nprocs'] = p.ParamRunner('writer', 'nprocs', [])
params['writer']['appid'] = p.ParamCmdLineOption('writer', 'appid',
'-a', [1])
params['writer']['configfile'] = p.ParamCmdLineOption('writer',
'configFile', '-c', [])
params['writer']['scaling'] = p.ParamCmdLineOption('writer', 'scaling',
'-w', [None])
params['writer']['xmlfile'] = p.ParamCmdLineOption('writer', 'xmlfile',
'-x', [])
params['writer']['decomposition'] = p.ParamCmdLineOption('writer',
'decomposition', '-d', [])
params['reader']['nprocs'] = p.ParamRunner('reader', 'nprocs', [])
params['reader']['appid'] = p.ParamCmdLineOption('reader', 'appid',
'-a', [2])
params['reader']['configfile'] = p.ParamCmdLineOption('reader',
'configFile', '-c', [])
params['reader']['scaling'] = p.ParamCmdLineOption('reader', 'scaling',
'-w', [None])
params['reader']['xmlfile'] = p.ParamCmdLineOption('reader', 'xmlfile',
'-x', [])
params['reader']['decomposition'] = p.ParamCmdLineOption('reader',
'decomposition', '-d', [])
sweeps = []
for n in [8]:
group_sweeps = get_sweeps(params, n * 32)
s_group = p.SweepGroup('{}-nodes'.format(n), walltime=7200,
per_run_timeout=600, component_inputs={'writer': input_files},
parameter_groups=group_sweeps)
sweeps.append(s_group)
<|reserved_special_token_1|>
from codar.cheetah import Campaign
from codar.cheetah import parameters as p
from codar.savanna.machines import SummitNode
import copy
def get_shared_node_layout(n_writers, n_readers):
nc = SummitNode()
for i in range(n_writers):
nc.cpu[i] = 'writer:{}'.format(i)
for i in range(n_readers):
nc.cpu[i + n_writers] = 'reader:{}'.format(i)
return [nc]
def get_separate_node_layout(n_writers, n_readers):
nc_w = SummitNode()
for i in range(n_writers):
nc_w.cpu[i] = 'writer:{}'.format(i)
nc_r = SummitNode()
for i in range(n_readers):
nc_r.cpu[i] = 'reader:{}'.format(i)
return [nc_w, nc_r]
def get_sweeps(ref_params_d, n_writers):
params_d = copy.deepcopy(ref_params_d)
params_d['writer']['nprocs'].values = [n_writers]
params_d['writer']['decomposition'].values = [n_writers]
all_dicts = []
all_sweeps = []
for r in [8]:
par_r = copy.deepcopy(params_d)
par_r['reader']['nprocs'].values = [n_writers // r]
par_r['reader']['decomposition'].values = [n_writers // r]
for d in ['512MB']:
par_r_d = copy.deepcopy(par_r)
par_r_d['writer']['configfile'].values = [
'staging-perf-test-{}-{}to1.txt'.format(d, r)]
par_r_d['reader']['configfile'].values = [
'staging-perf-test-{}-{}to1.txt'.format(d, r)]
for e in ['bp4', 'sst-rdma', 'sst-tcp', 'ssc', 'insitumpi']:
par_r_d_e = copy.deepcopy(par_r_d)
par_r_d_e['writer']['xmlfile'].values = [
'staging-perf-test-{}.xml'.format(e)]
par_r_d_e['reader']['xmlfile'].values = [
'staging-perf-test-{}.xml'.format(e)]
all_dicts.append(par_r_d_e)
for d in all_dicts:
sweep_params = []
sweep_params.extend(list(d['writer'].values()))
sweep_params.extend(list(d['reader'].values()))
sep_node_layout = get_separate_node_layout(32, 32)
shared_node_layout = None
if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0
] == 8:
shared_node_layout = get_shared_node_layout(32, 4)
elif n_writers // 32 < 4096:
shared_node_layout = get_shared_node_layout(16, 16)
rc_dependency = None
if 'bp4' in d['writer']['xmlfile'].values[0]:
rc_dependency = {'reader': 'writer'}
sweep_sep = p.Sweep(parameters=sweep_params, node_layout={'summit':
sep_node_layout}, rc_dependency=rc_dependency)
if 'insitumpi' in d['writer']['xmlfile'].values[0]:
sweep_sep.launch_mode = 'mpmd'
if 'ssc' in d['writer']['xmlfile'].values[0]:
sweep_sep.launch_mode = 'mpmd'
sweep_shared = None
if shared_node_layout:
sweep_shared = p.Sweep(parameters=sweep_params, node_layout={
'summit': shared_node_layout}, rc_dependency=rc_dependency)
if n_writers // 32 < 4096:
all_sweeps.append(sweep_sep)
if sweep_shared:
all_sweeps.append(sweep_shared)
return all_sweeps
class Adios_iotest(Campaign):
name = 'ADIOS_IOTEST'
codes = [('writer', dict(exe='adios_iotest')), ('reader', dict(exe=
'adios_iotest'))]
supported_machines = ['local', 'theta', 'summit']
kill_on_partial_failure = True
run_dir_setup_script = None
run_post_process_script = 'cleanup.sh'
umask = '027'
scheduler_options = {'theta': {'project': 'CSC249ADCD01', 'queue':
'batch'}, 'summit': {'project': 'csc303'}}
app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh',
'summit': 'env_setup.sh'}
input_files = ['staging-perf-test-16MB-2to1.txt',
'staging-perf-test-16MB-8to1.txt', 'staging-perf-test-1MB-2to1.txt',
'staging-perf-test-1MB-8to1.txt',
'staging-perf-test-512MB-2to1.txt',
'staging-perf-test-512MB-8to1.txt', 'staging-perf-test-bp4.xml',
'staging-perf-test-insitumpi.xml', 'staging-perf-test-ssc.xml',
'staging-perf-test-sst-rdma.xml', 'staging-perf-test-sst-tcp.xml']
params = {}
params['writer'] = {}
params['reader'] = {}
params['writer']['nprocs'] = p.ParamRunner('writer', 'nprocs', [])
params['writer']['appid'] = p.ParamCmdLineOption('writer', 'appid',
'-a', [1])
params['writer']['configfile'] = p.ParamCmdLineOption('writer',
'configFile', '-c', [])
params['writer']['scaling'] = p.ParamCmdLineOption('writer', 'scaling',
'-w', [None])
params['writer']['xmlfile'] = p.ParamCmdLineOption('writer', 'xmlfile',
'-x', [])
params['writer']['decomposition'] = p.ParamCmdLineOption('writer',
'decomposition', '-d', [])
params['reader']['nprocs'] = p.ParamRunner('reader', 'nprocs', [])
params['reader']['appid'] = p.ParamCmdLineOption('reader', 'appid',
'-a', [2])
params['reader']['configfile'] = p.ParamCmdLineOption('reader',
'configFile', '-c', [])
params['reader']['scaling'] = p.ParamCmdLineOption('reader', 'scaling',
'-w', [None])
params['reader']['xmlfile'] = p.ParamCmdLineOption('reader', 'xmlfile',
'-x', [])
params['reader']['decomposition'] = p.ParamCmdLineOption('reader',
'decomposition', '-d', [])
sweeps = []
for n in [8]:
group_sweeps = get_sweeps(params, n * 32)
s_group = p.SweepGroup('{}-nodes'.format(n), walltime=7200,
per_run_timeout=600, component_inputs={'writer': input_files},
parameter_groups=group_sweeps)
sweeps.append(s_group)
<|reserved_special_token_1|>
from codar.cheetah import Campaign
from codar.cheetah import parameters as p
from codar.savanna.machines import SummitNode
import copy
def get_shared_node_layout (n_writers, n_readers):
nc = SummitNode()
for i in range(n_writers):
nc.cpu[i] = "writer:{}".format(i)
for i in range(n_readers):
nc.cpu[i+n_writers] = "reader:{}".format(i)
return [nc]
def get_separate_node_layout (n_writers, n_readers):
nc_w = SummitNode()
for i in range(n_writers):
nc_w.cpu[i] = "writer:{}".format(i)
nc_r = SummitNode()
for i in range(n_readers):
nc_r.cpu[i] = "reader:{}".format(i)
return [nc_w,nc_r]
def get_sweeps(ref_params_d, n_writers):
params_d = copy.deepcopy(ref_params_d)
params_d['writer']['nprocs'].values=[n_writers]
params_d['writer']['decomposition'].values=[n_writers]
all_dicts = []
all_sweeps = []
# Loop over ratio of the no. of reader ranks
for r in [8]:
par_r = copy.deepcopy(params_d)
par_r['reader']['nprocs'].values = [n_writers//r]
par_r['reader']['decomposition'].values = [n_writers//r]
# Loop over data size per process
for d in ['512MB']:
par_r_d = copy.deepcopy(par_r)
par_r_d['writer']['configfile'].values = ['staging-perf-test-{}-{}to1.txt'.format(d,r)]
par_r_d['reader']['configfile'].values = ['staging-perf-test-{}-{}to1.txt'.format(d,r)]
# Loop over engines
for e in ["bp4","sst-rdma","sst-tcp","ssc","insitumpi"]:
par_r_d_e = copy.deepcopy(par_r_d)
par_r_d_e['writer']['xmlfile'].values = ['staging-perf-test-{}.xml'.format(e)]
par_r_d_e['reader']['xmlfile'].values = ['staging-perf-test-{}.xml'.format(e)]
all_dicts.append(par_r_d_e)
for d in all_dicts:
sweep_params = []
sweep_params.extend(list(d['writer'].values()))
sweep_params.extend(list(d['reader'].values()))
sep_node_layout = get_separate_node_layout(32, 32)
shared_node_layout = None
if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0] == 8:
shared_node_layout = get_shared_node_layout(32,4)
elif n_writers//32 < 4096:
shared_node_layout = get_shared_node_layout(16,16)
rc_dependency = None
if 'bp4' in d['writer']['xmlfile'].values[0]:
rc_dependency = {'reader': 'writer'}
sweep_sep = p.Sweep(parameters = sweep_params, node_layout = {'summit':sep_node_layout}, rc_dependency=rc_dependency)
if 'insitumpi' in d['writer']['xmlfile'].values[0]:
sweep_sep.launch_mode='mpmd'
if 'ssc' in d['writer']['xmlfile'].values[0]:
sweep_sep.launch_mode='mpmd'
sweep_shared = None
if shared_node_layout:
sweep_shared = p.Sweep(parameters = sweep_params, node_layout = {'summit':shared_node_layout}, rc_dependency=rc_dependency)
if n_writers//32 < 4096:
all_sweeps.append(sweep_sep)
if sweep_shared:
all_sweeps.append(sweep_shared)
return all_sweeps
class Adios_iotest(Campaign):
# A name for the campaign
name = "ADIOS_IOTEST"
# A list of the codes that will be part of the workflow
# If there is an adios xml file associated with the codes, list it here
codes = [ ("writer", dict(exe="adios_iotest")),
("reader", dict(exe="adios_iotest"))
]
# A list of machines that this campaign must be supported on
supported_machines = ['local', 'theta', 'summit']
# Option to kill an experiment (just one experiment, not the full sweep or campaign) if one of the codes fails
kill_on_partial_failure = True
# Some pre-processing in the experiment directory
# This is performed when the campaign directory is created (before the campaign is launched)
run_dir_setup_script = None
# A post-processing script to be run in the experiment directory after the experiment completes
# For example, removing some large files after the experiment is done
run_post_process_script = 'cleanup.sh'
# umask applied to your directory in the campaign so that colleagues can view files
umask = '027'
# Scheduler information: job queue, account-id etc. Leave it to None if running on a local machine
# Scheduler information per machine: project/account id and job queue used
# when submitting the campaign. Not needed when running on a local machine.
scheduler_options = {'theta': {'project':'CSC249ADCD01', 'queue': 'batch'},
                     'summit': {'project':'csc303'}}
# Setup your environment. Loading modules, setting the LD_LIBRARY_PATH etc.
# Ensure this script is executable
app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh', 'summit':'env_setup.sh'}
# Input files staged into every run directory: per-size/per-ratio iotest
# config files plus one ADIOS XML file per engine under test.
input_files = [
    'staging-perf-test-16MB-2to1.txt',
    'staging-perf-test-16MB-8to1.txt',
    'staging-perf-test-1MB-2to1.txt',
    'staging-perf-test-1MB-8to1.txt',
    'staging-perf-test-512MB-2to1.txt',
    'staging-perf-test-512MB-8to1.txt',
    'staging-perf-test-bp4.xml',
    'staging-perf-test-insitumpi.xml',
    'staging-perf-test-ssc.xml',
    'staging-perf-test-sst-rdma.xml',
    'staging-perf-test-sst-tcp.xml'
]
# Create the sweep parameters for a sweep
params = {}
params['writer'] = {}
params['reader'] = {}
# Writer command line flags: -a appid, -c configFile, -w (strong scaling
# toggle), -x adios xml, -d decomposition. Empty value lists are filled in
# later (presumably by get_sweeps — defined elsewhere in this file).
params['writer']['nprocs'] = p.ParamRunner ('writer', 'nprocs', [])
params['writer']['appid'] = p.ParamCmdLineOption ('writer', 'appid', '-a', [1])
params['writer']['configfile'] = p.ParamCmdLineOption ('writer', 'configFile', '-c', [])
params['writer']['scaling'] = p.ParamCmdLineOption ('writer', 'scaling', '-w', [None])
params['writer']['xmlfile'] = p.ParamCmdLineOption ('writer', 'xmlfile', '-x', [])
params['writer']['decomposition'] = p.ParamCmdLineOption ('writer', 'decomposition', '-d', [])
# Reader command line mirrors the writer's, but with app id 2.
params['reader']['nprocs'] = p.ParamRunner ('reader', 'nprocs', [])
params['reader']['appid'] = p.ParamCmdLineOption ('reader', 'appid', '-a', [2])
params['reader']['configfile'] = p.ParamCmdLineOption ('reader', 'configFile', '-c', [])
params['reader']['scaling'] = p.ParamCmdLineOption ('reader', 'scaling', '-w', [None])
params['reader']['xmlfile'] = p.ParamCmdLineOption ('reader', 'xmlfile', '-x', [])
params['reader']['decomposition'] = p.ParamCmdLineOption ('reader', 'decomposition', '-d', [])
# Build one SweepGroup per node count; n*32 writer ranks — presumably 32
# ranks per node (TODO confirm against the target machine layout).
sweeps = []
for n in [8]:
    group_sweeps = get_sweeps (params, n*32)
    # pdb.set_trace()
    s_group = p.SweepGroup("{}-nodes".format(n),
                           walltime=7200,
                           per_run_timeout=600,
                           component_inputs={'writer':input_files},
                           #nodes=128,
                           parameter_groups=group_sweeps,)
    sweeps.append(s_group)
|
flexible
|
{
"blob_id": "475cc5130e847b1a74a33bfa5cbc202a6bf31621",
"index": 6932,
"step-1": "<mask token>\n\n\ndef get_sweeps(ref_params_d, n_writers):\n params_d = copy.deepcopy(ref_params_d)\n params_d['writer']['nprocs'].values = [n_writers]\n params_d['writer']['decomposition'].values = [n_writers]\n all_dicts = []\n all_sweeps = []\n for r in [8]:\n par_r = copy.deepcopy(params_d)\n par_r['reader']['nprocs'].values = [n_writers // r]\n par_r['reader']['decomposition'].values = [n_writers // r]\n for d in ['512MB']:\n par_r_d = copy.deepcopy(par_r)\n par_r_d['writer']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n par_r_d['reader']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n for e in ['bp4', 'sst-rdma', 'sst-tcp', 'ssc', 'insitumpi']:\n par_r_d_e = copy.deepcopy(par_r_d)\n par_r_d_e['writer']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n par_r_d_e['reader']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n all_dicts.append(par_r_d_e)\n for d in all_dicts:\n sweep_params = []\n sweep_params.extend(list(d['writer'].values()))\n sweep_params.extend(list(d['reader'].values()))\n sep_node_layout = get_separate_node_layout(32, 32)\n shared_node_layout = None\n if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0\n ] == 8:\n shared_node_layout = get_shared_node_layout(32, 4)\n elif n_writers // 32 < 4096:\n shared_node_layout = get_shared_node_layout(16, 16)\n rc_dependency = None\n if 'bp4' in d['writer']['xmlfile'].values[0]:\n rc_dependency = {'reader': 'writer'}\n sweep_sep = p.Sweep(parameters=sweep_params, node_layout={'summit':\n sep_node_layout}, rc_dependency=rc_dependency)\n if 'insitumpi' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n if 'ssc' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n sweep_shared = None\n if shared_node_layout:\n sweep_shared = p.Sweep(parameters=sweep_params, node_layout={\n 'summit': shared_node_layout}, rc_dependency=rc_dependency)\n if n_writers // 32 < 
4096:\n all_sweeps.append(sweep_sep)\n if sweep_shared:\n all_sweeps.append(sweep_shared)\n return all_sweeps\n\n\nclass Adios_iotest(Campaign):\n name = 'ADIOS_IOTEST'\n codes = [('writer', dict(exe='adios_iotest')), ('reader', dict(exe=\n 'adios_iotest'))]\n supported_machines = ['local', 'theta', 'summit']\n kill_on_partial_failure = True\n run_dir_setup_script = None\n run_post_process_script = 'cleanup.sh'\n umask = '027'\n scheduler_options = {'theta': {'project': 'CSC249ADCD01', 'queue':\n 'batch'}, 'summit': {'project': 'csc303'}}\n app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh',\n 'summit': 'env_setup.sh'}\n input_files = ['staging-perf-test-16MB-2to1.txt',\n 'staging-perf-test-16MB-8to1.txt', 'staging-perf-test-1MB-2to1.txt',\n 'staging-perf-test-1MB-8to1.txt',\n 'staging-perf-test-512MB-2to1.txt',\n 'staging-perf-test-512MB-8to1.txt', 'staging-perf-test-bp4.xml',\n 'staging-perf-test-insitumpi.xml', 'staging-perf-test-ssc.xml',\n 'staging-perf-test-sst-rdma.xml', 'staging-perf-test-sst-tcp.xml']\n params = {}\n params['writer'] = {}\n params['reader'] = {}\n params['writer']['nprocs'] = p.ParamRunner('writer', 'nprocs', [])\n params['writer']['appid'] = p.ParamCmdLineOption('writer', 'appid',\n '-a', [1])\n params['writer']['configfile'] = p.ParamCmdLineOption('writer',\n 'configFile', '-c', [])\n params['writer']['scaling'] = p.ParamCmdLineOption('writer', 'scaling',\n '-w', [None])\n params['writer']['xmlfile'] = p.ParamCmdLineOption('writer', 'xmlfile',\n '-x', [])\n params['writer']['decomposition'] = p.ParamCmdLineOption('writer',\n 'decomposition', '-d', [])\n params['reader']['nprocs'] = p.ParamRunner('reader', 'nprocs', [])\n params['reader']['appid'] = p.ParamCmdLineOption('reader', 'appid',\n '-a', [2])\n params['reader']['configfile'] = p.ParamCmdLineOption('reader',\n 'configFile', '-c', [])\n params['reader']['scaling'] = p.ParamCmdLineOption('reader', 'scaling',\n '-w', [None])\n params['reader']['xmlfile'] = 
p.ParamCmdLineOption('reader', 'xmlfile',\n '-x', [])\n params['reader']['decomposition'] = p.ParamCmdLineOption('reader',\n 'decomposition', '-d', [])\n sweeps = []\n for n in [8]:\n group_sweeps = get_sweeps(params, n * 32)\n s_group = p.SweepGroup('{}-nodes'.format(n), walltime=7200,\n per_run_timeout=600, component_inputs={'writer': input_files},\n parameter_groups=group_sweeps)\n sweeps.append(s_group)\n",
"step-2": "<mask token>\n\n\ndef get_shared_node_layout(n_writers, n_readers):\n nc = SummitNode()\n for i in range(n_writers):\n nc.cpu[i] = 'writer:{}'.format(i)\n for i in range(n_readers):\n nc.cpu[i + n_writers] = 'reader:{}'.format(i)\n return [nc]\n\n\n<mask token>\n\n\ndef get_sweeps(ref_params_d, n_writers):\n params_d = copy.deepcopy(ref_params_d)\n params_d['writer']['nprocs'].values = [n_writers]\n params_d['writer']['decomposition'].values = [n_writers]\n all_dicts = []\n all_sweeps = []\n for r in [8]:\n par_r = copy.deepcopy(params_d)\n par_r['reader']['nprocs'].values = [n_writers // r]\n par_r['reader']['decomposition'].values = [n_writers // r]\n for d in ['512MB']:\n par_r_d = copy.deepcopy(par_r)\n par_r_d['writer']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n par_r_d['reader']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n for e in ['bp4', 'sst-rdma', 'sst-tcp', 'ssc', 'insitumpi']:\n par_r_d_e = copy.deepcopy(par_r_d)\n par_r_d_e['writer']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n par_r_d_e['reader']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n all_dicts.append(par_r_d_e)\n for d in all_dicts:\n sweep_params = []\n sweep_params.extend(list(d['writer'].values()))\n sweep_params.extend(list(d['reader'].values()))\n sep_node_layout = get_separate_node_layout(32, 32)\n shared_node_layout = None\n if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0\n ] == 8:\n shared_node_layout = get_shared_node_layout(32, 4)\n elif n_writers // 32 < 4096:\n shared_node_layout = get_shared_node_layout(16, 16)\n rc_dependency = None\n if 'bp4' in d['writer']['xmlfile'].values[0]:\n rc_dependency = {'reader': 'writer'}\n sweep_sep = p.Sweep(parameters=sweep_params, node_layout={'summit':\n sep_node_layout}, rc_dependency=rc_dependency)\n if 'insitumpi' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n if 'ssc' in 
d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n sweep_shared = None\n if shared_node_layout:\n sweep_shared = p.Sweep(parameters=sweep_params, node_layout={\n 'summit': shared_node_layout}, rc_dependency=rc_dependency)\n if n_writers // 32 < 4096:\n all_sweeps.append(sweep_sep)\n if sweep_shared:\n all_sweeps.append(sweep_shared)\n return all_sweeps\n\n\nclass Adios_iotest(Campaign):\n name = 'ADIOS_IOTEST'\n codes = [('writer', dict(exe='adios_iotest')), ('reader', dict(exe=\n 'adios_iotest'))]\n supported_machines = ['local', 'theta', 'summit']\n kill_on_partial_failure = True\n run_dir_setup_script = None\n run_post_process_script = 'cleanup.sh'\n umask = '027'\n scheduler_options = {'theta': {'project': 'CSC249ADCD01', 'queue':\n 'batch'}, 'summit': {'project': 'csc303'}}\n app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh',\n 'summit': 'env_setup.sh'}\n input_files = ['staging-perf-test-16MB-2to1.txt',\n 'staging-perf-test-16MB-8to1.txt', 'staging-perf-test-1MB-2to1.txt',\n 'staging-perf-test-1MB-8to1.txt',\n 'staging-perf-test-512MB-2to1.txt',\n 'staging-perf-test-512MB-8to1.txt', 'staging-perf-test-bp4.xml',\n 'staging-perf-test-insitumpi.xml', 'staging-perf-test-ssc.xml',\n 'staging-perf-test-sst-rdma.xml', 'staging-perf-test-sst-tcp.xml']\n params = {}\n params['writer'] = {}\n params['reader'] = {}\n params['writer']['nprocs'] = p.ParamRunner('writer', 'nprocs', [])\n params['writer']['appid'] = p.ParamCmdLineOption('writer', 'appid',\n '-a', [1])\n params['writer']['configfile'] = p.ParamCmdLineOption('writer',\n 'configFile', '-c', [])\n params['writer']['scaling'] = p.ParamCmdLineOption('writer', 'scaling',\n '-w', [None])\n params['writer']['xmlfile'] = p.ParamCmdLineOption('writer', 'xmlfile',\n '-x', [])\n params['writer']['decomposition'] = p.ParamCmdLineOption('writer',\n 'decomposition', '-d', [])\n params['reader']['nprocs'] = p.ParamRunner('reader', 'nprocs', [])\n params['reader']['appid'] = 
p.ParamCmdLineOption('reader', 'appid',\n '-a', [2])\n params['reader']['configfile'] = p.ParamCmdLineOption('reader',\n 'configFile', '-c', [])\n params['reader']['scaling'] = p.ParamCmdLineOption('reader', 'scaling',\n '-w', [None])\n params['reader']['xmlfile'] = p.ParamCmdLineOption('reader', 'xmlfile',\n '-x', [])\n params['reader']['decomposition'] = p.ParamCmdLineOption('reader',\n 'decomposition', '-d', [])\n sweeps = []\n for n in [8]:\n group_sweeps = get_sweeps(params, n * 32)\n s_group = p.SweepGroup('{}-nodes'.format(n), walltime=7200,\n per_run_timeout=600, component_inputs={'writer': input_files},\n parameter_groups=group_sweeps)\n sweeps.append(s_group)\n",
"step-3": "<mask token>\n\n\ndef get_shared_node_layout(n_writers, n_readers):\n nc = SummitNode()\n for i in range(n_writers):\n nc.cpu[i] = 'writer:{}'.format(i)\n for i in range(n_readers):\n nc.cpu[i + n_writers] = 'reader:{}'.format(i)\n return [nc]\n\n\ndef get_separate_node_layout(n_writers, n_readers):\n nc_w = SummitNode()\n for i in range(n_writers):\n nc_w.cpu[i] = 'writer:{}'.format(i)\n nc_r = SummitNode()\n for i in range(n_readers):\n nc_r.cpu[i] = 'reader:{}'.format(i)\n return [nc_w, nc_r]\n\n\ndef get_sweeps(ref_params_d, n_writers):\n params_d = copy.deepcopy(ref_params_d)\n params_d['writer']['nprocs'].values = [n_writers]\n params_d['writer']['decomposition'].values = [n_writers]\n all_dicts = []\n all_sweeps = []\n for r in [8]:\n par_r = copy.deepcopy(params_d)\n par_r['reader']['nprocs'].values = [n_writers // r]\n par_r['reader']['decomposition'].values = [n_writers // r]\n for d in ['512MB']:\n par_r_d = copy.deepcopy(par_r)\n par_r_d['writer']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n par_r_d['reader']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n for e in ['bp4', 'sst-rdma', 'sst-tcp', 'ssc', 'insitumpi']:\n par_r_d_e = copy.deepcopy(par_r_d)\n par_r_d_e['writer']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n par_r_d_e['reader']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n all_dicts.append(par_r_d_e)\n for d in all_dicts:\n sweep_params = []\n sweep_params.extend(list(d['writer'].values()))\n sweep_params.extend(list(d['reader'].values()))\n sep_node_layout = get_separate_node_layout(32, 32)\n shared_node_layout = None\n if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0\n ] == 8:\n shared_node_layout = get_shared_node_layout(32, 4)\n elif n_writers // 32 < 4096:\n shared_node_layout = get_shared_node_layout(16, 16)\n rc_dependency = None\n if 'bp4' in d['writer']['xmlfile'].values[0]:\n rc_dependency = {'reader': 
'writer'}\n sweep_sep = p.Sweep(parameters=sweep_params, node_layout={'summit':\n sep_node_layout}, rc_dependency=rc_dependency)\n if 'insitumpi' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n if 'ssc' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n sweep_shared = None\n if shared_node_layout:\n sweep_shared = p.Sweep(parameters=sweep_params, node_layout={\n 'summit': shared_node_layout}, rc_dependency=rc_dependency)\n if n_writers // 32 < 4096:\n all_sweeps.append(sweep_sep)\n if sweep_shared:\n all_sweeps.append(sweep_shared)\n return all_sweeps\n\n\nclass Adios_iotest(Campaign):\n name = 'ADIOS_IOTEST'\n codes = [('writer', dict(exe='adios_iotest')), ('reader', dict(exe=\n 'adios_iotest'))]\n supported_machines = ['local', 'theta', 'summit']\n kill_on_partial_failure = True\n run_dir_setup_script = None\n run_post_process_script = 'cleanup.sh'\n umask = '027'\n scheduler_options = {'theta': {'project': 'CSC249ADCD01', 'queue':\n 'batch'}, 'summit': {'project': 'csc303'}}\n app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh',\n 'summit': 'env_setup.sh'}\n input_files = ['staging-perf-test-16MB-2to1.txt',\n 'staging-perf-test-16MB-8to1.txt', 'staging-perf-test-1MB-2to1.txt',\n 'staging-perf-test-1MB-8to1.txt',\n 'staging-perf-test-512MB-2to1.txt',\n 'staging-perf-test-512MB-8to1.txt', 'staging-perf-test-bp4.xml',\n 'staging-perf-test-insitumpi.xml', 'staging-perf-test-ssc.xml',\n 'staging-perf-test-sst-rdma.xml', 'staging-perf-test-sst-tcp.xml']\n params = {}\n params['writer'] = {}\n params['reader'] = {}\n params['writer']['nprocs'] = p.ParamRunner('writer', 'nprocs', [])\n params['writer']['appid'] = p.ParamCmdLineOption('writer', 'appid',\n '-a', [1])\n params['writer']['configfile'] = p.ParamCmdLineOption('writer',\n 'configFile', '-c', [])\n params['writer']['scaling'] = p.ParamCmdLineOption('writer', 'scaling',\n '-w', [None])\n params['writer']['xmlfile'] = p.ParamCmdLineOption('writer', 
'xmlfile',\n '-x', [])\n params['writer']['decomposition'] = p.ParamCmdLineOption('writer',\n 'decomposition', '-d', [])\n params['reader']['nprocs'] = p.ParamRunner('reader', 'nprocs', [])\n params['reader']['appid'] = p.ParamCmdLineOption('reader', 'appid',\n '-a', [2])\n params['reader']['configfile'] = p.ParamCmdLineOption('reader',\n 'configFile', '-c', [])\n params['reader']['scaling'] = p.ParamCmdLineOption('reader', 'scaling',\n '-w', [None])\n params['reader']['xmlfile'] = p.ParamCmdLineOption('reader', 'xmlfile',\n '-x', [])\n params['reader']['decomposition'] = p.ParamCmdLineOption('reader',\n 'decomposition', '-d', [])\n sweeps = []\n for n in [8]:\n group_sweeps = get_sweeps(params, n * 32)\n s_group = p.SweepGroup('{}-nodes'.format(n), walltime=7200,\n per_run_timeout=600, component_inputs={'writer': input_files},\n parameter_groups=group_sweeps)\n sweeps.append(s_group)\n",
"step-4": "from codar.cheetah import Campaign\nfrom codar.cheetah import parameters as p\nfrom codar.savanna.machines import SummitNode\nimport copy\n\n\ndef get_shared_node_layout(n_writers, n_readers):\n nc = SummitNode()\n for i in range(n_writers):\n nc.cpu[i] = 'writer:{}'.format(i)\n for i in range(n_readers):\n nc.cpu[i + n_writers] = 'reader:{}'.format(i)\n return [nc]\n\n\ndef get_separate_node_layout(n_writers, n_readers):\n nc_w = SummitNode()\n for i in range(n_writers):\n nc_w.cpu[i] = 'writer:{}'.format(i)\n nc_r = SummitNode()\n for i in range(n_readers):\n nc_r.cpu[i] = 'reader:{}'.format(i)\n return [nc_w, nc_r]\n\n\ndef get_sweeps(ref_params_d, n_writers):\n params_d = copy.deepcopy(ref_params_d)\n params_d['writer']['nprocs'].values = [n_writers]\n params_d['writer']['decomposition'].values = [n_writers]\n all_dicts = []\n all_sweeps = []\n for r in [8]:\n par_r = copy.deepcopy(params_d)\n par_r['reader']['nprocs'].values = [n_writers // r]\n par_r['reader']['decomposition'].values = [n_writers // r]\n for d in ['512MB']:\n par_r_d = copy.deepcopy(par_r)\n par_r_d['writer']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n par_r_d['reader']['configfile'].values = [\n 'staging-perf-test-{}-{}to1.txt'.format(d, r)]\n for e in ['bp4', 'sst-rdma', 'sst-tcp', 'ssc', 'insitumpi']:\n par_r_d_e = copy.deepcopy(par_r_d)\n par_r_d_e['writer']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n par_r_d_e['reader']['xmlfile'].values = [\n 'staging-perf-test-{}.xml'.format(e)]\n all_dicts.append(par_r_d_e)\n for d in all_dicts:\n sweep_params = []\n sweep_params.extend(list(d['writer'].values()))\n sweep_params.extend(list(d['reader'].values()))\n sep_node_layout = get_separate_node_layout(32, 32)\n shared_node_layout = None\n if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0\n ] == 8:\n shared_node_layout = get_shared_node_layout(32, 4)\n elif n_writers // 32 < 4096:\n shared_node_layout = 
get_shared_node_layout(16, 16)\n rc_dependency = None\n if 'bp4' in d['writer']['xmlfile'].values[0]:\n rc_dependency = {'reader': 'writer'}\n sweep_sep = p.Sweep(parameters=sweep_params, node_layout={'summit':\n sep_node_layout}, rc_dependency=rc_dependency)\n if 'insitumpi' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n if 'ssc' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode = 'mpmd'\n sweep_shared = None\n if shared_node_layout:\n sweep_shared = p.Sweep(parameters=sweep_params, node_layout={\n 'summit': shared_node_layout}, rc_dependency=rc_dependency)\n if n_writers // 32 < 4096:\n all_sweeps.append(sweep_sep)\n if sweep_shared:\n all_sweeps.append(sweep_shared)\n return all_sweeps\n\n\nclass Adios_iotest(Campaign):\n name = 'ADIOS_IOTEST'\n codes = [('writer', dict(exe='adios_iotest')), ('reader', dict(exe=\n 'adios_iotest'))]\n supported_machines = ['local', 'theta', 'summit']\n kill_on_partial_failure = True\n run_dir_setup_script = None\n run_post_process_script = 'cleanup.sh'\n umask = '027'\n scheduler_options = {'theta': {'project': 'CSC249ADCD01', 'queue':\n 'batch'}, 'summit': {'project': 'csc303'}}\n app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh',\n 'summit': 'env_setup.sh'}\n input_files = ['staging-perf-test-16MB-2to1.txt',\n 'staging-perf-test-16MB-8to1.txt', 'staging-perf-test-1MB-2to1.txt',\n 'staging-perf-test-1MB-8to1.txt',\n 'staging-perf-test-512MB-2to1.txt',\n 'staging-perf-test-512MB-8to1.txt', 'staging-perf-test-bp4.xml',\n 'staging-perf-test-insitumpi.xml', 'staging-perf-test-ssc.xml',\n 'staging-perf-test-sst-rdma.xml', 'staging-perf-test-sst-tcp.xml']\n params = {}\n params['writer'] = {}\n params['reader'] = {}\n params['writer']['nprocs'] = p.ParamRunner('writer', 'nprocs', [])\n params['writer']['appid'] = p.ParamCmdLineOption('writer', 'appid',\n '-a', [1])\n params['writer']['configfile'] = p.ParamCmdLineOption('writer',\n 'configFile', '-c', [])\n 
params['writer']['scaling'] = p.ParamCmdLineOption('writer', 'scaling',\n '-w', [None])\n params['writer']['xmlfile'] = p.ParamCmdLineOption('writer', 'xmlfile',\n '-x', [])\n params['writer']['decomposition'] = p.ParamCmdLineOption('writer',\n 'decomposition', '-d', [])\n params['reader']['nprocs'] = p.ParamRunner('reader', 'nprocs', [])\n params['reader']['appid'] = p.ParamCmdLineOption('reader', 'appid',\n '-a', [2])\n params['reader']['configfile'] = p.ParamCmdLineOption('reader',\n 'configFile', '-c', [])\n params['reader']['scaling'] = p.ParamCmdLineOption('reader', 'scaling',\n '-w', [None])\n params['reader']['xmlfile'] = p.ParamCmdLineOption('reader', 'xmlfile',\n '-x', [])\n params['reader']['decomposition'] = p.ParamCmdLineOption('reader',\n 'decomposition', '-d', [])\n sweeps = []\n for n in [8]:\n group_sweeps = get_sweeps(params, n * 32)\n s_group = p.SweepGroup('{}-nodes'.format(n), walltime=7200,\n per_run_timeout=600, component_inputs={'writer': input_files},\n parameter_groups=group_sweeps)\n sweeps.append(s_group)\n",
"step-5": "from codar.cheetah import Campaign\nfrom codar.cheetah import parameters as p\nfrom codar.savanna.machines import SummitNode\nimport copy\n\ndef get_shared_node_layout (n_writers, n_readers):\n nc = SummitNode()\n for i in range(n_writers):\n nc.cpu[i] = \"writer:{}\".format(i)\n for i in range(n_readers):\n nc.cpu[i+n_writers] = \"reader:{}\".format(i)\n return [nc]\n\ndef get_separate_node_layout (n_writers, n_readers):\n nc_w = SummitNode()\n for i in range(n_writers):\n nc_w.cpu[i] = \"writer:{}\".format(i)\n\n nc_r = SummitNode()\n for i in range(n_readers):\n nc_r.cpu[i] = \"reader:{}\".format(i)\n\n return [nc_w,nc_r]\n\ndef get_sweeps(ref_params_d, n_writers):\n params_d = copy.deepcopy(ref_params_d)\n params_d['writer']['nprocs'].values=[n_writers]\n params_d['writer']['decomposition'].values=[n_writers]\n\n all_dicts = []\n all_sweeps = []\n\n # Loop over ratio of the no. of reader ranks\n for r in [8]:\n par_r = copy.deepcopy(params_d)\n par_r['reader']['nprocs'].values = [n_writers//r]\n par_r['reader']['decomposition'].values = [n_writers//r]\n\n # Loop over data size per process\n for d in ['512MB']:\n par_r_d = copy.deepcopy(par_r)\n par_r_d['writer']['configfile'].values = ['staging-perf-test-{}-{}to1.txt'.format(d,r)]\n par_r_d['reader']['configfile'].values = ['staging-perf-test-{}-{}to1.txt'.format(d,r)]\n\n # Loop over engines\n for e in [\"bp4\",\"sst-rdma\",\"sst-tcp\",\"ssc\",\"insitumpi\"]:\n par_r_d_e = copy.deepcopy(par_r_d)\n par_r_d_e['writer']['xmlfile'].values = ['staging-perf-test-{}.xml'.format(e)]\n par_r_d_e['reader']['xmlfile'].values = ['staging-perf-test-{}.xml'.format(e)]\n\n all_dicts.append(par_r_d_e)\n\n for d in all_dicts:\n sweep_params = []\n sweep_params.extend(list(d['writer'].values()))\n sweep_params.extend(list(d['reader'].values()))\n\n sep_node_layout = get_separate_node_layout(32, 32)\n shared_node_layout = None\n\n if d['writer']['nprocs'].values[0] // d['reader']['nprocs'].values[0] == 8:\n 
shared_node_layout = get_shared_node_layout(32,4)\n elif n_writers//32 < 4096:\n shared_node_layout = get_shared_node_layout(16,16)\n\n rc_dependency = None\n if 'bp4' in d['writer']['xmlfile'].values[0]:\n rc_dependency = {'reader': 'writer'}\n sweep_sep = p.Sweep(parameters = sweep_params, node_layout = {'summit':sep_node_layout}, rc_dependency=rc_dependency)\n if 'insitumpi' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode='mpmd'\n if 'ssc' in d['writer']['xmlfile'].values[0]:\n sweep_sep.launch_mode='mpmd'\n\n sweep_shared = None\n if shared_node_layout:\n sweep_shared = p.Sweep(parameters = sweep_params, node_layout = {'summit':shared_node_layout}, rc_dependency=rc_dependency)\n\n if n_writers//32 < 4096:\n all_sweeps.append(sweep_sep)\n if sweep_shared:\n all_sweeps.append(sweep_shared)\n\n return all_sweeps\n\n\nclass Adios_iotest(Campaign):\n\n # A name for the campaign\n name = \"ADIOS_IOTEST\"\n\n # A list of the codes that will be part of the workflow\n # If there is an adios xml file associated with the codes, list it here\n codes = [ (\"writer\", dict(exe=\"adios_iotest\")),\n (\"reader\", dict(exe=\"adios_iotest\"))\n ]\n\n # A list of machines that this campaign must be supported on\n supported_machines = ['local', 'theta', 'summit']\n\n # Option to kill an experiment (just one experiment, not the full sweep or campaign) if one of the codes fails\n kill_on_partial_failure = True\n\n # Some pre-processing in the experiment directory\n # This is performed when the campaign directory is created (before the campaign is launched)\n run_dir_setup_script = None\n\n # A post-processing script to be run in the experiment directory after the experiment completes\n # For example, removing some large files after the experiment is done\n run_post_process_script = 'cleanup.sh'\n\n # umask applied to your directory in the campaign so that colleagues can view files\n umask = '027'\n\n # Scheduler information: job queue, account-id etc. 
Leave it to None if running on a local machine\n scheduler_options = {'theta': {'project':'CSC249ADCD01', 'queue': 'batch'},\n 'summit': {'project':'csc303'}}\n\n # Setup your environment. Loading modules, setting the LD_LIBRARY_PATH etc.\n # Ensure this script is executable\n app_config_scripts = {'local': 'env_setup.sh', 'theta': 'env_setup.sh', 'summit':'env_setup.sh'}\n\n input_files = [\n 'staging-perf-test-16MB-2to1.txt',\n 'staging-perf-test-16MB-8to1.txt',\n 'staging-perf-test-1MB-2to1.txt',\n 'staging-perf-test-1MB-8to1.txt',\n 'staging-perf-test-512MB-2to1.txt',\n 'staging-perf-test-512MB-8to1.txt',\n 'staging-perf-test-bp4.xml',\n 'staging-perf-test-insitumpi.xml',\n 'staging-perf-test-ssc.xml',\n 'staging-perf-test-sst-rdma.xml',\n 'staging-perf-test-sst-tcp.xml'\n ]\n\n # Create the sweep parameters for a sweep\n params = {}\n params['writer'] = {}\n params['reader'] = {}\n\n params['writer']['nprocs'] = p.ParamRunner ('writer', 'nprocs', [])\n params['writer']['appid'] = p.ParamCmdLineOption ('writer', 'appid', '-a', [1])\n params['writer']['configfile'] = p.ParamCmdLineOption ('writer', 'configFile', '-c', [])\n params['writer']['scaling'] = p.ParamCmdLineOption ('writer', 'scaling', '-w', [None])\n params['writer']['xmlfile'] = p.ParamCmdLineOption ('writer', 'xmlfile', '-x', [])\n params['writer']['decomposition'] = p.ParamCmdLineOption ('writer', 'decomposition', '-d', [])\n\n params['reader']['nprocs'] = p.ParamRunner ('reader', 'nprocs', [])\n params['reader']['appid'] = p.ParamCmdLineOption ('reader', 'appid', '-a', [2])\n params['reader']['configfile'] = p.ParamCmdLineOption ('reader', 'configFile', '-c', [])\n params['reader']['scaling'] = p.ParamCmdLineOption ('reader', 'scaling', '-w', [None])\n params['reader']['xmlfile'] = p.ParamCmdLineOption ('reader', 'xmlfile', '-x', [])\n params['reader']['decomposition'] = p.ParamCmdLineOption ('reader', 'decomposition', '-d', [])\n\n sweeps = []\n for n in [8]:\n group_sweeps = get_sweeps (params, 
n*32)\n # pdb.set_trace()\n s_group = p.SweepGroup(\"{}-nodes\".format(n),\n walltime=7200,\n per_run_timeout=600,\n component_inputs={'writer':input_files},\n #nodes=128,\n parameter_groups=group_sweeps,)\n sweeps.append(s_group)\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from keras.preprocessing.image import img_to_array
from keras.models import load_model
import tensorflow as tf
import numpy as np
import argparse
import imutils
import pickle
import cv2
# USAGE
# python classify.py --model output/fashion.model --categorybin output/category_lb.pickle
# --colorbin output/color_lb.pickle --image examples/black_dress.jpg

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", required=True, help="path to trained model model")
ap.add_argument("-l", "--categorybin", required=True, help="path to output category label binarizer")
ap.add_argument("-c", "--colorbin", required=True, help="path to output color label binarizer")
ap.add_argument("-i", "--image", required=True, help="path to input image")
args = vars(ap.parse_args())

# load the image; keep a 400px-wide BGR copy for annotated display, and
# convert the working copy to RGB for the network
image = cv2.imread(args["image"])
output = imutils.resize(image, width=400)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

# pre-process the image for classification: resize to the network's 96x96
# input, scale pixels to [0, 1], and add a leading batch dimension
image = cv2.resize(image, (96, 96))
image = image.astype("float") / 255.0
image = img_to_array(image)
image = np.expand_dims(image, axis=0)

# load the trained convolutional neural network from disk, followed
# by the category and color label binarizers, respectively.
# Use context managers so the pickle file handles are closed — the previous
# open(...).read() calls leaked both descriptors.
print("[INFO] loading network...")
model = load_model(args["model"], custom_objects={"tf": tf})
with open(args["categorybin"], "rb") as f:
    categoryLB = pickle.load(f)
with open(args["colorbin"], "rb") as f:
    colorLB = pickle.load(f)

# classify the input image using Keras' multi-output functionality:
# the model returns one probability vector per output head
print("[INFO] classifying image...")
(categoryProba, colorProba) = model.predict(image)

# find indexes of both the category and color outputs with the
# largest probabilities, then determine the corresponding class
# labels
categoryIdx = categoryProba[0].argmax()
colorIdx = colorProba[0].argmax()
categoryLabel = categoryLB.classes_[categoryIdx]
colorLabel = colorLB.classes_[colorIdx]

# draw the category label and color label on the image
categoryText = "category: {} ({:.2f}%)".format(categoryLabel, categoryProba[0][categoryIdx] * 100)
colorText = "color: {} ({:.2f}%)".format(colorLabel, colorProba[0][colorIdx] * 100)
cv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
cv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

# display the predictions to the terminal as well
print("[INFO] {}".format(categoryText))
print("[INFO] {}".format(colorText))

# show the output image
cv2.imshow("Output", output)
cv2.waitKey(0)
|
normal
|
{
"blob_id": "8ff9961c1415c04899bbc15ba64811a1b3ade262",
"index": 3082,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nap.add_argument('-m', '--model', required=True, help=\n 'path to trained model model')\nap.add_argument('-l', '--categorybin', required=True, help=\n 'path to output category label binarizer')\nap.add_argument('-c', '--colorbin', required=True, help=\n 'path to output color label binarizer')\nap.add_argument('-i', '--image', required=True, help='path to input image')\n<mask token>\nprint('[INFO] loading network...')\n<mask token>\nprint('[INFO] classifying image...')\n<mask token>\ncv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n (0, 255, 0), 2)\ncv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,\n 255, 0), 2)\nprint('[INFO] {}'.format(categoryText))\nprint('[INFO] {}'.format(colorText))\ncv2.imshow('Output', output)\ncv2.waitKey(0)\n",
"step-3": "<mask token>\nap = argparse.ArgumentParser()\nap.add_argument('-m', '--model', required=True, help=\n 'path to trained model model')\nap.add_argument('-l', '--categorybin', required=True, help=\n 'path to output category label binarizer')\nap.add_argument('-c', '--colorbin', required=True, help=\n 'path to output color label binarizer')\nap.add_argument('-i', '--image', required=True, help='path to input image')\nargs = vars(ap.parse_args())\nimage = cv2.imread(args['image'])\noutput = imutils.resize(image, width=400)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nimage = cv2.resize(image, (96, 96))\nimage = image.astype('float') / 255.0\nimage = img_to_array(image)\nimage = np.expand_dims(image, axis=0)\nprint('[INFO] loading network...')\nmodel = load_model(args['model'], custom_objects={'tf': tf})\ncategoryLB = pickle.loads(open(args['categorybin'], 'rb').read())\ncolorLB = pickle.loads(open(args['colorbin'], 'rb').read())\nprint('[INFO] classifying image...')\ncategoryProba, colorProba = model.predict(image)\ncategoryIdx = categoryProba[0].argmax()\ncolorIdx = colorProba[0].argmax()\ncategoryLabel = categoryLB.classes_[categoryIdx]\ncolorLabel = colorLB.classes_[colorIdx]\ncategoryText = 'category: {} ({:.2f}%)'.format(categoryLabel, categoryProba\n [0][categoryIdx] * 100)\ncolorText = 'color: {} ({:.2f}%)'.format(colorLabel, colorProba[0][colorIdx\n ] * 100)\ncv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n (0, 255, 0), 2)\ncv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,\n 255, 0), 2)\nprint('[INFO] {}'.format(categoryText))\nprint('[INFO] {}'.format(colorText))\ncv2.imshow('Output', output)\ncv2.waitKey(0)\n",
"step-4": "from keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport imutils\nimport pickle\nimport cv2\nap = argparse.ArgumentParser()\nap.add_argument('-m', '--model', required=True, help=\n 'path to trained model model')\nap.add_argument('-l', '--categorybin', required=True, help=\n 'path to output category label binarizer')\nap.add_argument('-c', '--colorbin', required=True, help=\n 'path to output color label binarizer')\nap.add_argument('-i', '--image', required=True, help='path to input image')\nargs = vars(ap.parse_args())\nimage = cv2.imread(args['image'])\noutput = imutils.resize(image, width=400)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nimage = cv2.resize(image, (96, 96))\nimage = image.astype('float') / 255.0\nimage = img_to_array(image)\nimage = np.expand_dims(image, axis=0)\nprint('[INFO] loading network...')\nmodel = load_model(args['model'], custom_objects={'tf': tf})\ncategoryLB = pickle.loads(open(args['categorybin'], 'rb').read())\ncolorLB = pickle.loads(open(args['colorbin'], 'rb').read())\nprint('[INFO] classifying image...')\ncategoryProba, colorProba = model.predict(image)\ncategoryIdx = categoryProba[0].argmax()\ncolorIdx = colorProba[0].argmax()\ncategoryLabel = categoryLB.classes_[categoryIdx]\ncolorLabel = colorLB.classes_[colorIdx]\ncategoryText = 'category: {} ({:.2f}%)'.format(categoryLabel, categoryProba\n [0][categoryIdx] * 100)\ncolorText = 'color: {} ({:.2f}%)'.format(colorLabel, colorProba[0][colorIdx\n ] * 100)\ncv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7,\n (0, 255, 0), 2)\ncv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0,\n 255, 0), 2)\nprint('[INFO] {}'.format(categoryText))\nprint('[INFO] {}'.format(colorText))\ncv2.imshow('Output', output)\ncv2.waitKey(0)\n",
"step-5": "from keras.preprocessing.image import img_to_array\nfrom keras.models import load_model\nimport tensorflow as tf\nimport numpy as np\nimport argparse\nimport imutils\nimport pickle\nimport cv2\n\n# USAGE\n# python classify.py --model output/fashion.model --categorybin output/category_lb.pickle\n# --colorbin output/color_lb.pickle --image examples/black_dress.jpg\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-m\", \"--model\", required=True, help=\"path to trained model model\")\nap.add_argument(\"-l\", \"--categorybin\", required=True, help=\"path to output category label binarizer\")\nap.add_argument(\"-c\", \"--colorbin\", required=True, help=\"path to output color label binarizer\")\nap.add_argument(\"-i\", \"--image\", required=True, help=\"path to input image\")\nargs = vars(ap.parse_args())\n\n# load the image\nimage = cv2.imread(args[\"image\"])\noutput = imutils.resize(image, width=400)\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# pre-process the image for classification\nimage = cv2.resize(image, (96, 96))\nimage = image.astype(\"float\") / 255.0\nimage = img_to_array(image)\nimage = np.expand_dims(image, axis=0)\n\n# load the trained convolutional neural network from disk, followed\n# by the category and color label binarizers, respectively\nprint(\"[INFO] loading network...\")\nmodel = load_model(args[\"model\"], custom_objects={\"tf\": tf})\ncategoryLB = pickle.loads(open(args[\"categorybin\"], \"rb\").read())\ncolorLB = pickle.loads(open(args[\"colorbin\"], \"rb\").read())\n\n# classify the input image using Keras' multi-output functionality\nprint(\"[INFO] classifying image...\")\n(categoryProba, colorProba) = model.predict(image)\n\n# find indexes of both the category and color outputs with the\n# largest probabilities, then determine the corresponding class\n# labels\ncategoryIdx = categoryProba[0].argmax()\ncolorIdx = colorProba[0].argmax()\ncategoryLabel = 
categoryLB.classes_[categoryIdx]\ncolorLabel = colorLB.classes_[colorIdx]\n\n# draw the category label and color label on the image\ncategoryText = \"category: {} ({:.2f}%)\".format(categoryLabel, categoryProba[0][categoryIdx] * 100)\ncolorText = \"color: {} ({:.2f}%)\".format(colorLabel, colorProba[0][colorIdx] * 100)\ncv2.putText(output, categoryText, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\ncv2.putText(output, colorText, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)\n\n# display the predictions to the terminal as well\nprint(\"[INFO] {}\".format(categoryText))\nprint(\"[INFO] {}\".format(colorText))\n\n# show the output image\ncv2.imshow(\"Output\", output)\ncv2.waitKey(0)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import sys
def solve():
numEngines = int(sys.stdin.readline())
engines = []
for _ in range(numEngines):
engine = sys.stdin.readline()
engines.append(engine)
numQueries = int(sys.stdin.readline())
queries = []
for _ in range(numQueries):
query = sys.stdin.readline()
queries.append(query)
remainingEngines = set(engines)
switches = 0
for query in queries:
remainingEngines.discard(query)
if not remainingEngines:
remainingEngines = set(engines)
remainingEngines.discard(query)
switches += 1
return switches
cases = int(sys.stdin.readline())
for case in range(cases):
print 'Case #%d: %s' % (case + 1, solve())
|
normal
|
{
"blob_id": "174f5b04f02ec0c9651d5e34c8b04df8bfd4dff4",
"index": 1943,
"step-1": "#!/usr/bin/env python\n\nimport sys\n\ndef solve():\n\tnumEngines = int(sys.stdin.readline())\n\tengines = []\n\tfor _ in range(numEngines):\n\t\tengine = sys.stdin.readline()\n\t\tengines.append(engine)\n\n\tnumQueries = int(sys.stdin.readline())\n\tqueries = []\n\tfor _ in range(numQueries):\n\t\tquery = sys.stdin.readline()\n\t\tqueries.append(query)\n\n\tremainingEngines = set(engines)\n\tswitches = 0\n\tfor query in queries:\n\t\tremainingEngines.discard(query)\n\t\tif not remainingEngines:\n\t\t\tremainingEngines = set(engines)\n\t\t\tremainingEngines.discard(query)\n\t\t\tswitches += 1\n\n\treturn switches\n\ncases = int(sys.stdin.readline())\nfor case in range(cases):\n\tprint 'Case #%d: %s' % (case + 1, solve())\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""Activate coverage at python startup if appropriate.
The python site initialisation will ensure that anything we import
will be removed and not visible at the end of python startup. However
we minimise all work by putting these init actions in this separate
module and only importing what is needed when needed.
For normal python startup when coverage should not be activated the pth
file checks a single env var and does not import or call the init fn
here.
For python startup when an ancestor process has set the env indicating
that code coverage is being collected we activate coverage based on
info passed via env vars.
"""
import os
def multiprocessing_start(obj):
cov = init()
if cov:
multiprocessing.util.Finalize(None, multiprocessing_finish, args=(cov,), exitpriority=1000)
def multiprocessing_finish(cov):
cov.stop()
cov.save()
try:
import multiprocessing.util
except ImportError:
pass
else:
multiprocessing.util.register_after_fork(multiprocessing_start, multiprocessing_start)
def init():
# Only continue if ancestor process has set everything needed in
# the env.
cov_source = os.environ.get('COV_CORE_SOURCE')
cov_config = os.environ.get('COV_CORE_CONFIG')
cov_datafile = os.environ.get('COV_CORE_DATAFILE')
if cov_datafile:
# Import what we need to activate coverage.
import coverage
# Determine all source roots.
if not cov_source:
cov_source = None
else:
cov_source = cov_source.split(os.pathsep)
if not cov_config:
cov_config = True
# Activate coverage for this process.
cov = coverage.coverage(
source=cov_source,
data_suffix=True,
config_file=cov_config,
auto_data=True,
data_file=cov_datafile
)
cov.load()
cov.start()
cov._warn_no_data = False
cov._warn_unimported_source = False
return cov
|
normal
|
{
"blob_id": "243794d36a1c6861c2c3308fe6a52ec19b73df72",
"index": 7820,
"step-1": "<mask token>\n\n\ndef multiprocessing_start(obj):\n cov = init()\n if cov:\n multiprocessing.util.Finalize(None, multiprocessing_finish, args=(\n cov,), exitpriority=1000)\n\n\n<mask token>\n\n\ndef init():\n cov_source = os.environ.get('COV_CORE_SOURCE')\n cov_config = os.environ.get('COV_CORE_CONFIG')\n cov_datafile = os.environ.get('COV_CORE_DATAFILE')\n if cov_datafile:\n import coverage\n if not cov_source:\n cov_source = None\n else:\n cov_source = cov_source.split(os.pathsep)\n if not cov_config:\n cov_config = True\n cov = coverage.coverage(source=cov_source, data_suffix=True,\n config_file=cov_config, auto_data=True, data_file=cov_datafile)\n cov.load()\n cov.start()\n cov._warn_no_data = False\n cov._warn_unimported_source = False\n return cov\n",
"step-2": "<mask token>\n\n\ndef multiprocessing_start(obj):\n cov = init()\n if cov:\n multiprocessing.util.Finalize(None, multiprocessing_finish, args=(\n cov,), exitpriority=1000)\n\n\ndef multiprocessing_finish(cov):\n cov.stop()\n cov.save()\n\n\n<mask token>\n\n\ndef init():\n cov_source = os.environ.get('COV_CORE_SOURCE')\n cov_config = os.environ.get('COV_CORE_CONFIG')\n cov_datafile = os.environ.get('COV_CORE_DATAFILE')\n if cov_datafile:\n import coverage\n if not cov_source:\n cov_source = None\n else:\n cov_source = cov_source.split(os.pathsep)\n if not cov_config:\n cov_config = True\n cov = coverage.coverage(source=cov_source, data_suffix=True,\n config_file=cov_config, auto_data=True, data_file=cov_datafile)\n cov.load()\n cov.start()\n cov._warn_no_data = False\n cov._warn_unimported_source = False\n return cov\n",
"step-3": "<mask token>\n\n\ndef multiprocessing_start(obj):\n cov = init()\n if cov:\n multiprocessing.util.Finalize(None, multiprocessing_finish, args=(\n cov,), exitpriority=1000)\n\n\ndef multiprocessing_finish(cov):\n cov.stop()\n cov.save()\n\n\ntry:\n import multiprocessing.util\nexcept ImportError:\n pass\nelse:\n multiprocessing.util.register_after_fork(multiprocessing_start,\n multiprocessing_start)\n\n\ndef init():\n cov_source = os.environ.get('COV_CORE_SOURCE')\n cov_config = os.environ.get('COV_CORE_CONFIG')\n cov_datafile = os.environ.get('COV_CORE_DATAFILE')\n if cov_datafile:\n import coverage\n if not cov_source:\n cov_source = None\n else:\n cov_source = cov_source.split(os.pathsep)\n if not cov_config:\n cov_config = True\n cov = coverage.coverage(source=cov_source, data_suffix=True,\n config_file=cov_config, auto_data=True, data_file=cov_datafile)\n cov.load()\n cov.start()\n cov._warn_no_data = False\n cov._warn_unimported_source = False\n return cov\n",
"step-4": "<mask token>\nimport os\n\n\ndef multiprocessing_start(obj):\n cov = init()\n if cov:\n multiprocessing.util.Finalize(None, multiprocessing_finish, args=(\n cov,), exitpriority=1000)\n\n\ndef multiprocessing_finish(cov):\n cov.stop()\n cov.save()\n\n\ntry:\n import multiprocessing.util\nexcept ImportError:\n pass\nelse:\n multiprocessing.util.register_after_fork(multiprocessing_start,\n multiprocessing_start)\n\n\ndef init():\n cov_source = os.environ.get('COV_CORE_SOURCE')\n cov_config = os.environ.get('COV_CORE_CONFIG')\n cov_datafile = os.environ.get('COV_CORE_DATAFILE')\n if cov_datafile:\n import coverage\n if not cov_source:\n cov_source = None\n else:\n cov_source = cov_source.split(os.pathsep)\n if not cov_config:\n cov_config = True\n cov = coverage.coverage(source=cov_source, data_suffix=True,\n config_file=cov_config, auto_data=True, data_file=cov_datafile)\n cov.load()\n cov.start()\n cov._warn_no_data = False\n cov._warn_unimported_source = False\n return cov\n",
"step-5": "\"\"\"Activate coverage at python startup if appropriate.\n\nThe python site initialisation will ensure that anything we import\nwill be removed and not visible at the end of python startup. However\nwe minimise all work by putting these init actions in this separate\nmodule and only importing what is needed when needed.\n\nFor normal python startup when coverage should not be activated the pth\nfile checks a single env var and does not import or call the init fn\nhere.\n\nFor python startup when an ancestor process has set the env indicating\nthat code coverage is being collected we activate coverage based on\ninfo passed via env vars.\n\"\"\"\nimport os\n\n\ndef multiprocessing_start(obj):\n cov = init()\n if cov:\n multiprocessing.util.Finalize(None, multiprocessing_finish, args=(cov,), exitpriority=1000)\n\n\ndef multiprocessing_finish(cov):\n cov.stop()\n cov.save()\n\n\ntry:\n import multiprocessing.util\nexcept ImportError:\n pass\nelse:\n multiprocessing.util.register_after_fork(multiprocessing_start, multiprocessing_start)\n\n\ndef init():\n # Only continue if ancestor process has set everything needed in\n # the env.\n\n cov_source = os.environ.get('COV_CORE_SOURCE')\n cov_config = os.environ.get('COV_CORE_CONFIG')\n cov_datafile = os.environ.get('COV_CORE_DATAFILE')\n if cov_datafile:\n # Import what we need to activate coverage.\n import coverage\n\n # Determine all source roots.\n if not cov_source:\n cov_source = None\n else:\n cov_source = cov_source.split(os.pathsep)\n if not cov_config:\n cov_config = True\n\n # Activate coverage for this process.\n cov = coverage.coverage(\n source=cov_source,\n data_suffix=True,\n config_file=cov_config,\n auto_data=True,\n data_file=cov_datafile\n )\n cov.load()\n cov.start()\n cov._warn_no_data = False\n cov._warn_unimported_source = False\n return cov\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class TestActor(Actor):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestActor(Actor):
<|reserved_special_token_0|>
def act(self):
self.key_commands()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestActor(Actor):
def __init__(self):
super(TestActor, self).__init__()
def act(self):
self.key_commands()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestActor(Actor):
def __init__(self):
super(TestActor, self).__init__()
def act(self):
self.key_commands()
def key_commands(self):
if PlayerInput.is_key_down(pygame.K_LEFT):
self.set_location(self.x - 1, self.y)
if PlayerInput.is_key_down(pygame.K_RIGHT):
self.set_location(self.x + 1, self.y)
if PlayerInput.is_key_down(pygame.K_UP):
self.set_location(self.x, self.y - 1)
if PlayerInput.is_key_down(pygame.K_DOWN):
self.set_location(self.x, self.y + 1)
<|reserved_special_token_1|>
import pygame
from Actor import Actor
import PlayerInput
class TestActor(Actor):
def __init__(self):
super(TestActor, self).__init__()
def act(self):
self.key_commands()
def key_commands(self):
if PlayerInput.is_key_down(pygame.K_LEFT):
self.set_location(self.x - 1, self.y)
if PlayerInput.is_key_down(pygame.K_RIGHT):
self.set_location(self.x + 1, self.y)
if PlayerInput.is_key_down(pygame.K_UP):
self.set_location(self.x, self.y - 1)
if PlayerInput.is_key_down(pygame.K_DOWN):
self.set_location(self.x, self.y + 1)
|
flexible
|
{
"blob_id": "9cb11c2bf032aa16abd3463ecdb8997addedc912",
"index": 1570,
"step-1": "<mask token>\n\n\nclass TestActor(Actor):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestActor(Actor):\n <mask token>\n\n def act(self):\n self.key_commands()\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestActor(Actor):\n\n def __init__(self):\n super(TestActor, self).__init__()\n\n def act(self):\n self.key_commands()\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass TestActor(Actor):\n\n def __init__(self):\n super(TestActor, self).__init__()\n\n def act(self):\n self.key_commands()\n\n def key_commands(self):\n if PlayerInput.is_key_down(pygame.K_LEFT):\n self.set_location(self.x - 1, self.y)\n if PlayerInput.is_key_down(pygame.K_RIGHT):\n self.set_location(self.x + 1, self.y)\n if PlayerInput.is_key_down(pygame.K_UP):\n self.set_location(self.x, self.y - 1)\n if PlayerInput.is_key_down(pygame.K_DOWN):\n self.set_location(self.x, self.y + 1)\n",
"step-5": "import pygame\nfrom Actor import Actor\nimport PlayerInput\n\n\nclass TestActor(Actor):\n\n def __init__(self):\n super(TestActor, self).__init__()\n\n def act(self):\n self.key_commands()\n\n def key_commands(self):\n if PlayerInput.is_key_down(pygame.K_LEFT):\n self.set_location(self.x - 1, self.y)\n if PlayerInput.is_key_down(pygame.K_RIGHT):\n self.set_location(self.x + 1, self.y)\n if PlayerInput.is_key_down(pygame.K_UP):\n self.set_location(self.x, self.y - 1)\n if PlayerInput.is_key_down(pygame.K_DOWN):\n self.set_location(self.x, self.y + 1)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def main():
N, K, D = map(int, input().split())
rules = [tuple(map(int, input().split())) for _ in range(K)]
minv, maxv = min([r[0] for r in rules]), max([r[1] for r in rules])
while minv + 1 < maxv:
midv = (minv + maxv) // 2
cnt, max_in = 0, 0
for A, B, C in rules:
if midv < A:
continue
n = (min(midv, B) - A) // C
max_in = max(A + n * C, max_in)
cnt += n + 1
if cnt >= D:
maxv = max_in
else:
minv = midv + 1
if minv < maxv:
cnt, max_in = 0, 0
for A, B, C in rules:
if minv < A:
continue
max_in = max(A + (min(minv, B) - A) // C * C, max_in)
cnt += (min(minv, B) - A) // C + 1
if cnt >= D:
maxv = max_in
print(maxv)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def input(_type=str):
return _type(sys.stdin.readline().strip())
def main():
N, K, D = map(int, input().split())
rules = [tuple(map(int, input().split())) for _ in range(K)]
minv, maxv = min([r[0] for r in rules]), max([r[1] for r in rules])
while minv + 1 < maxv:
midv = (minv + maxv) // 2
cnt, max_in = 0, 0
for A, B, C in rules:
if midv < A:
continue
n = (min(midv, B) - A) // C
max_in = max(A + n * C, max_in)
cnt += n + 1
if cnt >= D:
maxv = max_in
else:
minv = midv + 1
if minv < maxv:
cnt, max_in = 0, 0
for A, B, C in rules:
if minv < A:
continue
max_in = max(A + (min(minv, B) - A) // C * C, max_in)
cnt += (min(minv, B) - A) // C + 1
if cnt >= D:
maxv = max_in
print(maxv)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def input(_type=str):
return _type(sys.stdin.readline().strip())
def main():
N, K, D = map(int, input().split())
rules = [tuple(map(int, input().split())) for _ in range(K)]
minv, maxv = min([r[0] for r in rules]), max([r[1] for r in rules])
while minv + 1 < maxv:
midv = (minv + maxv) // 2
cnt, max_in = 0, 0
for A, B, C in rules:
if midv < A:
continue
n = (min(midv, B) - A) // C
max_in = max(A + n * C, max_in)
cnt += n + 1
if cnt >= D:
maxv = max_in
else:
minv = midv + 1
if minv < maxv:
cnt, max_in = 0, 0
for A, B, C in rules:
if minv < A:
continue
max_in = max(A + (min(minv, B) - A) // C * C, max_in)
cnt += (min(minv, B) - A) // C + 1
if cnt >= D:
maxv = max_in
print(maxv)
main()
<|reserved_special_token_1|>
import sys
def input(_type=str):
return _type(sys.stdin.readline().strip())
def main():
N, K, D = map(int, input().split())
rules = [tuple(map(int, input().split())) for _ in range(K)]
minv, maxv = min([r[0] for r in rules]), max([r[1] for r in rules])
while minv + 1 < maxv:
midv = (minv + maxv) // 2
cnt, max_in = 0, 0
for A, B, C in rules:
if midv < A:
continue
n = (min(midv, B) - A) // C
max_in = max(A + n * C, max_in)
cnt += n + 1
if cnt >= D:
maxv = max_in
else:
minv = midv + 1
if minv < maxv:
cnt, max_in = 0, 0
for A, B, C in rules:
if minv < A:
continue
max_in = max(A + (min(minv, B) - A) // C * C, max_in)
cnt += (min(minv, B) - A) // C + 1
if cnt >= D:
maxv = max_in
print(maxv)
main()
<|reserved_special_token_1|>
import sys
def input(_type=str):
return _type(sys.stdin.readline().strip())
def main():
N, K, D = map(int, input().split())
rules = [tuple(map(int, input().split())) for _ in range(K)]
minv, maxv = min([r[0] for r in rules]), max([r[1] for r in rules])
while minv + 1 < maxv:
midv = (minv + maxv)//2
cnt, max_in = 0, 0
for A, B, C in rules:
if midv < A:
continue
n = (min(midv, B)-A)//C
max_in = max(A + n * C, max_in)
cnt += n + 1
# print(minv, midv, maxv, max_in, cnt)
if cnt >= D:
maxv = max_in
else:
minv = midv + 1
if minv < maxv:
cnt, max_in = 0, 0
for A, B, C in rules:
if minv < A:
continue
max_in = max(A + (min(minv, B)-A)//C * C, max_in)
cnt += (min(minv, B) - A)//C + 1
if cnt >= D:
maxv = max_in
print(maxv)
main()
# 10 20 30 40 50
# 30 60 90
# 20 45 70
# 70 95
|
flexible
|
{
"blob_id": "f0b98a3d6015d57a49e315ac984cac1cccf0b382",
"index": 6084,
"step-1": "<mask token>\n\n\ndef main():\n N, K, D = map(int, input().split())\n rules = [tuple(map(int, input().split())) for _ in range(K)]\n minv, maxv = min([r[0] for r in rules]), max([r[1] for r in rules])\n while minv + 1 < maxv:\n midv = (minv + maxv) // 2\n cnt, max_in = 0, 0\n for A, B, C in rules:\n if midv < A:\n continue\n n = (min(midv, B) - A) // C\n max_in = max(A + n * C, max_in)\n cnt += n + 1\n if cnt >= D:\n maxv = max_in\n else:\n minv = midv + 1\n if minv < maxv:\n cnt, max_in = 0, 0\n for A, B, C in rules:\n if minv < A:\n continue\n max_in = max(A + (min(minv, B) - A) // C * C, max_in)\n cnt += (min(minv, B) - A) // C + 1\n if cnt >= D:\n maxv = max_in\n print(maxv)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef input(_type=str):\n return _type(sys.stdin.readline().strip())\n\n\ndef main():\n N, K, D = map(int, input().split())\n rules = [tuple(map(int, input().split())) for _ in range(K)]\n minv, maxv = min([r[0] for r in rules]), max([r[1] for r in rules])\n while minv + 1 < maxv:\n midv = (minv + maxv) // 2\n cnt, max_in = 0, 0\n for A, B, C in rules:\n if midv < A:\n continue\n n = (min(midv, B) - A) // C\n max_in = max(A + n * C, max_in)\n cnt += n + 1\n if cnt >= D:\n maxv = max_in\n else:\n minv = midv + 1\n if minv < maxv:\n cnt, max_in = 0, 0\n for A, B, C in rules:\n if minv < A:\n continue\n max_in = max(A + (min(minv, B) - A) // C * C, max_in)\n cnt += (min(minv, B) - A) // C + 1\n if cnt >= D:\n maxv = max_in\n print(maxv)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef input(_type=str):\n return _type(sys.stdin.readline().strip())\n\n\ndef main():\n N, K, D = map(int, input().split())\n rules = [tuple(map(int, input().split())) for _ in range(K)]\n minv, maxv = min([r[0] for r in rules]), max([r[1] for r in rules])\n while minv + 1 < maxv:\n midv = (minv + maxv) // 2\n cnt, max_in = 0, 0\n for A, B, C in rules:\n if midv < A:\n continue\n n = (min(midv, B) - A) // C\n max_in = max(A + n * C, max_in)\n cnt += n + 1\n if cnt >= D:\n maxv = max_in\n else:\n minv = midv + 1\n if minv < maxv:\n cnt, max_in = 0, 0\n for A, B, C in rules:\n if minv < A:\n continue\n max_in = max(A + (min(minv, B) - A) // C * C, max_in)\n cnt += (min(minv, B) - A) // C + 1\n if cnt >= D:\n maxv = max_in\n print(maxv)\n\n\nmain()\n",
"step-4": "import sys\n\n\ndef input(_type=str):\n return _type(sys.stdin.readline().strip())\n\n\ndef main():\n N, K, D = map(int, input().split())\n rules = [tuple(map(int, input().split())) for _ in range(K)]\n minv, maxv = min([r[0] for r in rules]), max([r[1] for r in rules])\n while minv + 1 < maxv:\n midv = (minv + maxv) // 2\n cnt, max_in = 0, 0\n for A, B, C in rules:\n if midv < A:\n continue\n n = (min(midv, B) - A) // C\n max_in = max(A + n * C, max_in)\n cnt += n + 1\n if cnt >= D:\n maxv = max_in\n else:\n minv = midv + 1\n if minv < maxv:\n cnt, max_in = 0, 0\n for A, B, C in rules:\n if minv < A:\n continue\n max_in = max(A + (min(minv, B) - A) // C * C, max_in)\n cnt += (min(minv, B) - A) // C + 1\n if cnt >= D:\n maxv = max_in\n print(maxv)\n\n\nmain()\n",
"step-5": "import sys\ndef input(_type=str):\n\treturn _type(sys.stdin.readline().strip())\n\ndef main():\n\tN, K, D = map(int, input().split())\n\trules = [tuple(map(int, input().split())) for _ in range(K)]\n\tminv, maxv = min([r[0] for r in rules]), max([r[1] for r in rules])\n\twhile minv + 1 < maxv:\n\t\tmidv = (minv + maxv)//2 \n\t\tcnt, max_in = 0, 0\n\t\tfor A, B, C in rules:\n\t\t\tif midv < A:\n\t\t\t\tcontinue\n\t\t\tn = (min(midv, B)-A)//C\n\t\t\tmax_in = max(A + n * C, max_in)\n\t\t\tcnt += n + 1\n\t\t# print(minv, midv, maxv, max_in, cnt)\n\t\tif cnt >= D:\n\t\t\tmaxv = max_in\n\t\telse:\n\t\t\tminv = midv + 1\n\n\tif minv < maxv:\n\t\tcnt, max_in = 0, 0\n\t\tfor A, B, C in rules:\n\t\t\tif minv < A:\n\t\t\t\tcontinue\n\t\t\tmax_in = max(A + (min(minv, B)-A)//C * C, max_in)\n\t\t\tcnt += (min(minv, B) - A)//C + 1\n\t\tif cnt >= D:\n\t\t\tmaxv = max_in\n\tprint(maxv)\n\nmain()\n\n# 10 20 30 40 50\n# 30 60 90\n# 20 45 70\n# 70 95",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import turtle
def distance(x1, y1, x2, y2):
return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5
x1, y1 = eval(input("Enter x1 and y1 for point 1: "))
x2, y2 = eval(input("Enter x2 and y2 for point 2: "))
distanceBetweenPoints = distance(x1, y1, x2, y2)
turtle.penup()
turtle.goto(x1, y1)
turtle.pendown()
turtle.write("Point 1")
turtle.goto(x2, y2)
turtle.write("Point 2")
#Center of line
turtle.penup()
turtle.goto((x1 + x2) / 2, (y1 + y2) / 2)
turtle.write("Distance")
turtle.done()
|
normal
|
{
"blob_id": "9f8065dfdfe07985244e18d92b59e1c045388a72",
"index": 2557,
"step-1": "<mask token>\n\n\ndef distance(x1, y1, x2, y2):\n return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef distance(x1, y1, x2, y2):\n return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5\n\n\n<mask token>\nturtle.penup()\nturtle.goto(x1, y1)\nturtle.pendown()\nturtle.write('Point 1')\nturtle.goto(x2, y2)\nturtle.write('Point 2')\nturtle.penup()\nturtle.goto((x1 + x2) / 2, (y1 + y2) / 2)\nturtle.write('Distance')\nturtle.done()\n",
"step-3": "<mask token>\n\n\ndef distance(x1, y1, x2, y2):\n return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5\n\n\nx1, y1 = eval(input('Enter x1 and y1 for point 1: '))\nx2, y2 = eval(input('Enter x2 and y2 for point 2: '))\ndistanceBetweenPoints = distance(x1, y1, x2, y2)\nturtle.penup()\nturtle.goto(x1, y1)\nturtle.pendown()\nturtle.write('Point 1')\nturtle.goto(x2, y2)\nturtle.write('Point 2')\nturtle.penup()\nturtle.goto((x1 + x2) / 2, (y1 + y2) / 2)\nturtle.write('Distance')\nturtle.done()\n",
"step-4": "import turtle\n\n\ndef distance(x1, y1, x2, y2):\n return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5\n\n\nx1, y1 = eval(input('Enter x1 and y1 for point 1: '))\nx2, y2 = eval(input('Enter x2 and y2 for point 2: '))\ndistanceBetweenPoints = distance(x1, y1, x2, y2)\nturtle.penup()\nturtle.goto(x1, y1)\nturtle.pendown()\nturtle.write('Point 1')\nturtle.goto(x2, y2)\nturtle.write('Point 2')\nturtle.penup()\nturtle.goto((x1 + x2) / 2, (y1 + y2) / 2)\nturtle.write('Distance')\nturtle.done()\n",
"step-5": "import turtle\n\ndef distance(x1, y1, x2, y2):\n return ((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)) ** 0.5\n\nx1, y1 = eval(input(\"Enter x1 and y1 for point 1: \"))\nx2, y2 = eval(input(\"Enter x2 and y2 for point 2: \"))\n\ndistanceBetweenPoints = distance(x1, y1, x2, y2)\n\nturtle.penup()\nturtle.goto(x1, y1)\nturtle.pendown()\nturtle.write(\"Point 1\")\nturtle.goto(x2, y2)\nturtle.write(\"Point 2\")\n\n#Center of line\nturtle.penup()\nturtle.goto((x1 + x2) / 2, (y1 + y2) / 2)\nturtle.write(\"Distance\")\n\n\n\nturtle.done()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def play_emergency_sound():
print('Playing emergency sound. There are ' + str(threading.
active_count()) + ' threads active')
while getattr(emergency_sound_thread, 'do_run', True):
pygame.mixer.init()
pygame.mixer.Channel(0).play(pygame.mixer.Sound(
'audio/alien_danger.wav'))
while pygame.mixer.Channel(0).get_busy() == True:
sleep(0.25)
print('Stopping emergency sound')
<|reserved_special_token_0|>
def get_keypress():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
key = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return key
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def play_emergency_sound():
print('Playing emergency sound. There are ' + str(threading.
active_count()) + ' threads active')
while getattr(emergency_sound_thread, 'do_run', True):
pygame.mixer.init()
pygame.mixer.Channel(0).play(pygame.mixer.Sound(
'audio/alien_danger.wav'))
while pygame.mixer.Channel(0).get_busy() == True:
sleep(0.25)
print('Stopping emergency sound')
def play_background_sound():
print('Playing background sound. There are ' + str(threading.
active_count()) + ' threads active')
while getattr(background_sound_thread, 'do_run', True):
pygame.mixer.init()
pygame.mixer.Channel(1).play(pygame.mixer.Sound('audio/buzzer.wav'))
while pygame.mixer.Channel(1).get_busy() == True:
sleep(0.25)
print('Stopping background sound')
def get_keypress():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
key = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return key
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def play_emergency_sound():
print('Playing emergency sound. There are ' + str(threading.
active_count()) + ' threads active')
while getattr(emergency_sound_thread, 'do_run', True):
pygame.mixer.init()
pygame.mixer.Channel(0).play(pygame.mixer.Sound(
'audio/alien_danger.wav'))
while pygame.mixer.Channel(0).get_busy() == True:
sleep(0.25)
print('Stopping emergency sound')
def play_background_sound():
print('Playing background sound. There are ' + str(threading.
active_count()) + ' threads active')
while getattr(background_sound_thread, 'do_run', True):
pygame.mixer.init()
pygame.mixer.Channel(1).play(pygame.mixer.Sound('audio/buzzer.wav'))
while pygame.mixer.Channel(1).get_busy() == True:
sleep(0.25)
print('Stopping background sound')
def get_keypress():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
key = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return key
while True:
key = get_keypress()
if key == '0':
print('Exiting!')
exit(0)
if key == '1':
print('1 pressed')
global background_sound_thread
background_sound_thread = threading.Thread(target=
play_background_sound, args=())
background_sound_thread.start()
if key == '2':
print('1 pressed')
global emergency_sound_thread
emergency_sound_thread = threading.Thread(target=
play_emergency_sound, args=())
emergency_sound_thread.start()
if key == 'z':
print('z pressed')
background_sound_thread.do_run = False
if key == 'x':
print('x pressed')
emergency_sound_thread.do_run = False
<|reserved_special_token_1|>
from time import sleep
import sys, termios, tty, os, pygame, threading
def play_emergency_sound():
print('Playing emergency sound. There are ' + str(threading.
active_count()) + ' threads active')
while getattr(emergency_sound_thread, 'do_run', True):
pygame.mixer.init()
pygame.mixer.Channel(0).play(pygame.mixer.Sound(
'audio/alien_danger.wav'))
while pygame.mixer.Channel(0).get_busy() == True:
sleep(0.25)
print('Stopping emergency sound')
def play_background_sound():
print('Playing background sound. There are ' + str(threading.
active_count()) + ' threads active')
while getattr(background_sound_thread, 'do_run', True):
pygame.mixer.init()
pygame.mixer.Channel(1).play(pygame.mixer.Sound('audio/buzzer.wav'))
while pygame.mixer.Channel(1).get_busy() == True:
sleep(0.25)
print('Stopping background sound')
def get_keypress():
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
key = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return key
while True:
key = get_keypress()
if key == '0':
print('Exiting!')
exit(0)
if key == '1':
print('1 pressed')
global background_sound_thread
background_sound_thread = threading.Thread(target=
play_background_sound, args=())
background_sound_thread.start()
if key == '2':
print('1 pressed')
global emergency_sound_thread
emergency_sound_thread = threading.Thread(target=
play_emergency_sound, args=())
emergency_sound_thread.start()
if key == 'z':
print('z pressed')
background_sound_thread.do_run = False
if key == 'x':
print('x pressed')
emergency_sound_thread.do_run = False
<|reserved_special_token_1|>
# ===================================================================
# Setup
# ===================================================================
from time import sleep
import sys, termios, tty, os, pygame, threading
# ===================================================================
# Functions
# ===================================================================
def play_emergency_sound():
print("Playing emergency sound. There are " + str( threading.active_count() ) + " threads active")
while getattr(emergency_sound_thread, "do_run", True):
pygame.mixer.init()
pygame.mixer.Channel(0).play( pygame.mixer.Sound('audio/alien_danger.wav') )
while pygame.mixer.Channel(0).get_busy() == True:
sleep(.25)
print( "Stopping emergency sound" )
def play_background_sound():
print("Playing background sound. There are " + str( threading.active_count() ) + " threads active")
while getattr(background_sound_thread, "do_run", True):
pygame.mixer.init()
pygame.mixer.Channel(1).play( pygame.mixer.Sound('audio/buzzer.wav') )
while pygame.mixer.Channel(1).get_busy() == True:
sleep(.25)
print( "Stopping background sound" )
def get_keypress():
    """Block until a single character is typed on stdin and return it.

    The terminal is put into raw mode so one byte can be read without
    line buffering (no Enter required); the previous terminal settings
    are always restored, even if the read raises.
    """
    fd = sys.stdin.fileno()
    old_settings = termios.tcgetattr(fd)
    try:
        tty.setraw(sys.stdin.fileno())
        key = sys.stdin.read(1)
    finally:
        # Restore cooked mode unconditionally (e.g. on KeyboardInterrupt).
        termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return key
# ===================================================================
# Main program
# ===================================================================
# ===================================================================
# Main program: dispatch single keypresses to the sound threads.
#   0 -> quit, 1 -> start background sound, 2 -> start emergency sound,
#   z -> stop background sound, x -> stop emergency sound (via do_run).
# `global` was removed: it has no effect at module level, where plain
# assignment already binds module-level names.
# ===================================================================
while True:
    key = get_keypress()

    if key == "0":
        print("Exiting!")
        sys.exit(0)  # sys.exit works even when the site module's exit() is absent

    if key == "1":
        print("1 pressed")
        background_sound_thread = threading.Thread(
            target=play_background_sound, args=())
        background_sound_thread.start()

    if key == "2":
        # BUG FIX: the original printed '1 pressed' here (copy-paste error).
        print("2 pressed")
        emergency_sound_thread = threading.Thread(
            target=play_emergency_sound, args=())
        emergency_sound_thread.start()

    if key == "z":
        print("z pressed")
        background_sound_thread.do_run = False

    if key == "x":
        print("x pressed")
        emergency_sound_thread.do_run = False
|
flexible
|
{
"blob_id": "44274446673225c769f63191d43e4747d8ddfbf7",
"index": 6934,
"step-1": "<mask token>\n\n\ndef play_emergency_sound():\n print('Playing emergency sound. There are ' + str(threading.\n active_count()) + ' threads active')\n while getattr(emergency_sound_thread, 'do_run', True):\n pygame.mixer.init()\n pygame.mixer.Channel(0).play(pygame.mixer.Sound(\n 'audio/alien_danger.wav'))\n while pygame.mixer.Channel(0).get_busy() == True:\n sleep(0.25)\n print('Stopping emergency sound')\n\n\n<mask token>\n\n\ndef get_keypress():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n key = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return key\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef play_emergency_sound():\n print('Playing emergency sound. There are ' + str(threading.\n active_count()) + ' threads active')\n while getattr(emergency_sound_thread, 'do_run', True):\n pygame.mixer.init()\n pygame.mixer.Channel(0).play(pygame.mixer.Sound(\n 'audio/alien_danger.wav'))\n while pygame.mixer.Channel(0).get_busy() == True:\n sleep(0.25)\n print('Stopping emergency sound')\n\n\ndef play_background_sound():\n print('Playing background sound. There are ' + str(threading.\n active_count()) + ' threads active')\n while getattr(background_sound_thread, 'do_run', True):\n pygame.mixer.init()\n pygame.mixer.Channel(1).play(pygame.mixer.Sound('audio/buzzer.wav'))\n while pygame.mixer.Channel(1).get_busy() == True:\n sleep(0.25)\n print('Stopping background sound')\n\n\ndef get_keypress():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n key = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return key\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef play_emergency_sound():\n print('Playing emergency sound. There are ' + str(threading.\n active_count()) + ' threads active')\n while getattr(emergency_sound_thread, 'do_run', True):\n pygame.mixer.init()\n pygame.mixer.Channel(0).play(pygame.mixer.Sound(\n 'audio/alien_danger.wav'))\n while pygame.mixer.Channel(0).get_busy() == True:\n sleep(0.25)\n print('Stopping emergency sound')\n\n\ndef play_background_sound():\n print('Playing background sound. There are ' + str(threading.\n active_count()) + ' threads active')\n while getattr(background_sound_thread, 'do_run', True):\n pygame.mixer.init()\n pygame.mixer.Channel(1).play(pygame.mixer.Sound('audio/buzzer.wav'))\n while pygame.mixer.Channel(1).get_busy() == True:\n sleep(0.25)\n print('Stopping background sound')\n\n\ndef get_keypress():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n key = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return key\n\n\nwhile True:\n key = get_keypress()\n if key == '0':\n print('Exiting!')\n exit(0)\n if key == '1':\n print('1 pressed')\n global background_sound_thread\n background_sound_thread = threading.Thread(target=\n play_background_sound, args=())\n background_sound_thread.start()\n if key == '2':\n print('1 pressed')\n global emergency_sound_thread\n emergency_sound_thread = threading.Thread(target=\n play_emergency_sound, args=())\n emergency_sound_thread.start()\n if key == 'z':\n print('z pressed')\n background_sound_thread.do_run = False\n if key == 'x':\n print('x pressed')\n emergency_sound_thread.do_run = False\n",
"step-4": "from time import sleep\nimport sys, termios, tty, os, pygame, threading\n\n\ndef play_emergency_sound():\n print('Playing emergency sound. There are ' + str(threading.\n active_count()) + ' threads active')\n while getattr(emergency_sound_thread, 'do_run', True):\n pygame.mixer.init()\n pygame.mixer.Channel(0).play(pygame.mixer.Sound(\n 'audio/alien_danger.wav'))\n while pygame.mixer.Channel(0).get_busy() == True:\n sleep(0.25)\n print('Stopping emergency sound')\n\n\ndef play_background_sound():\n print('Playing background sound. There are ' + str(threading.\n active_count()) + ' threads active')\n while getattr(background_sound_thread, 'do_run', True):\n pygame.mixer.init()\n pygame.mixer.Channel(1).play(pygame.mixer.Sound('audio/buzzer.wav'))\n while pygame.mixer.Channel(1).get_busy() == True:\n sleep(0.25)\n print('Stopping background sound')\n\n\ndef get_keypress():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n key = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return key\n\n\nwhile True:\n key = get_keypress()\n if key == '0':\n print('Exiting!')\n exit(0)\n if key == '1':\n print('1 pressed')\n global background_sound_thread\n background_sound_thread = threading.Thread(target=\n play_background_sound, args=())\n background_sound_thread.start()\n if key == '2':\n print('1 pressed')\n global emergency_sound_thread\n emergency_sound_thread = threading.Thread(target=\n play_emergency_sound, args=())\n emergency_sound_thread.start()\n if key == 'z':\n print('z pressed')\n background_sound_thread.do_run = False\n if key == 'x':\n print('x pressed')\n emergency_sound_thread.do_run = False\n",
"step-5": "# ===================================================================\n# Setup\n# ===================================================================\nfrom time import sleep\nimport sys, termios, tty, os, pygame, threading\n\n# ===================================================================\n# Functions\n# ===================================================================\n\ndef play_emergency_sound():\n print(\"Playing emergency sound. There are \" + str( threading.active_count() ) + \" threads active\")\n while getattr(emergency_sound_thread, \"do_run\", True):\n pygame.mixer.init()\n pygame.mixer.Channel(0).play( pygame.mixer.Sound('audio/alien_danger.wav') )\n while pygame.mixer.Channel(0).get_busy() == True:\n sleep(.25)\n print( \"Stopping emergency sound\" )\n\n\ndef play_background_sound():\n print(\"Playing background sound. There are \" + str( threading.active_count() ) + \" threads active\")\n while getattr(background_sound_thread, \"do_run\", True):\n pygame.mixer.init()\n pygame.mixer.Channel(1).play( pygame.mixer.Sound('audio/buzzer.wav') )\n while pygame.mixer.Channel(1).get_busy() == True:\n sleep(.25)\n print( \"Stopping background sound\" )\n\n\ndef get_keypress():\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n key = sys.stdin.read(1)\n \n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return key\n\n\n# ===================================================================\n# Main program \n# ===================================================================\n\n\nwhile True:\n key = get_keypress()\n\n if (key == \"0\"):\n print(\"Exiting!\")\n exit(0)\n \n if (key == \"1\"):\n print(\"1 pressed\")\n global background_sound_thread\n background_sound_thread = threading.Thread( target=play_background_sound, args=() )\n background_sound_thread.start()\n\n if (key == \"2\"):\n print(\"1 pressed\")\n global emergency_sound_thread\n emergency_sound_thread = 
threading.Thread( target=play_emergency_sound, args=() )\n emergency_sound_thread.start()\n\n if (key == \"z\"):\n print(\"z pressed\")\n background_sound_thread.do_run = False\n\n if (key == \"x\"):\n print(\"x pressed\")\n emergency_sound_thread.do_run = False\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# HTTP response status codes returned by the API.
OK = 200
CREATED = 201
NOT_MODIFIED = 304
UNAUTHORIZED = 401
FORBIDDEN = 403
BAD_REQUEST = 400
NOT_FOUND = 404
CONFLICT = 409
UNPROCESSABLE = 422
INTERNAL_SERVER_ERROR = 500
NOT_IMPLEMENTED = 501
SERVICE_UNAVAILABLE = 503

# User role identifiers.
ADMIN = 'admin'
ELITE = 'elite'
NOOB = 'noob'

# Account transaction types.
WITHDRAW = 'withdraw'
FUND = 'fund'
|
flexible
|
{
"blob_id": "d90942f22cbbd9cfc3a431b7857cd909a7690966",
"index": 92,
"step-1": "<mask token>\n",
"step-2": "OK = 200\nCREATED = 201\nNOT_MODIFIED = 304\nUNAUTHORIZED = 401\nFORBIDDEN = 403\nBAD_REQUEST = 400\nNOT_FOUND = 404\nCONFLICT = 409\nUNPROCESSABLE = 422\nINTERNAL_SERVER_ERROR = 500\nNOT_IMPLEMENTED = 501\nSERVICE_UNAVAILABLE = 503\nADMIN = 'admin'\nELITE = 'elite'\nNOOB = 'noob'\nWITHDRAW = 'withdraw'\nFUND = 'fund'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# -*- coding: utf-8 -*-
"""Fit a linear SVR to two exam-score columns and plot the regression line."""
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from sklearn import svm

# Two-column training data: column 0 -> exam 1 score, column 1 -> exam 2 score.
data = np.loadtxt('yucedata1.txt')
X = data[:, 0]
y = data[:, 1]

plt.figure(1, figsize=(8, 6))
# CJK-capable font so the (Chinese-locale) axis label renders correctly.
myfont = FontProperties(fname=r"c:\windows\fonts\simsun.ttc", size=14)
plt.scatter(X, y, color="red", label="ini_data", linewidth=3)
plt.xlabel(u'Exam1 Score', fontproperties=myfont)
plt.ylabel('Exam2 Score')
plt.legend()

# plt.show()
# scikit-learn expects a 2-D feature matrix: reshape to (n_samples, 1).
X = X.reshape(-1, 1)
# BUG FIX: `print X` is Python-2-only syntax and is a SyntaxError on
# Python 3; print(X) behaves the same on both interpreters.
print(X)
clf = svm.SVR(kernel='linear').fit(X, y)
# Alternative kernels kept for experimentation:
# clf = svm.SVC(kernel='poly', degree=5, gamma=1, coef0=0).fit(X, y)
# clf = svm.SVR(kernel='rbf', C=100, gamma=20).fit(X, y)

# NOTE: a larger gamma adds more polynomial terms, which causes high variance.

# print(u'Accuracy: %.2f' % clf.score(X, y))

# Predict along an evenly spaced grid and overlay the fitted line.
X1 = np.linspace(0, 25, 100).reshape(-1, 1)
y1 = clf.predict(X1)
plt.plot(X1, y1, color="orange", label="Fitting Line", linewidth=2)
plt.show()
|
normal
|
{
"blob_id": "73d7b1895282df5b744d8c03ec7e6f8530366b76",
"index": 865,
"step-1": "# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt \r\nfrom matplotlib.font_manager import FontProperties \r\nfrom sklearn import svm\r\n\r\n\r\ndata=np.loadtxt('yucedata1.txt')\r\n\r\nX=data[:,0]\r\ny=data[:,1]\r\n\r\nplt.figure(1,figsize=(8,6))\r\nmyfont = FontProperties(fname=r\"c:\\windows\\fonts\\simsun.ttc\", size=14) \r\nplt.scatter(X,y,color=\"red\",label=\"ini_data\",linewidth=3)\r\nplt.xlabel(u'Exam1 Score',fontproperties=myfont)\r\nplt.ylabel('Exam2 Score')\r\nplt.legend()\r\n\r\n# plt.show()\r\nX=X.reshape(-1,1)\r\nprint X\r\nclf = svm.SVR(kernel='linear').fit(X, y)\r\n# clf = svm.SVC(kernel='poly',degree=5,gamma=1,coef0=0).fit(X, y)\r\n# clf = svm.SVR(kernel='rbf',C=100,gamma=20).fit(X, y)\r\n\r\n'''gamma越大,多项式项数越多,导致高方差'''\r\n\r\n\r\n# print u'精准度为: %.2f' % clf.score(X, y)\r\n\r\nX1=np.linspace(0,25,100).reshape(-1,1)\r\n\r\ny1=clf.predict(X1)\r\n\r\nplt.plot(X1,y1,color=\"orange\",label=\"Fitting Line\",linewidth=2) \r\n\r\n\r\nplt.show()\r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from Monument import Monument, Dataset
import importer_utils as utils
import importer as importer
class RoRo(Monument):
    """Wikidata importer for rows of the Romanian (ro) monuments dataset.

    Each instance wraps one database row and emits Wikidata statements,
    labels and descriptions via the ``Monument`` base-class helpers.
    """

    def set_adm_location(self):
        # Map the county ISO code (judetul_iso) to its Wikidata item using
        # the preloaded counties lookup file; store as "located_adm".
        counties = self.data_files["counties"]
        self.set_from_dict_match(counties, "iso_code",
                                 "judetul_iso", "located_adm")

    def set_location(self):
        """
        Set Location property from article linked in localitate.

        Run this after set_adm_location. localitate can
        contain several links (we take the 1st which seems to
        be the most granular one) and a mix of administrative
        types. Compare with admin location so that they're not
        the same.
        """
        if self.has_non_empty_attribute("localitate"):
            loc_item = None
            if utils.count_wikilinks(self.localitate) > 0:
                loc_link = utils.get_wikilinks(self.localitate)[0]
                loc_item = utils.q_from_wikipedia("ro", loc_link.title)
                adm_item = self.get_statement_values("located_adm")
                # Only add "location" when it differs from the admin unit
                # already recorded by set_adm_location.
                if loc_item and loc_item != adm_item[0]:
                    self.add_statement("location", loc_item)
            if not loc_item:
                # Could not resolve a location item — flag for manual review.
                self.add_to_report("localitate", self.localitate, "location")

    def set_heritage_id(self):
        # "cod" is the row's monument code; used as the unique heritage ID.
        self.add_statement("romanian_monument_id", self.cod)

    def update_descriptions(self):
        """Build the English description, e.g. 'heritage site in X, Romania'."""
        adm_code = self.judetul_iso
        counties = self.data_files["counties"]
        county_item = utils.get_item_from_dict_by_key(dict_name=counties,
                                                      search_term=adm_code,
                                                      return_content_of="itemLabel",
                                                      search_in="iso_code")
        # Use the county name only when the lookup is unambiguous (exactly 1 hit).
        if len(county_item) == 1:
            place_name = "{}, Romania".format(county_item[0])
        else:
            place_name = "Romania"
        desc = "heritage site in {}".format(place_name)
        self.add_description("en", desc)
        # The monument code disambiguates identically named items.
        self.add_disambiguator(str(self.cod))

    def set_address(self):
        """Store adresa as a street address or, failing that, as directions.

        If the address contains a street-like marker it becomes a
        "located_street" statement (with the town appended when known);
        otherwise it is stored as monolingual Romanian "directions".
        """
        street_patterns = ("piața", "str.", "bd.")
        if self.has_non_empty_attribute("adresa"):
            adr_lower = self.adresa.lower()
            adr_nice = utils.remove_markup(self.adresa)
            if any(pattern in adr_lower for pattern in street_patterns):
                if self.has_non_empty_attribute("localitate"):
                    town = utils.remove_markup(self.localitate)
                    adr_nice = "{}, {}".format(adr_nice, town)
                self.add_statement("located_street", adr_nice)
            else:
                directions = utils.package_monolingual(adr_nice, 'ro')
                self.add_statement("directions", directions)

    def update_labels(self):
        # Romanian label = the monument's name (denumire) with wiki markup stripped.
        romanian = utils.remove_markup(self.denumire)
        self.add_label("ro", romanian)

    def __init__(self, db_row_dict, mapping, data_files, existing, repository):
        """Populate all statements for one database row.

        NOTE: ordering matters — set_adm_location must run before
        set_location (see set_location's docstring).
        """
        Monument.__init__(self, db_row_dict, mapping,
                          data_files, existing, repository)
        self.set_monuments_all_id("cod")
        self.set_changed()
        self.set_wlm_source()
        self.set_heritage_id()
        self.set_heritage()
        self.set_country()
        self.set_adm_location()
        self.set_address()
        self.set_location()
        self.set_coords()
        self.set_commonscat()
        self.set_image("imagine")
        self.update_labels()
        self.update_descriptions()
        # Try to link to an existing Wikidata item before creating a new one.
        self.set_wd_item(self.find_matching_wikidata(mapping))
if __name__ == "__main__":
    """Command line entry point for importer."""
    # Romanian dataset ("ro" language, "ro" country) handled by the RoRo
    # class; county ISO-code lookups come from romania_counties.json.
    args = importer.handle_args()
    dataset = Dataset("ro", "ro", RoRo)
    dataset.data_files = {"counties": "romania_counties.json"}
    importer.main(args, dataset)
|
normal
|
{
"blob_id": "5f8a9d82a3245671b438475d1fac7be4db769fbe",
"index": 8493,
"step-1": "<mask token>\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files['counties']\n self.set_from_dict_match(counties, 'iso_code', 'judetul_iso',\n 'located_adm')\n <mask token>\n\n def set_heritage_id(self):\n self.add_statement('romanian_monument_id', self.cod)\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing,\n repository)\n self.set_monuments_all_id('cod')\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image('imagine')\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files['counties']\n self.set_from_dict_match(counties, 'iso_code', 'judetul_iso',\n 'located_adm')\n <mask token>\n\n def set_heritage_id(self):\n self.add_statement('romanian_monument_id', self.cod)\n\n def update_descriptions(self):\n adm_code = self.judetul_iso\n counties = self.data_files['counties']\n county_item = utils.get_item_from_dict_by_key(dict_name=counties,\n search_term=adm_code, return_content_of='itemLabel', search_in=\n 'iso_code')\n if len(county_item) == 1:\n place_name = '{}, Romania'.format(county_item[0])\n else:\n place_name = 'Romania'\n desc = 'heritage site in {}'.format(place_name)\n self.add_description('en', desc)\n self.add_disambiguator(str(self.cod))\n <mask token>\n <mask token>\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing,\n repository)\n self.set_monuments_all_id('cod')\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image('imagine')\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files['counties']\n self.set_from_dict_match(counties, 'iso_code', 'judetul_iso',\n 'located_adm')\n\n def set_location(self):\n \"\"\"\n Set Location property from article linked in localitate.\n\n Run this after set_adm_location. localitate can\n contain several links (we take the 1st which seems to\n be the most granular one) and a mix of administrative\n types. Compare with admin location so that they're not\n the same.\n \"\"\"\n if self.has_non_empty_attribute('localitate'):\n loc_item = None\n if utils.count_wikilinks(self.localitate) > 0:\n loc_link = utils.get_wikilinks(self.localitate)[0]\n loc_item = utils.q_from_wikipedia('ro', loc_link.title)\n adm_item = self.get_statement_values('located_adm')\n if loc_item and loc_item != adm_item[0]:\n self.add_statement('location', loc_item)\n if not loc_item:\n self.add_to_report('localitate', self.localitate, 'location')\n\n def set_heritage_id(self):\n self.add_statement('romanian_monument_id', self.cod)\n\n def update_descriptions(self):\n adm_code = self.judetul_iso\n counties = self.data_files['counties']\n county_item = utils.get_item_from_dict_by_key(dict_name=counties,\n search_term=adm_code, return_content_of='itemLabel', search_in=\n 'iso_code')\n if len(county_item) == 1:\n place_name = '{}, Romania'.format(county_item[0])\n else:\n place_name = 'Romania'\n desc = 'heritage site in {}'.format(place_name)\n self.add_description('en', desc)\n self.add_disambiguator(str(self.cod))\n\n def set_address(self):\n street_patterns = 'piața', 'str.', 'bd.'\n if self.has_non_empty_attribute('adresa'):\n adr_lower = self.adresa.lower()\n adr_nice = utils.remove_markup(self.adresa)\n if any(pattern in adr_lower for pattern in street_patterns):\n if self.has_non_empty_attribute('localitate'):\n town = utils.remove_markup(self.localitate)\n adr_nice = '{}, {}'.format(adr_nice, town)\n 
self.add_statement('located_street', adr_nice)\n else:\n directions = utils.package_monolingual(adr_nice, 'ro')\n self.add_statement('directions', directions)\n\n def update_labels(self):\n romanian = utils.remove_markup(self.denumire)\n self.add_label('ro', romanian)\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing,\n repository)\n self.set_monuments_all_id('cod')\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image('imagine')\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files['counties']\n self.set_from_dict_match(counties, 'iso_code', 'judetul_iso',\n 'located_adm')\n\n def set_location(self):\n \"\"\"\n Set Location property from article linked in localitate.\n\n Run this after set_adm_location. localitate can\n contain several links (we take the 1st which seems to\n be the most granular one) and a mix of administrative\n types. Compare with admin location so that they're not\n the same.\n \"\"\"\n if self.has_non_empty_attribute('localitate'):\n loc_item = None\n if utils.count_wikilinks(self.localitate) > 0:\n loc_link = utils.get_wikilinks(self.localitate)[0]\n loc_item = utils.q_from_wikipedia('ro', loc_link.title)\n adm_item = self.get_statement_values('located_adm')\n if loc_item and loc_item != adm_item[0]:\n self.add_statement('location', loc_item)\n if not loc_item:\n self.add_to_report('localitate', self.localitate, 'location')\n\n def set_heritage_id(self):\n self.add_statement('romanian_monument_id', self.cod)\n\n def update_descriptions(self):\n adm_code = self.judetul_iso\n counties = self.data_files['counties']\n county_item = utils.get_item_from_dict_by_key(dict_name=counties,\n search_term=adm_code, return_content_of='itemLabel', search_in=\n 'iso_code')\n if len(county_item) == 1:\n place_name = '{}, Romania'.format(county_item[0])\n else:\n place_name = 'Romania'\n desc = 'heritage site in {}'.format(place_name)\n self.add_description('en', desc)\n self.add_disambiguator(str(self.cod))\n\n def set_address(self):\n street_patterns = 'piața', 'str.', 'bd.'\n if self.has_non_empty_attribute('adresa'):\n adr_lower = self.adresa.lower()\n adr_nice = utils.remove_markup(self.adresa)\n if any(pattern in adr_lower for pattern in street_patterns):\n if self.has_non_empty_attribute('localitate'):\n town = utils.remove_markup(self.localitate)\n adr_nice = '{}, {}'.format(adr_nice, town)\n 
self.add_statement('located_street', adr_nice)\n else:\n directions = utils.package_monolingual(adr_nice, 'ro')\n self.add_statement('directions', directions)\n\n def update_labels(self):\n romanian = utils.remove_markup(self.denumire)\n self.add_label('ro', romanian)\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping, data_files, existing,\n repository)\n self.set_monuments_all_id('cod')\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image('imagine')\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\nif __name__ == '__main__':\n \"\"\"Command line entry point for importer.\"\"\"\n args = importer.handle_args()\n dataset = Dataset('ro', 'ro', RoRo)\n dataset.data_files = {'counties': 'romania_counties.json'}\n importer.main(args, dataset)\n",
"step-5": "from Monument import Monument, Dataset\nimport importer_utils as utils\nimport importer as importer\n\n\nclass RoRo(Monument):\n\n def set_adm_location(self):\n counties = self.data_files[\"counties\"]\n self.set_from_dict_match(counties, \"iso_code\",\n \"judetul_iso\", \"located_adm\")\n\n def set_location(self):\n \"\"\"\n Set Location property from article linked in localitate.\n\n Run this after set_adm_location. localitate can\n contain several links (we take the 1st which seems to\n be the most granular one) and a mix of administrative\n types. Compare with admin location so that they're not\n the same.\n \"\"\"\n if self.has_non_empty_attribute(\"localitate\"):\n loc_item = None\n if utils.count_wikilinks(self.localitate) > 0:\n loc_link = utils.get_wikilinks(self.localitate)[0]\n loc_item = utils.q_from_wikipedia(\"ro\", loc_link.title)\n adm_item = self.get_statement_values(\"located_adm\")\n if loc_item and loc_item != adm_item[0]:\n self.add_statement(\"location\", loc_item)\n\n if not loc_item:\n self.add_to_report(\"localitate\", self.localitate, \"location\")\n\n def set_heritage_id(self):\n self.add_statement(\"romanian_monument_id\", self.cod)\n\n def update_descriptions(self):\n adm_code = self.judetul_iso\n counties = self.data_files[\"counties\"]\n county_item = utils.get_item_from_dict_by_key(dict_name=counties,\n search_term=adm_code,\n return_content_of=\"itemLabel\",\n search_in=\"iso_code\")\n if len(county_item) == 1:\n place_name = \"{}, Romania\".format(county_item[0])\n else:\n place_name = \"Romania\"\n desc = \"heritage site in {}\".format(place_name)\n self.add_description(\"en\", desc)\n self.add_disambiguator(str(self.cod))\n\n def set_address(self):\n street_patterns = (\"piața\", \"str.\", \"bd.\")\n if self.has_non_empty_attribute(\"adresa\"):\n adr_lower = self.adresa.lower()\n adr_nice = utils.remove_markup(self.adresa)\n if any(pattern in adr_lower for pattern in street_patterns):\n if 
self.has_non_empty_attribute(\"localitate\"):\n town = utils.remove_markup(self.localitate)\n adr_nice = \"{}, {}\".format(adr_nice, town)\n self.add_statement(\"located_street\", adr_nice)\n else:\n directions = utils.package_monolingual(adr_nice, 'ro')\n self.add_statement(\"directions\", directions)\n\n def update_labels(self):\n romanian = utils.remove_markup(self.denumire)\n self.add_label(\"ro\", romanian)\n\n def __init__(self, db_row_dict, mapping, data_files, existing, repository):\n Monument.__init__(self, db_row_dict, mapping,\n data_files, existing, repository)\n self.set_monuments_all_id(\"cod\")\n self.set_changed()\n self.set_wlm_source()\n self.set_heritage_id()\n self.set_heritage()\n self.set_country()\n self.set_adm_location()\n self.set_address()\n self.set_location()\n self.set_coords()\n self.set_commonscat()\n self.set_image(\"imagine\")\n self.update_labels()\n self.update_descriptions()\n self.set_wd_item(self.find_matching_wikidata(mapping))\n\n\nif __name__ == \"__main__\":\n \"\"\"Command line entry point for importer.\"\"\"\n args = importer.handle_args()\n dataset = Dataset(\"ro\", \"ro\", RoRo)\n dataset.data_files = {\"counties\": \"romania_counties.json\"}\n importer.main(args, dataset)\n",
"step-ids": [
4,
5,
8,
9,
11
]
}
|
[
4,
5,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
    """Initial schema: Destination, Gallery, Tour and TourDetail models."""

    initial = True

    dependencies = []

    # NOTE(review): 'feautured' and 'descreption' are typos in the original
    # schema; renaming them requires a follow-up migration, so they are kept.
    operations = [
        migrations.CreateModel(
            name='Destination',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('image', models.ImageField(upload_to='img/destinations')),
            ],
        ),
        migrations.CreateModel(
            name='Gallery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('image', models.ImageField(upload_to='img/tours')),
            ],
        ),
        migrations.CreateModel(
            name='Tour',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50,
                                           verbose_name='title for admin')),
                ('status', models.BooleanField(default=False)),
                ('price', models.IntegerField()),
                ('stars', models.IntegerField(
                    choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)])),
                ('feautured', models.BooleanField(default=True)),
                ('destination', models.ForeignKey(
                    on_delete=django.db.models.deletion.CASCADE,
                    to='tours.Destination')),
            ],
        ),
        migrations.CreateModel(
            name='TourDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('descreption', models.TextField()),
                ('tour', models.ForeignKey(
                    on_delete=django.db.models.deletion.CASCADE,
                    to='tours.Tour')),
            ],
        ),
        migrations.AddField(
            model_name='gallery',
            name='tour',
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                to='tours.Tour'),
        ),
    ]
<|reserved_special_token_1|>
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """First migration of the tours app: four models plus a gallery->tour FK."""

    initial = True

    dependencies = []

    operations = [
        # Destination: a place a tour can visit.
        migrations.CreateModel(
            name='Destination',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('image', models.ImageField(upload_to='img/destinations')),
            ],
        ),
        # Gallery: an image attached to a tour (FK added below).
        migrations.CreateModel(
            name='Gallery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('image', models.ImageField(upload_to='img/tours')),
            ],
        ),
        # Tour: the sellable product, rated 1-5 stars.
        migrations.CreateModel(
            name='Tour',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50,
                                           verbose_name='title for admin')),
                ('status', models.BooleanField(default=False)),
                ('price', models.IntegerField()),
                ('stars', models.IntegerField(
                    choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)])),
                ('feautured', models.BooleanField(default=True)),
                ('destination', models.ForeignKey(
                    on_delete=django.db.models.deletion.CASCADE,
                    to='tours.Destination')),
            ],
        ),
        # TourDetail: free-text sections describing a tour.
        migrations.CreateModel(
            name='TourDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True,
                                        serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('descreption', models.TextField()),
                ('tour', models.ForeignKey(
                    on_delete=django.db.models.deletion.CASCADE,
                    to='tours.Tour')),
            ],
        ),
        # Link each Gallery image to its Tour.
        migrations.AddField(
            model_name='gallery',
            name='tour',
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                to='tours.Tour'),
        ),
    ]
<|reserved_special_token_1|>
# Generated by Django 2.1.5 on 2019-01-20 18:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates Destination, Gallery, Tour and TourDetail.

    Auto-generated by Django; edit only via new migrations.
    """

    initial = True

    dependencies = [
    ]

    # NOTE(review): 'feautured' and 'descreption' are misspelled field names;
    # fixing them would require a schema-changing follow-up migration.
    operations = [
        migrations.CreateModel(
            name='Destination',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50)),
                ('image', models.ImageField(upload_to='img/destinations')),
            ],
        ),
        migrations.CreateModel(
            name='Gallery',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('image', models.ImageField(upload_to='img/tours')),
            ],
        ),
        migrations.CreateModel(
            name='Tour',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50, verbose_name='title for admin')),
                ('status', models.BooleanField(default=False)),
                ('price', models.IntegerField()),
                ('stars', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)])),
                ('feautured', models.BooleanField(default=True)),
                ('destination', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tours.Destination')),
            ],
        ),
        migrations.CreateModel(
            name='TourDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=50)),
                ('descreption', models.TextField()),
                ('tour', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tours.Tour')),
            ],
        ),
        # Attach each Gallery image to its parent Tour.
        migrations.AddField(
            model_name='gallery',
            name='tour',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tours.Tour'),
        ),
    ]
|
flexible
|
{
"blob_id": "6907a1e08d728732eebf81fec7c0dab8729448e2",
"index": 9712,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Destination', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=50\n )), ('image', models.ImageField(upload_to='img/destinations'))]),\n migrations.CreateModel(name='Gallery', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('title', models.CharField(max_length=50)), (\n 'image', models.ImageField(upload_to='img/tours'))]), migrations.\n CreateModel(name='Tour', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('title', models.CharField(max_length=50, verbose_name=\n 'title for admin')), ('status', models.BooleanField(default=False)),\n ('price', models.IntegerField()), ('stars', models.IntegerField(\n choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)])), ('feautured',\n models.BooleanField(default=True)), ('destination', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'tours.Destination'))]), migrations.CreateModel(name='TourDetail',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('title', models.CharField(\n max_length=50)), ('descreption', models.TextField()), ('tour',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'tours.Tour'))]), migrations.AddField(model_name='gallery', name=\n 'tour', field=models.ForeignKey(on_delete=django.db.models.deletion\n .CASCADE, to='tours.Tour'))]\n",
"step-4": "from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = []\n operations = [migrations.CreateModel(name='Destination', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('name', models.CharField(max_length=50\n )), ('image', models.ImageField(upload_to='img/destinations'))]),\n migrations.CreateModel(name='Gallery', fields=[('id', models.\n AutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('title', models.CharField(max_length=50)), (\n 'image', models.ImageField(upload_to='img/tours'))]), migrations.\n CreateModel(name='Tour', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('title', models.CharField(max_length=50, verbose_name=\n 'title for admin')), ('status', models.BooleanField(default=False)),\n ('price', models.IntegerField()), ('stars', models.IntegerField(\n choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)])), ('feautured',\n models.BooleanField(default=True)), ('destination', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'tours.Destination'))]), migrations.CreateModel(name='TourDetail',\n fields=[('id', models.AutoField(auto_created=True, primary_key=True,\n serialize=False, verbose_name='ID')), ('title', models.CharField(\n max_length=50)), ('descreption', models.TextField()), ('tour',\n models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'tours.Tour'))]), migrations.AddField(model_name='gallery', name=\n 'tour', field=models.ForeignKey(on_delete=django.db.models.deletion\n .CASCADE, to='tours.Tour'))]\n",
"step-5": "# Generated by Django 2.1.5 on 2019-01-20 18:11\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Destination',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50)),\n ('image', models.ImageField(upload_to='img/destinations')),\n ],\n ),\n migrations.CreateModel(\n name='Gallery',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=50)),\n ('image', models.ImageField(upload_to='img/tours')),\n ],\n ),\n migrations.CreateModel(\n name='Tour',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=50, verbose_name='title for admin')),\n ('status', models.BooleanField(default=False)),\n ('price', models.IntegerField()),\n ('stars', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)])),\n ('feautured', models.BooleanField(default=True)),\n ('destination', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tours.Destination')),\n ],\n ),\n migrations.CreateModel(\n name='TourDetail',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('title', models.CharField(max_length=50)),\n ('descreption', models.TextField()),\n ('tour', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tours.Tour')),\n ],\n ),\n migrations.AddField(\n model_name='gallery',\n name='tour',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tours.Tour'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def weights_init(m):
    """Parameter initializer for use with ``model.apply(weights_init)``.

    Linear layers get weights drawn from N(0, 0.001) and a zero bias;
    all other module types are left untouched.
    """
    # isinstance (instead of a type() equality check) also covers
    # subclasses of nn.Linear.
    if isinstance(m, nn.Linear):
        m.weight.data.normal_(0.0, 0.001)
        # Layers created with bias=False have m.bias == None; the old
        # code crashed on them.
        if m.bias is not None:
            m.bias.data.fill_(0.0)
def update_lr(optimizer, lr):
    """Overwrite the learning rate of every parameter group of *optimizer*
    with *lr* (used for manual learning-rate decay between epochs)."""
    for group in optimizer.param_groups:
        group.update(lr=lr)
<|reserved_special_token_0|>
class ConvNet(nn.Module):
    """VGG-style CNN: repeated [Conv3x3 -> Dropout -> (BatchNorm) -> ReLU ->
    MaxPool2x2] blocks followed by one fully-connected classifier.

    Every block halves the spatial resolution, so the input must be at
    least 2**len(hidden_layers) pixels per side for the final feature map
    to be 1x1 (which the flatten step in forward() relies on).

    Args:
        input_size: number of input channels (3 for RGB images).
        hidden_layers: output channel count of each conv block, in order.
        num_classes: number of output logits of the final linear layer.
        norm_layer: 'BN' inserts BatchNorm2d after every conv; None disables.
        dropout_p: dropout probability after each conv (default 0 = no
            dropout; previously read from a module-level global).
    """

    def __init__(self, input_size, hidden_layers, num_classes,
                 norm_layer=None, dropout_p=0):
        super(ConvNet, self).__init__()
        layers = []
        # Channel sizes of consecutive convolutions: input -> h0 -> h1 -> ...
        # (replaces the duplicated "first block + remaining blocks" code).
        channels = [input_size] + list(hidden_layers)
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            layers.append(nn.Conv2d(c_in, c_out, kernel_size=3, stride=1,
                padding=1))
            layers.append(nn.Dropout(dropout_p))
            if norm_layer == 'BN':
                layers.append(nn.BatchNorm2d(c_out, eps=1e-05, momentum=0.1,
                    affine=True, track_running_stats=True))
            layers.append(nn.ReLU())
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        self.ConvBlocks = nn.Sequential(*layers)
        self.Dout = hidden_layers[-1]
        self.Dense = nn.Linear(hidden_layers[-1], num_classes)

    def forward(self, x):
        """Compute class logits of shape (batch, num_classes) for input x."""
        out = self.ConvBlocks(x)
        # Flatten using the stored feature width instead of a hard-coded 512
        # so any hidden_layers configuration works (bug fix).
        out = out.view(-1, self.Dout)
        out = self.Dense(out)
        return out
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def weights_init(m):
    """Parameter initializer for use with ``model.apply(weights_init)``.

    Linear layers get weights drawn from N(0, 0.001) and a zero bias;
    all other module types are left untouched.
    """
    # isinstance (instead of a type() equality check) also covers
    # subclasses of nn.Linear.
    if isinstance(m, nn.Linear):
        m.weight.data.normal_(0.0, 0.001)
        # Layers created with bias=False have m.bias == None; the old
        # code crashed on them.
        if m.bias is not None:
            m.bias.data.fill_(0.0)
def update_lr(optimizer, lr):
    """Overwrite the learning rate of every parameter group of *optimizer*
    with *lr* (used for manual learning-rate decay between epochs)."""
    for group in optimizer.param_groups:
        group.update(lr=lr)
<|reserved_special_token_0|>
class ConvNet(nn.Module):
    """VGG-style CNN: repeated [Conv3x3 -> Dropout -> (BatchNorm) -> ReLU ->
    MaxPool2x2] blocks followed by one fully-connected classifier.

    Every block halves the spatial resolution, so the input must be at
    least 2**len(hidden_layers) pixels per side for the final feature map
    to be 1x1 (which the flatten step in forward() relies on).

    Args:
        input_size: number of input channels (3 for RGB images).
        hidden_layers: output channel count of each conv block, in order.
        num_classes: number of output logits of the final linear layer.
        norm_layer: 'BN' inserts BatchNorm2d after every conv; None disables.
        dropout_p: dropout probability after each conv (default 0 = no
            dropout; previously read from a module-level global).
    """

    def __init__(self, input_size, hidden_layers, num_classes,
                 norm_layer=None, dropout_p=0):
        super(ConvNet, self).__init__()
        layers = []
        # Channel sizes of consecutive convolutions: input -> h0 -> h1 -> ...
        # (replaces the duplicated "first block + remaining blocks" code).
        channels = [input_size] + list(hidden_layers)
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            layers.append(nn.Conv2d(c_in, c_out, kernel_size=3, stride=1,
                padding=1))
            layers.append(nn.Dropout(dropout_p))
            if norm_layer == 'BN':
                layers.append(nn.BatchNorm2d(c_out, eps=1e-05, momentum=0.1,
                    affine=True, track_running_stats=True))
            layers.append(nn.ReLU())
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        self.ConvBlocks = nn.Sequential(*layers)
        self.Dout = hidden_layers[-1]
        self.Dense = nn.Linear(hidden_layers[-1], num_classes)

    def forward(self, x):
        """Compute class logits of shape (batch, num_classes) for input x."""
        out = self.ConvBlocks(x)
        # Flatten using the stored feature width instead of a hard-coded 512
        # so any hidden_layers configuration works (bug fix).
        out = out.view(-1, self.Dout)
        out = self.Dense(out)
        return out
<|reserved_special_token_0|>
def VisualizeFilter(model):
    """Display the 3x3 RGB filters of the model's first conv layer as a grid.

    Assumes the first parameter tensor holds exactly 128 filters of shape
    (3, 3, 3), arranged here as an 8x16 grid with 1-pixel gaps -- TODO
    confirm this matches the model's first-layer configuration.
    """
    # 8 rows x 16 cols of 3x3 tiles with a 1-px gap; the trailing gap is
    # trimmed (7*4+3 rows, 15*4+3 cols).
    kernel_map = np.zeros((7 * 4 + 3, 15 * 4 + 3, 3))
    # First parameter of the model == weight of the first Conv2d layer.
    kernels = list(model.parameters())[0]
    kernels = kernels.to('cpu')
    kernels = kernels.data.numpy()
    # Min-max normalize over the whole tensor so imshow gets values in [0, 1].
    kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())
    cnt = 0
    for i in range(0, 8 * 4, 4):
        for j in range(0, 16 * 4, 4):
            # NOTE(review): kernels[cnt] is laid out (C, H, W) but the target
            # slice is (H, W, C); shapes coincide at 3x3x3, so the color
            # channels may be transposed -- verify.
            kernel_map[i:i + 3, j:j + 3, :] = kernels[cnt]
            cnt = cnt + 1
    plt.figure(figsize=(20, 10))
    plt.imshow(kernel_map)
    plt.show()
    pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def weights_init(m):
    """Parameter initializer for use with ``model.apply(weights_init)``.

    Linear layers get weights drawn from N(0, 0.001) and a zero bias;
    all other module types are left untouched.
    """
    # isinstance (instead of a type() equality check) also covers
    # subclasses of nn.Linear.
    if isinstance(m, nn.Linear):
        m.weight.data.normal_(0.0, 0.001)
        # Layers created with bias=False have m.bias == None; the old
        # code crashed on them.
        if m.bias is not None:
            m.bias.data.fill_(0.0)
def update_lr(optimizer, lr):
    """Overwrite the learning rate of every parameter group of *optimizer*
    with *lr* (used for manual learning-rate decay between epochs)."""
    for group in optimizer.param_groups:
        group.update(lr=lr)
<|reserved_special_token_0|>
print('Using device: %s' % device)
<|reserved_special_token_0|>
print(hidden_size)
<|reserved_special_token_0|>
# Training-time data augmentation for CIFAR-10: random 32x32 crop (with
# 4-px padding), horizontal/vertical flips, 2-degree rotation, random
# grayscale, color jitter, and a random translation of up to 20% per axis.
data_aug_transforms += [transforms.RandomCrop(32, padding=4), transforms.
    RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.
    RandomRotation(2), transforms.RandomGrayscale(), transforms.ColorJitter
    (brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05), transforms.
    RandomAffine(0, translate=[0.2, 0.2], scale=None, shear=0, resample=
    False, fillcolor=0)]
<|reserved_special_token_0|>
class ConvNet(nn.Module):
    """VGG-style CNN: repeated [Conv3x3 -> Dropout -> (BatchNorm) -> ReLU ->
    MaxPool2x2] blocks followed by one fully-connected classifier.

    Every block halves the spatial resolution, so the input must be at
    least 2**len(hidden_layers) pixels per side for the final feature map
    to be 1x1 (which the flatten step in forward() relies on).

    Args:
        input_size: number of input channels (3 for RGB images).
        hidden_layers: output channel count of each conv block, in order.
        num_classes: number of output logits of the final linear layer.
        norm_layer: 'BN' inserts BatchNorm2d after every conv; None disables.
        dropout_p: dropout probability after each conv (default 0 = no
            dropout; previously read from a module-level global).
    """

    def __init__(self, input_size, hidden_layers, num_classes,
                 norm_layer=None, dropout_p=0):
        super(ConvNet, self).__init__()
        layers = []
        # Channel sizes of consecutive convolutions: input -> h0 -> h1 -> ...
        # (replaces the duplicated "first block + remaining blocks" code).
        channels = [input_size] + list(hidden_layers)
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            layers.append(nn.Conv2d(c_in, c_out, kernel_size=3, stride=1,
                padding=1))
            layers.append(nn.Dropout(dropout_p))
            if norm_layer == 'BN':
                layers.append(nn.BatchNorm2d(c_out, eps=1e-05, momentum=0.1,
                    affine=True, track_running_stats=True))
            layers.append(nn.ReLU())
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        self.ConvBlocks = nn.Sequential(*layers)
        self.Dout = hidden_layers[-1]
        self.Dense = nn.Linear(hidden_layers[-1], num_classes)

    def forward(self, x):
        """Compute class logits of shape (batch, num_classes) for input x."""
        out = self.ConvBlocks(x)
        # Flatten using the stored feature width instead of a hard-coded 512
        # so any hidden_layers configuration works (bug fix).
        out = out.view(-1, self.Dout)
        out = self.Dense(out)
        return out
def PrintModelSize(model, disp=True):
    """Return the total number of parameter elements in *model*.

    When *disp* is true the count is also printed to stdout.
    """
    model_sz = sum(p.nelement() for p in model.parameters())
    if disp:
        print('\nNumber of parameters: ', model_sz)
        print('\n')
    return model_sz
def VisualizeFilter(model):
    """Display the 3x3 RGB filters of the model's first conv layer as a grid.

    Assumes the first parameter tensor holds exactly 128 filters of shape
    (3, 3, 3), arranged here as an 8x16 grid with 1-pixel gaps -- TODO
    confirm this matches the model's first-layer configuration.
    """
    # 8 rows x 16 cols of 3x3 tiles with a 1-px gap; the trailing gap is
    # trimmed (7*4+3 rows, 15*4+3 cols).
    kernel_map = np.zeros((7 * 4 + 3, 15 * 4 + 3, 3))
    # First parameter of the model == weight of the first Conv2d layer.
    kernels = list(model.parameters())[0]
    kernels = kernels.to('cpu')
    kernels = kernels.data.numpy()
    # Min-max normalize over the whole tensor so imshow gets values in [0, 1].
    kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())
    cnt = 0
    for i in range(0, 8 * 4, 4):
        for j in range(0, 16 * 4, 4):
            # NOTE(review): kernels[cnt] is laid out (C, H, W) but the target
            # slice is (H, W, C); shapes coincide at 3x3x3, so the color
            # channels may be transposed -- verify.
            kernel_map[i:i + 3, j:j + 3, :] = kernels[cnt]
            cnt = cnt + 1
    plt.figure(figsize=(20, 10))
    plt.imshow(kernel_map)
    plt.show()
    pass
<|reserved_special_token_0|>
# Initialize linear-layer weights and print the architecture.
model.apply(weights_init)
print(model)
# Move a single batch to the device (loader/device-transfer sanity check).
for i, (images, labels) in enumerate(train_loader):
    images = images.to(device)
    break
# Report the total number of parameters.
PrintModelSize(model)
<|reserved_special_token_0|>
# Main training loop: one optimization pass over the train split per epoch,
# followed by a validation pass and best-model bookkeeping.
for epoch in range(num_epochs):
    model.train()
    loss_iter = 0
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        # Forward, backward, parameter update.
        outputs = model(images)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_iter += loss.item()
        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +
                1, num_epochs, i + 1, total_step, loss.item()))
    loss_train.append(loss_iter / (len(train_loader) * batch_size))
    # Exponential learning-rate decay once per epoch.
    lr *= learning_rate_decay
    update_lr(optimizer, lr)
    # Validation pass (no gradients needed).
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        loss_iter = 0
        for images, labels in val_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            loss = criterion(outputs, labels)
            loss_iter += loss.item()
        loss_val.append(loss_iter / (len(val_loader) * batch_size))
        accuracy = 100 * correct / total
        accuracy_val.append(accuracy)
        print('Validation accuracy is: {} %'.format(accuracy))
        # Early-stopping bookkeeping: snapshot the weights whenever the
        # validation accuracy improves.
        if accuracy > best_accuracy:
            best_model.load_state_dict(model.state_dict())
            best_accuracy = accuracy
model.eval()
# Plot train/val loss curves and the validation-accuracy curve.
plt.figure(2)
plt.plot(loss_train, 'r', label='Train loss')
plt.plot(loss_val, 'g', label='Val loss')
plt.legend()
plt.show()
plt.figure(3)
plt.plot(accuracy_val, 'r', label='Val accuracy')
plt.legend()
plt.show()
# Restore the best validation checkpoint before testing.
model.load_state_dict(best_model.state_dict())
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
        # NOTE: only the first 1000 test images are evaluated.
        if total == 1000:
            break
    print('Accuracy of the network on the {} test images: {} %'.format(
        total, 100 * correct / total))
VisualizeFilter(model)
# Persist the final (best-validation) weights.
torch.save(model.state_dict(), 'model.ckpt')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def weights_init(m):
    """Parameter initializer for use with ``model.apply(weights_init)``.

    Linear layers get weights drawn from N(0, 0.001) and a zero bias;
    all other module types are left untouched.
    """
    # isinstance (instead of a type() equality check) also covers
    # subclasses of nn.Linear.
    if isinstance(m, nn.Linear):
        m.weight.data.normal_(0.0, 0.001)
        # Layers created with bias=False have m.bias == None; the old
        # code crashed on them.
        if m.bias is not None:
            m.bias.data.fill_(0.0)
def update_lr(optimizer, lr):
    """Overwrite the learning rate of every parameter group of *optimizer*
    with *lr* (used for manual learning-rate decay between epochs)."""
    for group in optimizer.param_groups:
        group.update(lr=lr)
# Device configuration: use the GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device: %s' % device)
# Hyper-parameters for the CIFAR-10 ConvNet experiment.
input_size = 3
num_classes = 10
hidden_size = [128, 512, 512, 512, 512]
num_epochs = 20
batch_size = 200
learning_rate = 0.002
learning_rate_decay = 0.95
reg = 0.001
num_training = 49000
num_validation = 1000
norm_layer = None
print(hidden_size)
dropout_p = 0
# Training-time data augmentation (crops, flips, rotation, grayscale,
# color jitter, up-to-20% translations).
data_aug_transforms = []
data_aug_transforms += [transforms.RandomCrop(32, padding=4), transforms.
    RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.
    RandomRotation(2), transforms.RandomGrayscale(), transforms.ColorJitter
    (brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05), transforms.
    RandomAffine(0, translate=[0.2, 0.2], scale=None, shear=0, resample=
    False, fillcolor=0)]
# Normalize each channel to [-1, 1]; augmentation is applied only to the
# training transform.
norm_transform = transforms.Compose(data_aug_transforms + [transforms.
    ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
test_transform = transforms.Compose([transforms.ToTensor(), transforms.
    Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Download CIFAR-10 and split the 50k training images into 49k train / 1k val.
cifar_dataset = torchvision.datasets.CIFAR10(root='datasets/', train=True,
    transform=norm_transform, download=True)
test_dataset = torchvision.datasets.CIFAR10(root='datasets/', train=False,
    transform=test_transform)
mask = list(range(num_training))
train_dataset = torch.utils.data.Subset(cifar_dataset, mask)
mask = list(range(num_training, num_training + num_validation))
val_dataset = torch.utils.data.Subset(cifar_dataset, mask)
# Mini-batch loaders; only the training loader shuffles.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
    batch_size=batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=
    batch_size, shuffle=False)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=
    batch_size, shuffle=False)
class ConvNet(nn.Module):
    """VGG-style CNN: repeated [Conv3x3 -> Dropout -> (BatchNorm) -> ReLU ->
    MaxPool2x2] blocks followed by one fully-connected classifier.

    Every block halves the spatial resolution, so the input must be at
    least 2**len(hidden_layers) pixels per side for the final feature map
    to be 1x1 (which the flatten step in forward() relies on).

    Args:
        input_size: number of input channels (3 for RGB images).
        hidden_layers: output channel count of each conv block, in order.
        num_classes: number of output logits of the final linear layer.
        norm_layer: 'BN' inserts BatchNorm2d after every conv; None disables.
        dropout_p: dropout probability after each conv (default 0 = no
            dropout; previously read from a module-level global).
    """

    def __init__(self, input_size, hidden_layers, num_classes,
                 norm_layer=None, dropout_p=0):
        super(ConvNet, self).__init__()
        layers = []
        # Channel sizes of consecutive convolutions: input -> h0 -> h1 -> ...
        # (replaces the duplicated "first block + remaining blocks" code).
        channels = [input_size] + list(hidden_layers)
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            layers.append(nn.Conv2d(c_in, c_out, kernel_size=3, stride=1,
                padding=1))
            layers.append(nn.Dropout(dropout_p))
            if norm_layer == 'BN':
                layers.append(nn.BatchNorm2d(c_out, eps=1e-05, momentum=0.1,
                    affine=True, track_running_stats=True))
            layers.append(nn.ReLU())
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        self.ConvBlocks = nn.Sequential(*layers)
        self.Dout = hidden_layers[-1]
        self.Dense = nn.Linear(hidden_layers[-1], num_classes)

    def forward(self, x):
        """Compute class logits of shape (batch, num_classes) for input x."""
        out = self.ConvBlocks(x)
        # Flatten using the stored feature width instead of a hard-coded 512
        # so any hidden_layers configuration works (bug fix).
        out = out.view(-1, self.Dout)
        out = self.Dense(out)
        return out
def PrintModelSize(model, disp=True):
    """Return the total number of parameter elements in *model*.

    When *disp* is true the count is also printed to stdout.
    """
    model_sz = sum(p.nelement() for p in model.parameters())
    if disp:
        print('\nNumber of parameters: ', model_sz)
        print('\n')
    return model_sz
def VisualizeFilter(model):
    """Display the 3x3 RGB filters of the model's first conv layer as a grid.

    Assumes the first parameter tensor holds exactly 128 filters of shape
    (3, 3, 3), arranged here as an 8x16 grid with 1-pixel gaps -- TODO
    confirm this matches the model's first-layer configuration.
    """
    # 8 rows x 16 cols of 3x3 tiles with a 1-px gap; the trailing gap is
    # trimmed (7*4+3 rows, 15*4+3 cols).
    kernel_map = np.zeros((7 * 4 + 3, 15 * 4 + 3, 3))
    # First parameter of the model == weight of the first Conv2d layer.
    kernels = list(model.parameters())[0]
    kernels = kernels.to('cpu')
    kernels = kernels.data.numpy()
    # Min-max normalize over the whole tensor so imshow gets values in [0, 1].
    kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())
    cnt = 0
    for i in range(0, 8 * 4, 4):
        for j in range(0, 16 * 4, 4):
            # NOTE(review): kernels[cnt] is laid out (C, H, W) but the target
            # slice is (H, W, C); shapes coincide at 3x3x3, so the color
            # channels may be transposed -- verify.
            kernel_map[i:i + 3, j:j + 3, :] = kernels[cnt]
            cnt = cnt + 1
    plt.figure(figsize=(20, 10))
    plt.imshow(kernel_map)
    plt.show()
    pass
# Build the model and initialize its linear layers.
model = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer
    ).to(device)
model.apply(weights_init)
print(model)
# Move one batch to the device (loader/device-transfer sanity check).
for i, (images, labels) in enumerate(train_loader):
    images = images.to(device)
    break
PrintModelSize(model)
# Loss, optimizer (Adam with L2 weight decay) and training bookkeeping.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
    weight_decay=reg)
lr = learning_rate
total_step = len(train_loader)
loss_train = []
loss_val = []
best_accuracy = 0
accuracy_val = []
# Fresh instance that will hold a copy of the best weights seen so far.
best_model = type(model)(input_size, hidden_size, num_classes, norm_layer=
    norm_layer)
for epoch in range(num_epochs):
    model.train()
    loss_iter = 0
    for i, (images, labels) in enumerate(train_loader):
        images = images.to(device)
        labels = labels.to(device)
        # Forward, backward, parameter update.
        outputs = model(images)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_iter += loss.item()
        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +
                1, num_epochs, i + 1, total_step, loss.item()))
    loss_train.append(loss_iter / (len(train_loader) * batch_size))
    # Exponential learning-rate decay once per epoch.
    lr *= learning_rate_decay
    update_lr(optimizer, lr)
    # Validation pass (no gradients needed).
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        loss_iter = 0
        for images, labels in val_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            loss = criterion(outputs, labels)
            loss_iter += loss.item()
        loss_val.append(loss_iter / (len(val_loader) * batch_size))
        accuracy = 100 * correct / total
        accuracy_val.append(accuracy)
        print('Validation accuracy is: {} %'.format(accuracy))
        # Early-stopping bookkeeping: snapshot the weights whenever the
        # validation accuracy improves.
        if accuracy > best_accuracy:
            best_model.load_state_dict(model.state_dict())
            best_accuracy = accuracy
model.eval()
# Plot train/val loss curves and the validation-accuracy curve.
plt.figure(2)
plt.plot(loss_train, 'r', label='Train loss')
plt.plot(loss_val, 'g', label='Val loss')
plt.legend()
plt.show()
plt.figure(3)
plt.plot(accuracy_val, 'r', label='Val accuracy')
plt.legend()
plt.show()
# Restore the best validation checkpoint before testing.
model.load_state_dict(best_model.state_dict())
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
        # NOTE: only the first 1000 test images are evaluated.
        if total == 1000:
            break
    print('Accuracy of the network on the {} test images: {} %'.format(
        total, 100 * correct / total))
VisualizeFilter(model)
# Persist the final (best-validation) weights.
torch.save(model.state_dict(), 'model.ckpt')
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import numpy as np
import matplotlib.pyplot as plt
def weights_init(m):
    """Parameter initializer for use with ``model.apply(weights_init)``.

    Linear layers get weights drawn from N(0, 0.001) and a zero bias;
    all other module types are left untouched.
    """
    # isinstance (instead of a type() equality check) also covers
    # subclasses of nn.Linear.
    if isinstance(m, nn.Linear):
        m.weight.data.normal_(0.0, 1e-3)
        # Layers created with bias=False have m.bias == None; the old
        # code crashed on them.
        if m.bias is not None:
            m.bias.data.fill_(0.)
def update_lr(optimizer, lr):
    """Overwrite the learning rate of every parameter group of *optimizer*
    with *lr* (used for manual learning-rate decay between epochs)."""
    for group in optimizer.param_groups:
        group.update(lr=lr)
#--------------------------------
# Device configuration: use the GPU when available.
#--------------------------------
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device: %s'%device)
#--------------------------------
# Hyper-parameters for the CIFAR-10 ConvNet experiment.
#--------------------------------
input_size = 3
num_classes = 10
hidden_size = [128, 512, 512, 512, 512]
num_epochs = 20
batch_size = 200
learning_rate = 2e-3
learning_rate_decay = 0.95
reg=0.001
num_training= 49000
num_validation =1000
norm_layer = None #norm_layer="BN"
print(hidden_size)
dropout_p = 0 #probability of dropout
#-------------------------------------------------
# Load the CIFAR-10 dataset
#-------------------------------------------------
#################################################################################
# TODO: Q3.a Choose the right data augmentation transforms with the right      #
# hyper-parameters and put them in the data_aug_transforms variable            #
#################################################################################
data_aug_transforms = []
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Random crop (4-px padding), flips, 2-degree rotation, random grayscale,
# color jitter and up-to-20% translations -- training-time augmentation only.
data_aug_transforms += [transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.RandomRotation(2),
transforms.RandomGrayscale(),
transforms.ColorJitter(brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05),
transforms.RandomAffine(0, translate=[0.2,0.2], scale=None, shear=0, resample=False, fillcolor=0),
]
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Normalize each channel to [-1, 1]; the test transform skips augmentation.
norm_transform = transforms.Compose(data_aug_transforms+[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
test_transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
cifar_dataset = torchvision.datasets.CIFAR10(root='datasets/',
train=True,
transform=norm_transform,
download=True)
test_dataset = torchvision.datasets.CIFAR10(root='datasets/',
train=False,
transform=test_transform
)
#-------------------------------------------------
# Prepare the training and validation splits
# (first 49k train images for training, the remaining 1k for validation).
#-------------------------------------------------
mask = list(range(num_training))
train_dataset = torch.utils.data.Subset(cifar_dataset, mask)
mask = list(range(num_training, num_training + num_validation))
val_dataset = torch.utils.data.Subset(cifar_dataset, mask)
#-------------------------------------------------
# Data loader; only the training loader shuffles.
#-------------------------------------------------
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=batch_size,
shuffle=False)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
#-------------------------------------------------
# Convolutional neural network (Q1.a and Q2.a)
# Set norm_layer for different networks whether using batch normalization
#-------------------------------------------------
class ConvNet(nn.Module):
    """VGG-style CNN: repeated [Conv3x3 -> Dropout -> (BatchNorm) -> ReLU ->
    MaxPool2x2] blocks followed by one fully-connected classifier.

    Every block halves the spatial resolution, so the input must be at
    least 2**len(hidden_layers) pixels per side for the final feature map
    to be 1x1 (which the flatten step in forward() relies on).

    Args:
        input_size: number of input channels (3 for RGB images).
        hidden_layers: output channel count of each conv block, in order.
        num_classes: number of output logits of the final linear layer.
        norm_layer: 'BN' inserts BatchNorm2d after every conv; None disables.
        dropout_p: dropout probability after each conv (default 0 = no
            dropout; previously read from a module-level global).
    """

    def __init__(self, input_size, hidden_layers, num_classes,
                 norm_layer=None, dropout_p=0):
        super(ConvNet, self).__init__()
        layers = []
        # Channel sizes of consecutive convolutions: input -> h0 -> h1 -> ...
        # (replaces the duplicated "first block + remaining blocks" code).
        channels = [input_size] + list(hidden_layers)
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            layers.append(nn.Conv2d(c_in, c_out, kernel_size=3, stride=1,
                padding=1))
            layers.append(nn.Dropout(dropout_p))
            if norm_layer == 'BN':
                layers.append(nn.BatchNorm2d(c_out, eps=1e-05, momentum=0.1,
                    affine=True, track_running_stats=True))
            layers.append(nn.ReLU())
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
        self.ConvBlocks = nn.Sequential(*layers)
        self.Dout = hidden_layers[-1]
        self.Dense = nn.Linear(hidden_layers[-1], num_classes)

    def forward(self, x):
        """Compute class logits of shape (batch, num_classes) for input x."""
        out = self.ConvBlocks(x)
        # Flatten using the stored feature width instead of a hard-coded 512
        # so any hidden_layers configuration works (bug fix).
        out = out.view(-1, self.Dout)
        out = self.Dense(out)
        return out
#-------------------------------------------------
# Calculate the model size (Q1.b)
# if disp is true, print the model parameters, otherwise, only return the number of parameters.
#-------------------------------------------------
def PrintModelSize(model, disp=True):
    """Return the total number of parameter elements in *model*.

    When *disp* is true the count is also printed to stdout.
    """
    model_sz = sum(p.nelement() for p in model.parameters())
    if disp:
        print("\nNumber of parameters: ", model_sz)
        print("\n")
    return model_sz
#-------------------------------------------------
# Calculate the model size (Q1.c)
# visualize the convolution filters of the first convolution layer of the input model
#-------------------------------------------------
def VisualizeFilter(model):
    """Display the 3x3 RGB filters of the model's first conv layer as a grid.

    Assumes the first parameter tensor holds exactly 128 filters of shape
    (3, 3, 3), arranged here as an 8x16 grid with 1-pixel gaps -- TODO
    confirm this matches hidden_size[0].
    """
    # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
    # 8 rows x 16 cols of 3x3 tiles with a 1-px gap; the trailing gap is
    # trimmed (7*4+3 rows, 15*4+3 cols).
    kernel_map = np.zeros((7*4 + 3, 15*4 + 3, 3))
    # First parameter of the model == weight of the first Conv2d layer.
    kernels = list(model.parameters())[0]
    kernels = kernels.to("cpu")
    kernels = kernels.data.numpy()
    # Min-max normalize over the whole tensor so imshow gets values in [0, 1].
    kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())
    cnt = 0
    for i in range(0, 8*4,4):
        for j in range(0, 16*4, 4):
            # NOTE(review): kernels[cnt] is laid out (C, H, W) but the target
            # slice is (H, W, C); shapes coincide at 3x3x3, so the color
            # channels may be transposed -- verify.
            kernel_map[i:i+3, j:j+3, :] = kernels[cnt]
            cnt = cnt + 1
    plt.figure(figsize=(20, 10))
    plt.imshow(kernel_map)
    plt.show()
    pass
    # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
#======================================================================================
# Q1.a: Implementing convolutional neural net in PyTorch
#======================================================================================
# In this question we will implement a convolutional neural networks using the PyTorch
# library. Please complete the code for the ConvNet class evaluating the model
#--------------------------------------------------------------------------------------
# Build the model and initialize its linear layers.
model = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer).to(device)
# Q2.a - Initialize the model with correct batch norm layer
model.apply(weights_init)
# Print the model
print(model)
# Move one batch to the device (loader/device-transfer sanity check).
for i, (images, labels) in enumerate(train_loader):
    images = images.to(device)
    break
# Print model size
#======================================================================================
# Q1.b: Implementing the function to count the number of trainable parameters in the model
#======================================================================================
PrintModelSize(model)
#======================================================================================
# Q1.a: Implementing the function to visualize the filters in the first conv layers.
# Visualize the filters before training
#======================================================================================
#VisualizeFilter(model)
# Loss and optimizer (Adam with L2 regularization via weight_decay=reg)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=reg)
# Train the model
lr = learning_rate
total_step = len(train_loader)
loss_train = []
loss_val = []
best_accuracy = 0
accuracy_val = []
best_model = type(model)(input_size, hidden_size, num_classes, norm_layer=norm_layer) # get a new instance
#best_model = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer)
for epoch in range(num_epochs):
    model.train()
    loss_iter = 0
    for i, (images, labels) in enumerate(train_loader):
        # Move tensors to the configured device
        images = images.to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        loss_iter += loss.item()
        if (i+1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                   .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
    loss_train.append(loss_iter/(len(train_loader)*batch_size))
    # Code to update the lr (exponential decay once per epoch)
    lr *= learning_rate_decay
    update_lr(optimizer, lr)
    # Validation pass (no gradients needed).
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        loss_iter = 0
        for images, labels in val_loader:
            images = images.to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
            loss = criterion(outputs, labels)
            loss_iter += loss.item()
        loss_val.append(loss_iter/(len(val_loader)*batch_size))
        accuracy = 100 * correct / total
        accuracy_val.append(accuracy)
        print('Validation accuracy is: {} %'.format(accuracy))
        #################################################################################
        # TODO: Q2.b Implement the early stopping mechanism to save the model which has #
        # the model with the best validation accuracy so-far (use best_model).          #
        #################################################################################
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # Snapshot the weights whenever validation accuracy improves.
        if accuracy > best_accuracy:
            best_model.load_state_dict(model.state_dict())
            best_accuracy=accuracy
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
model.eval()
plt.figure(2)
plt.plot(loss_train, 'r', label='Train loss')
plt.plot(loss_val, 'g', label='Val loss')
plt.legend()
plt.show()
plt.figure(3)
plt.plot(accuracy_val, 'r', label='Val accuracy')
plt.legend()
plt.show()
#################################################################################
# TODO: Q2.b Implement the early stopping mechanism to load the weights from the#
# best model so far and perform testing with this model.                        #
#################################################################################
# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
# Restore the best validation checkpoint before testing.
model.load_state_dict(best_model.state_dict())
# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
#Compute accuracy on the test set (only the first 1000 test images)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.to(device)
        labels = labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
        if total == 1000:
            break
    print('Accuracy of the network on the {} test images: {} %'.format(total, 100 * correct / total))
# Q1.c: Implementing the function to visualize the filters in the first conv layers.
# Visualize the filters before training
VisualizeFilter(model)
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
|
flexible
|
{
"blob_id": "0553bd4c7261197a1a80c5551305a16e7bfdc761",
"index": 2398,
"step-1": "<mask token>\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 0.001)\n m.bias.data.fill_(0.0)\n\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n<mask token>\n\n\nclass ConvNet(nn.Module):\n\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None\n ):\n super(ConvNet, self).__init__()\n layers = []\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3,\n stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1,\n padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n def forward(self, x):\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n return out\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 0.001)\n m.bias.data.fill_(0.0)\n\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n<mask token>\n\n\nclass ConvNet(nn.Module):\n\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None\n ):\n super(ConvNet, self).__init__()\n layers = []\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3,\n stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1,\n padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n def forward(self, x):\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n return out\n\n\n<mask token>\n\n\ndef VisualizeFilter(model):\n kernel_map = np.zeros((7 * 4 + 3, 15 * 4 + 3, 3))\n kernels = list(model.parameters())[0]\n kernels = kernels.to('cpu')\n kernels = kernels.data.numpy()\n kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())\n cnt = 0\n for i in range(0, 8 * 4, 4):\n for j in range(0, 16 * 4, 4):\n kernel_map[i:i + 3, j:j + 3, :] = kernels[cnt]\n cnt = cnt + 1\n plt.figure(figsize=(20, 10))\n plt.imshow(kernel_map)\n plt.show()\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 0.001)\n m.bias.data.fill_(0.0)\n\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n<mask token>\nprint('Using device: %s' % device)\n<mask token>\nprint(hidden_size)\n<mask token>\ndata_aug_transforms += [transforms.RandomCrop(32, padding=4), transforms.\n RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.\n RandomRotation(2), transforms.RandomGrayscale(), transforms.ColorJitter\n (brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05), transforms.\n RandomAffine(0, translate=[0.2, 0.2], scale=None, shear=0, resample=\n False, fillcolor=0)]\n<mask token>\n\n\nclass ConvNet(nn.Module):\n\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None\n ):\n super(ConvNet, self).__init__()\n layers = []\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3,\n stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1,\n padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n def forward(self, x):\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n return out\n\n\ndef PrintModelSize(model, disp=True):\n model_sz = 0\n for parameter in 
model.parameters():\n model_sz += parameter.nelement()\n if disp == True:\n print('\\nNumber of parameters: ', model_sz)\n print('\\n')\n return model_sz\n\n\ndef VisualizeFilter(model):\n kernel_map = np.zeros((7 * 4 + 3, 15 * 4 + 3, 3))\n kernels = list(model.parameters())[0]\n kernels = kernels.to('cpu')\n kernels = kernels.data.numpy()\n kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())\n cnt = 0\n for i in range(0, 8 * 4, 4):\n for j in range(0, 16 * 4, 4):\n kernel_map[i:i + 3, j:j + 3, :] = kernels[cnt]\n cnt = cnt + 1\n plt.figure(figsize=(20, 10))\n plt.imshow(kernel_map)\n plt.show()\n pass\n\n\n<mask token>\nmodel.apply(weights_init)\nprint(model)\nfor i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n break\nPrintModelSize(model)\n<mask token>\nfor epoch in range(num_epochs):\n model.train()\n loss_iter = 0\n for i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n loss = criterion(outputs, labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_iter += loss.item()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\n loss_train.append(loss_iter / (len(train_loader) * batch_size))\n lr *= learning_rate_decay\n update_lr(optimizer, lr)\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n loss_iter = 0\n for images, labels in val_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n loss = criterion(outputs, labels)\n loss_iter += loss.item()\n loss_val.append(loss_iter / (len(val_loader) * batch_size))\n accuracy = 100 * correct / total\n accuracy_val.append(accuracy)\n print('Validation accuracy is: {} %'.format(accuracy))\n if accuracy > 
best_accuracy:\n best_model.load_state_dict(model.state_dict())\n best_accuracy = accuracy\nmodel.eval()\nplt.figure(2)\nplt.plot(loss_train, 'r', label='Train loss')\nplt.plot(loss_val, 'g', label='Val loss')\nplt.legend()\nplt.show()\nplt.figure(3)\nplt.plot(accuracy_val, 'r', label='Val accuracy')\nplt.legend()\nplt.show()\nmodel.load_state_dict(best_model.state_dict())\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if total == 1000:\n break\n print('Accuracy of the network on the {} test images: {} %'.format(\n total, 100 * correct / total))\nVisualizeFilter(model)\ntorch.save(model.state_dict(), 'model.ckpt')\n",
"step-4": "<mask token>\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 0.001)\n m.bias.data.fill_(0.0)\n\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint('Using device: %s' % device)\ninput_size = 3\nnum_classes = 10\nhidden_size = [128, 512, 512, 512, 512]\nnum_epochs = 20\nbatch_size = 200\nlearning_rate = 0.002\nlearning_rate_decay = 0.95\nreg = 0.001\nnum_training = 49000\nnum_validation = 1000\nnorm_layer = None\nprint(hidden_size)\ndropout_p = 0\ndata_aug_transforms = []\ndata_aug_transforms += [transforms.RandomCrop(32, padding=4), transforms.\n RandomHorizontalFlip(), transforms.RandomVerticalFlip(), transforms.\n RandomRotation(2), transforms.RandomGrayscale(), transforms.ColorJitter\n (brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05), transforms.\n RandomAffine(0, translate=[0.2, 0.2], scale=None, shear=0, resample=\n False, fillcolor=0)]\nnorm_transform = transforms.Compose(data_aug_transforms + [transforms.\n ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\ntest_transform = transforms.Compose([transforms.ToTensor(), transforms.\n Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\ncifar_dataset = torchvision.datasets.CIFAR10(root='datasets/', train=True,\n transform=norm_transform, download=True)\ntest_dataset = torchvision.datasets.CIFAR10(root='datasets/', train=False,\n transform=test_transform)\nmask = list(range(num_training))\ntrain_dataset = torch.utils.data.Subset(cifar_dataset, mask)\nmask = list(range(num_training, num_training + num_validation))\nval_dataset = torch.utils.data.Subset(cifar_dataset, mask)\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size, shuffle=True)\nval_loader = torch.utils.data.DataLoader(dataset=val_dataset, batch_size=\n batch_size, shuffle=False)\ntest_loader = 
torch.utils.data.DataLoader(dataset=test_dataset, batch_size=\n batch_size, shuffle=False)\n\n\nclass ConvNet(nn.Module):\n\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None\n ):\n super(ConvNet, self).__init__()\n layers = []\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3,\n stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05,\n momentum=0.1, affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1,\n padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer == 'BN':\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1,\n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n def forward(self, x):\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n return out\n\n\ndef PrintModelSize(model, disp=True):\n model_sz = 0\n for parameter in model.parameters():\n model_sz += parameter.nelement()\n if disp == True:\n print('\\nNumber of parameters: ', model_sz)\n print('\\n')\n return model_sz\n\n\ndef VisualizeFilter(model):\n kernel_map = np.zeros((7 * 4 + 3, 15 * 4 + 3, 3))\n kernels = list(model.parameters())[0]\n kernels = kernels.to('cpu')\n kernels = kernels.data.numpy()\n kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())\n cnt = 0\n for i in range(0, 8 * 4, 4):\n for j in range(0, 16 * 4, 4):\n kernel_map[i:i + 3, j:j + 3, :] = kernels[cnt]\n cnt = cnt + 1\n plt.figure(figsize=(20, 10))\n plt.imshow(kernel_map)\n plt.show()\n pass\n\n\nmodel = ConvNet(input_size, hidden_size, 
num_classes, norm_layer=norm_layer\n ).to(device)\nmodel.apply(weights_init)\nprint(model)\nfor i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n break\nPrintModelSize(model)\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,\n weight_decay=reg)\nlr = learning_rate\ntotal_step = len(train_loader)\nloss_train = []\nloss_val = []\nbest_accuracy = 0\naccuracy_val = []\nbest_model = type(model)(input_size, hidden_size, num_classes, norm_layer=\n norm_layer)\nfor epoch in range(num_epochs):\n model.train()\n loss_iter = 0\n for i, (images, labels) in enumerate(train_loader):\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n loss = criterion(outputs, labels)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n loss_iter += loss.item()\n if (i + 1) % 100 == 0:\n print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch +\n 1, num_epochs, i + 1, total_step, loss.item()))\n loss_train.append(loss_iter / (len(train_loader) * batch_size))\n lr *= learning_rate_decay\n update_lr(optimizer, lr)\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n loss_iter = 0\n for images, labels in val_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n loss = criterion(outputs, labels)\n loss_iter += loss.item()\n loss_val.append(loss_iter / (len(val_loader) * batch_size))\n accuracy = 100 * correct / total\n accuracy_val.append(accuracy)\n print('Validation accuracy is: {} %'.format(accuracy))\n if accuracy > best_accuracy:\n best_model.load_state_dict(model.state_dict())\n best_accuracy = accuracy\nmodel.eval()\nplt.figure(2)\nplt.plot(loss_train, 'r', label='Train loss')\nplt.plot(loss_val, 'g', label='Val loss')\nplt.legend()\nplt.show()\nplt.figure(3)\nplt.plot(accuracy_val, 'r', 
label='Val accuracy')\nplt.legend()\nplt.show()\nmodel.load_state_dict(best_model.state_dict())\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if total == 1000:\n break\n print('Accuracy of the network on the {} test images: {} %'.format(\n total, 100 * correct / total))\nVisualizeFilter(model)\ntorch.save(model.state_dict(), 'model.ckpt')\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torchvision\nimport torchvision.transforms as transforms\nimport numpy as np\n\nimport matplotlib.pyplot as plt\n\n\ndef weights_init(m):\n if type(m) == nn.Linear:\n m.weight.data.normal_(0.0, 1e-3)\n m.bias.data.fill_(0.)\n\ndef update_lr(optimizer, lr):\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n\n\n\n#--------------------------------\n# Device configuration\n#--------------------------------\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint('Using device: %s'%device)\n\n#--------------------------------\n# Hyper-parameters\n#--------------------------------\ninput_size = 3\nnum_classes = 10\nhidden_size = [128, 512, 512, 512, 512]\nnum_epochs = 20\nbatch_size = 200\nlearning_rate = 2e-3\nlearning_rate_decay = 0.95\nreg=0.001\nnum_training= 49000\nnum_validation =1000\nnorm_layer = None #norm_layer=\"BN\"\nprint(hidden_size)\n\ndropout_p = 0 #probability of dropout\n\n\n\n#-------------------------------------------------\n# Load the CIFAR-10 dataset\n#-------------------------------------------------\n#################################################################################\n# TODO: Q3.a Choose the right data augmentation transforms with the right #\n# hyper-parameters and put them in the data_aug_transforms variable #\n#################################################################################\ndata_aug_transforms = []\n# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\ndata_aug_transforms += [transforms.RandomCrop(32, padding=4), \n transforms.RandomHorizontalFlip(), \n transforms.RandomVerticalFlip(), \n transforms.RandomRotation(2),\n transforms.RandomGrayscale(),\n transforms.ColorJitter(brightness=0.1, contrast=0.05, saturation=0.5, hue=0.05),\n transforms.RandomAffine(0, translate=[0.2,0.2], scale=None, shear=0, resample=False, fillcolor=0),\n ]\n\n# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS 
LINE)*****\nnorm_transform = transforms.Compose(data_aug_transforms+[transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\ntest_transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\ncifar_dataset = torchvision.datasets.CIFAR10(root='datasets/',\n train=True,\n transform=norm_transform,\n download=True)\n\ntest_dataset = torchvision.datasets.CIFAR10(root='datasets/',\n train=False,\n transform=test_transform\n )\n\n#-------------------------------------------------\n# Prepare the training and validation splits\n#-------------------------------------------------\nmask = list(range(num_training))\ntrain_dataset = torch.utils.data.Subset(cifar_dataset, mask)\nmask = list(range(num_training, num_training + num_validation))\nval_dataset = torch.utils.data.Subset(cifar_dataset, mask)\n\n#-------------------------------------------------\n# Data loader\n#-------------------------------------------------\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True)\n\nval_loader = torch.utils.data.DataLoader(dataset=val_dataset,\n batch_size=batch_size,\n shuffle=False)\n\ntest_loader = torch.utils.data.DataLoader(dataset=test_dataset,\n batch_size=batch_size,\n shuffle=False)\n\n\n#-------------------------------------------------\n# Convolutional neural network (Q1.a and Q2.a)\n# Set norm_layer for different networks whether using batch normalization\n#-------------------------------------------------\nclass ConvNet(nn.Module):\n def __init__(self, input_size, hidden_layers, num_classes, norm_layer=None):\n super(ConvNet, self).__init__()\n #################################################################################\n # TODO: Initialize the modules required to implement the convolutional layer #\n # described in the exercise. #\n # For Q1.a make use of conv2d and relu layers from the torch.nn module. 
#\n # For Q2.a make use of BatchNorm2d layer from the torch.nn module. #\n # For Q3.b Use Dropout layer from the torch.nn module. #\n #################################################################################\n layers = []\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n # First ConvBlock with input size (i.e. C=3) and first hidden layer(i.e. 128)\n layers.append(nn.Conv2d(input_size, hidden_layers[0], kernel_size=3, stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer==\"BN\":\n layers.append(nn.BatchNorm2d(hidden_layers[0], eps=1e-05, momentum=0.1, \n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n\n # Adding the other blocks\n for Din, Dout in zip(hidden_layers[:-1], hidden_layers[1:]):\n \n layers.append(nn.Conv2d(Din, Dout, kernel_size=3, stride=1, padding=1))\n layers.append(nn.Dropout(dropout_p))\n if norm_layer==\"BN\":\n layers.append(nn.BatchNorm2d(Dout, eps=1e-05, momentum=0.1, \n affine=True, track_running_stats=True))\n layers.append(nn.ReLU())\n layers.append(nn.MaxPool2d(kernel_size=2, stride=2))\n\t\t\n # stacking convolutional blocks\n self.ConvBlocks = nn.Sequential(*layers)\n self.Dout = hidden_layers[-1]\n\n # Fully connected layer\n self.Dense = nn.Linear(hidden_layers[-1], num_classes)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n def forward(self, x):\n #################################################################################\n # TODO: Implement the forward pass computations #\n #################################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n out = self.ConvBlocks(x)\n out = out.view(-1, 512)\n out = self.Dense(out)\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n return out\n\n\n\n#-------------------------------------------------\n# Calculate the model size (Q1.b)\n# if disp is 
true, print the model parameters, otherwise, only return the number of parameters.\n#-------------------------------------------------\ndef PrintModelSize(model, disp=True):\n #################################################################################\n # TODO: Implement the function to count the number of trainable parameters in #\n # the input model. This useful to track the capacity of the model you are #\n # training #\n #################################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n model_sz = 0\n for parameter in model.parameters():\n model_sz += parameter.nelement()\n if disp == True:\n print(\"\\nNumber of parameters: \", model_sz)\n print(\"\\n\")\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n return model_sz\n\n\n\n#-------------------------------------------------\n# Calculate the model size (Q1.c)\n# visualize the convolution filters of the first convolution layer of the input model\n#-------------------------------------------------\ndef VisualizeFilter(model):\n #################################################################################\n # TODO: Implement the functiont to visualize the weights in the first conv layer#\n # in the model. Visualize them as a single image of stacked filters. 
#\n # You can use matlplotlib.imshow to visualize an image in python #\n #################################################################################\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n kernel_map = np.zeros((7*4 + 3, 15*4 + 3, 3))\n\n kernels = list(model.parameters())[0]\n kernels = kernels.to(\"cpu\")\n kernels = kernels.data.numpy()\n\n kernels = (kernels - kernels.min()) / (kernels.max() - kernels.min())\n\n cnt = 0\n for i in range(0, 8*4,4):\n for j in range(0, 16*4, 4):\n kernel_map[i:i+3, j:j+3, :] = kernels[cnt]\n cnt = cnt + 1\n\n plt.figure(figsize=(20, 10))\n plt.imshow(kernel_map)\n plt.show()\n\n pass\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n\n\n#======================================================================================\n# Q1.a: Implementing convolutional neural net in PyTorch\n#======================================================================================\n# In this question we will implement a convolutional neural networks using the PyTorch\n# library. 
Please complete the code for the ConvNet class evaluating the model\n#--------------------------------------------------------------------------------------\n\nmodel = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer).to(device)\n# Q2.a - Initialize the model with correct batch norm layer\n\nmodel.apply(weights_init)\n# Print the model\nprint(model)\n\nfor i, (images, labels) in enumerate(train_loader):\n\timages = images.to(device)\n\n\tbreak\n\n# Print model size\n#======================================================================================\n# Q1.b: Implementing the function to count the number of trainable parameters in the model\n#======================================================================================\nPrintModelSize(model)\n#======================================================================================\n# Q1.a: Implementing the function to visualize the filters in the first conv layers.\n# Visualize the filters before training\n#======================================================================================\n#VisualizeFilter(model)\n\n\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=reg)\n\n# Train the model\nlr = learning_rate\ntotal_step = len(train_loader)\nloss_train = []\nloss_val = []\nbest_accuracy = 0\naccuracy_val = []\nbest_model = type(model)(input_size, hidden_size, num_classes, norm_layer=norm_layer) # get a new instance\n#best_model = ConvNet(input_size, hidden_size, num_classes, norm_layer=norm_layer)\nfor epoch in range(num_epochs):\n\n model.train()\n\n loss_iter = 0\n for i, (images, labels) in enumerate(train_loader):\n # Move tensors to the configured device\n images = images.to(device)\n labels = labels.to(device)\n\n # Forward pass\n outputs = model(images)\n loss = criterion(outputs, labels)\n\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n 
loss_iter += loss.item()\n \n if (i+1) % 100 == 0:\n print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'\n .format(epoch+1, num_epochs, i+1, total_step, loss.item()))\n \n loss_train.append(loss_iter/(len(train_loader)*batch_size))\n\n \n # Code to update the lr\n lr *= learning_rate_decay\n update_lr(optimizer, lr)\n \n \n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n loss_iter = 0\n for images, labels in val_loader:\n images = images.to(device)\n labels = labels.to(device)\n \n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n \n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n \n loss = criterion(outputs, labels)\n loss_iter += loss.item()\n \n loss_val.append(loss_iter/(len(val_loader)*batch_size))\n\n accuracy = 100 * correct / total\n accuracy_val.append(accuracy)\n print('Validation accuracy is: {} %'.format(accuracy))\n #################################################################################\n # TODO: Q2.b Implement the early stopping mechanism to save the model which has #\n # the model with the best validation accuracy so-far (use best_model). 
#\n #################################################################################\n\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n if accuracy > best_accuracy:\n best_model.load_state_dict(model.state_dict())\n best_accuracy=accuracy\n \n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n \n\n# Test the model\n# In test phase, we don't need to compute gradients (for memory efficiency)\nmodel.eval()\n\n\n\nplt.figure(2)\nplt.plot(loss_train, 'r', label='Train loss')\nplt.plot(loss_val, 'g', label='Val loss')\nplt.legend()\nplt.show()\n\nplt.figure(3)\nplt.plot(accuracy_val, 'r', label='Val accuracy')\nplt.legend()\nplt.show()\n\n\n\n#################################################################################\n# TODO: Q2.b Implement the early stopping mechanism to load the weights from the#\n# best model so far and perform testing with this model. #\n#################################################################################\n# *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\nmodel.load_state_dict(best_model.state_dict())\n# *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n#Compute accuracy on the test set\nwith torch.no_grad():\n correct = 0\n total = 0\n for images, labels in test_loader:\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n if total == 1000:\n break\n\n print('Accuracy of the network on the {} test images: {} %'.format(total, 100 * correct / total))\n\n\n\n# Q1.c: Implementing the function to visualize the filters in the first conv layers.\n# Visualize the filters before training\nVisualizeFilter(model)\n\n\n\n# Save the model checkpoint\ntorch.save(model.state_dict(), 'model.ckpt')\n\n",
"step-ids": [
5,
6,
8,
9,
11
]
}
|
[
5,
6,
8,
9,
11
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
hacker_legends.append('Anonymous')
print(hacker_legends)
<|reserved_special_token_0|>
networking.insert(3, 'SSH')
print(networking)
<|reserved_special_token_0|>
ip_addy.remove(5102018)
print(ip_addy)
<|reserved_special_token_0|>
cyber_traits.pop(2)
print(cyber_traits)
<|reserved_special_token_0|>
sec_co.extend(new_co)
print(sec_co)
<|reserved_special_token_0|>
print(cyber_attacks[3])
<|reserved_special_token_0|>
print(dns_list.count(98.105))
<|reserved_special_token_0|>
mr_robot.reverse()
print(mr_robot)
<|reserved_special_token_0|>
ssh_list.sort()
print(ssh_list)
ssh_list.sort(reverse=True)
print(ssh_list)
<|reserved_special_token_0|>
print(max(network_list))
<|reserved_special_token_0|>
print(min(network_list))
<|reserved_special_token_0|>
print(sum(occurences))
<|reserved_special_token_1|>
# List-method practice script: each numbered section builds a list, applies a
# single list method (or builtin), and prints the result.

# 1. append(element): add 'Anonymous' to the end of the hacker legends list.
hacker_legends = ['LulzSec', 'Gary McKinnon', 'Adrian Lamo',
    'Jonathan James', 'Kevin Poulsen']
hacker_legends.append('Anonymous')
print(hacker_legends)

# 2. insert(index, element): add 'SSH' at position 3 of the networking list.
networking = ['packet', 'LAN', 'WAN', 'port', 'firewall', 'VPN']
networking.insert(3, 'SSH')
print(networking)

# 3. remove(element): drop the one non-float entry from the IP list.
ip_addy = [255.224, 192.168, 1331904083.25, 5102018, 10.255, 172.31]
ip_addy.remove(5102018)
print(ip_addy)

# 4. pop(index): remove 'lazy' (index 2) from the traits list.
cyber_traits = ['detailed oriented', 'methodically', 'lazy', 'persistent',
    'curious', 'instinctive']
cyber_traits.pop(2)
print(cyber_traits)

# 5. extend(list): append every element of new_co onto sec_co.
sec_co = ['IBM', 'Raytheon', 'Mimecast', 'Cisco']
new_co = ['Checkp Point Software', 'Palo Alto Networks', 'Symantec',
    'Trend Micro']
sec_co.extend(new_co)
print(sec_co)

# 6. index(element): the exercise asks for the *index position* of 'WannaCry';
# the original printed the element at a hard-coded index instead. Look the
# position up with .index() so the printed value is the index (3).
cyber_attacks = ['Equifax Data Breach', 'Uber Data Breach', 'Yahoo!',
    'WannaCry', 'Deep Root Analytics']
print(cyber_attacks.index('WannaCry'))

# 7. count(element): number of occurrences of 98.105 in the DNS list.
dns_list = [98.105, 98.1115, 99.105, 98.111, 98.105, 98.106, 98.501]
print(dns_list.count(98.105))

# 8. reverse(): reverse the word order in place to decipher the quote.
mr_robot = ['bigger', 'something', 'represents', 'it', 'mistake', 'a',
    'just', 'never', 'is', 'bug', 'a']
mr_robot.reverse()
print(mr_robot)

# 9. sort() orders in place ascending; sort(reverse=True) orders descending.
ssh_list = [1331903959.94555, 1331901011.84795, 1331903492.37203,
    1331901032.03789, 1331903508.24007, 1331903476.8]
ssh_list.sort()
print(ssh_list)
ssh_list.sort(reverse=True)
print(ssh_list)

# 10/11. max() and min() builtins over a list of integers.
network_list = [39104, 38694, 38702, 38787, 39860]
print(max(network_list))
network_list = [39104, 38694, 38702, 38787, 39860]
print(min(network_list))

# 12. sum() builtin totals the numeric occurrences list.
occurences = [3, 2.5, 9, 7, 21, 6, 8]
print(sum(occurences))
print(sum(occurences))
<|reserved_special_token_1|>
#List methods allow you to modify lists. The following are some list methods for you to practice with. Feel free to google resources to help you with this assignment.
#append(element) adds a single element to the list
#1. 'Anonymous' is also deserving to be in the hacker legends list. Add him in to the hacker legends list and print your results.
hacker_legends = ['LulzSec', 'Gary McKinnon', 'Adrian Lamo', 'Jonathan James', 'Kevin Poulsen']
hacker_legends.append('Anonymous')
print(hacker_legends)
#insert (index, element) adds a new element at any position in your list.
#2. You just created a networking study list and forgot to add in 'SSH'. Please add that into the 3rd position in the networking list and print your results.
networking = ['packet', 'LAN', 'WAN', 'port', 'firewall', 'VPN']
networking.insert(3, 'SSH')
print(networking)
#remove(element) removes a single element from the list
#3. The cyber security analyst entered the wrong IP address in the list below. Please remove the non-float integer from the ip addy list and print your results.
ip_addy = [255.224, 192.168, 1331904083.25, 5102018, 10.255, 172.31]
ip_addy.remove(5102018)
print(ip_addy)
#pop(index) removes the element at the given index position
#4. The cyber traits list below is a list of traits that fit a career in cyber security. Everything is accurate, except for 'lazy'. Please remove 'lazy' from the list and print your results.
cyber_traits = ['detailed oriented', 'methodically', 'lazy', 'persistent', 'curious', 'instinctive']
cyber_traits.pop(2)
print(cyber_traits)
#extend(list) adds elements from another list
#5. Combine the new co list with the sec co list and print your results.
sec_co = ['IBM', 'Raytheon', 'Mimecast', 'Cisco']
new_co= ['Checkp Point Software', 'Palo Alto Networks', 'Symantec', 'Trend Micro']
sec_co.extend(new_co)
print(sec_co)
#index(element) searches an element in the list and returns its index
#6. There were some headline grabbing cyber attacks in 2017. In the cyber attacks list below, find the index position of 'WannaCry' and print your result.
cyber_attacks = ['Equifax Data Breach', 'Uber Data Breach', 'Yahoo!','WannaCry', 'Deep Root Analytics']
print(cyber_attacks[3])
#count(element) counts how many times an element is in a list
#7. In the dns list below, find the number of ocurrence for 98.105 and print your results.
dns_list = [98.105, 98.1115, 99.105, 98.111, 98.105, 98.106, 98.501]
print(dns_list.count(98.105))
#reverse() reverses the elements of a given list
#8. Decipher Mr. Robot's quote using the reverse method and print his message.
mr_robot = ['bigger', 'something', 'represents', 'it', 'mistake', 'a', 'just', 'never', 'is', 'bug', 'a']
mr_robot.reverse()
print(mr_robot)
#sort () sorts elements of a given list in a specific order (ascending or descending)
#9 Sort the following list of SSH Ids in ascending order
ssh_list = [1331903959.94555, 1331901011.84795, 1331903492.37203, 1331901032.03789, 1331903508.24007, 1331903476.8]
ssh_list.sort()
print(ssh_list)
#print the list in descending order
ssh_list.sort(reverse=True)
print(ssh_list)
#max() returns the largest element in the list
#10 Find the largest integer in the network list below:
network_list = [39104, 38694, 38702, 38787, 39860]
print(max(network_list))
#min() returns the smallest element in the list
#11 Find the smallest integet in the network list below:
network_list = [39104, 38694, 38702, 38787, 39860]
print(min(network_list))
#sum() calculates the sum of the all the elements in the list
#12 Find the sum of the following occurence list below:
occurences = [3, 2.5, 9, 7, 21, 6, 8]
print(sum(occurences))
|
flexible
|
{
"blob_id": "53fd020946a2baddb1bb0463d2a56744de6e3822",
"index": 5506,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nhacker_legends.append('Anonymous')\nprint(hacker_legends)\n<mask token>\nnetworking.insert(3, 'SSH')\nprint(networking)\n<mask token>\nip_addy.remove(5102018)\nprint(ip_addy)\n<mask token>\ncyber_traits.pop(2)\nprint(cyber_traits)\n<mask token>\nsec_co.extend(new_co)\nprint(sec_co)\n<mask token>\nprint(cyber_attacks[3])\n<mask token>\nprint(dns_list.count(98.105))\n<mask token>\nmr_robot.reverse()\nprint(mr_robot)\n<mask token>\nssh_list.sort()\nprint(ssh_list)\nssh_list.sort(reverse=True)\nprint(ssh_list)\n<mask token>\nprint(max(network_list))\n<mask token>\nprint(min(network_list))\n<mask token>\nprint(sum(occurences))\n",
"step-3": "hacker_legends = ['LulzSec', 'Gary McKinnon', 'Adrian Lamo',\n 'Jonathan James', 'Kevin Poulsen']\nhacker_legends.append('Anonymous')\nprint(hacker_legends)\nnetworking = ['packet', 'LAN', 'WAN', 'port', 'firewall', 'VPN']\nnetworking.insert(3, 'SSH')\nprint(networking)\nip_addy = [255.224, 192.168, 1331904083.25, 5102018, 10.255, 172.31]\nip_addy.remove(5102018)\nprint(ip_addy)\ncyber_traits = ['detailed oriented', 'methodically', 'lazy', 'persistent',\n 'curious', 'instinctive']\ncyber_traits.pop(2)\nprint(cyber_traits)\nsec_co = ['IBM', 'Raytheon', 'Mimecast', 'Cisco']\nnew_co = ['Checkp Point Software', 'Palo Alto Networks', 'Symantec',\n 'Trend Micro']\nsec_co.extend(new_co)\nprint(sec_co)\ncyber_attacks = ['Equifax Data Breach', 'Uber Data Breach', 'Yahoo!',\n 'WannaCry', 'Deep Root Analytics']\nprint(cyber_attacks[3])\ndns_list = [98.105, 98.1115, 99.105, 98.111, 98.105, 98.106, 98.501]\nprint(dns_list.count(98.105))\nmr_robot = ['bigger', 'something', 'represents', 'it', 'mistake', 'a',\n 'just', 'never', 'is', 'bug', 'a']\nmr_robot.reverse()\nprint(mr_robot)\nssh_list = [1331903959.94555, 1331901011.84795, 1331903492.37203, \n 1331901032.03789, 1331903508.24007, 1331903476.8]\nssh_list.sort()\nprint(ssh_list)\nssh_list.sort(reverse=True)\nprint(ssh_list)\nnetwork_list = [39104, 38694, 38702, 38787, 39860]\nprint(max(network_list))\nnetwork_list = [39104, 38694, 38702, 38787, 39860]\nprint(min(network_list))\noccurences = [3, 2.5, 9, 7, 21, 6, 8]\nprint(sum(occurences))\n",
"step-4": "#List methods allow you to modify lists. The following are some list methods for you to practice with. Feel free to google resources to help you with this assignment.\n\n#append(element) adds a single element to the list\n#1. 'Anonymous' is also deserving to be in the hacker legends list. Add him in to the hacker legends list and print your results.\n\nhacker_legends = ['LulzSec', 'Gary McKinnon', 'Adrian Lamo', 'Jonathan James', 'Kevin Poulsen']\n\nhacker_legends.append('Anonymous')\nprint(hacker_legends)\n\n#insert (index, element) adds a new element at any position in your list.\n#2. You just created a networking study list and forgot to add in 'SSH'. Please add that into the 3rd position in the networking list and print your results.\n\nnetworking = ['packet', 'LAN', 'WAN', 'port', 'firewall', 'VPN']\n\nnetworking.insert(3, 'SSH')\nprint(networking)\n\n#remove(element) removes a single element from the list\n#3. The cyber security analyst entered the wrong IP address in the list below. Please remove the non-float integer from the ip addy list and print your results.\n\nip_addy = [255.224, 192.168, 1331904083.25, 5102018, 10.255, 172.31]\n\nip_addy.remove(5102018)\nprint(ip_addy)\n\n#pop(index) removes the element at the given index position\n#4. The cyber traits list below is a list of traits that fit a career in cyber security. Everything is accurate, except for 'lazy'. Please remove 'lazy' from the list and print your results.\n\ncyber_traits = ['detailed oriented', 'methodically', 'lazy', 'persistent', 'curious', 'instinctive']\n\ncyber_traits.pop(2)\nprint(cyber_traits)\n\n#extend(list) adds elements from another list \n#5. Combine the new co list with the sec co list and print your results.\n\nsec_co = ['IBM', 'Raytheon', 'Mimecast', 'Cisco']\nnew_co= ['Checkp Point Software', 'Palo Alto Networks', 'Symantec', 'Trend Micro']\n\nsec_co.extend(new_co)\nprint(sec_co)\n\n#index(element) searches an element in the list and returns its index\n#6. 
There were some headline grabbing cyber attacks in 2017. In the cyber attacks list below, find the index position of 'WannaCry' and print your result.\n\ncyber_attacks = ['Equifax Data Breach', 'Uber Data Breach', 'Yahoo!','WannaCry', 'Deep Root Analytics']\n\nprint(cyber_attacks[3])\n\n#count(element) counts how many times an element is in a list\n#7. In the dns list below, find the number of ocurrence for 98.105 and print your results.\n\ndns_list = [98.105, 98.1115, 99.105, 98.111, 98.105, 98.106, 98.501]\n\nprint(dns_list.count(98.105))\n\n#reverse() reverses the elements of a given list\n#8. Decipher Mr. Robot's quote using the reverse method and print his message.\n\nmr_robot = ['bigger', 'something', 'represents', 'it', 'mistake', 'a', 'just', 'never', 'is', 'bug', 'a']\n\nmr_robot.reverse()\nprint(mr_robot)\n\n#sort () sorts elements of a given list in a specific order (ascending or descending)\n#9 Sort the following list of SSH Ids in ascending order\n\nssh_list = [1331903959.94555, 1331901011.84795, 1331903492.37203, 1331901032.03789, 1331903508.24007, 1331903476.8]\n\nssh_list.sort()\nprint(ssh_list)\n\n#print the list in descending order\nssh_list.sort(reverse=True)\nprint(ssh_list)\n\n#max() returns the largest element in the list\n#10 Find the largest integer in the network list below:\n\nnetwork_list = [39104, 38694, 38702, 38787, 39860]\n\nprint(max(network_list))\n\n#min() returns the smallest element in the list\n#11 Find the smallest integet in the network list below:\n\nnetwork_list = [39104, 38694, 38702, 38787, 39860]\n\nprint(min(network_list))\n\n#sum() calculates the sum of the all the elements in the list\n#12 Find the sum of the following occurence list below:\n\noccurences = [3, 2.5, 9, 7, 21, 6, 8]\nprint(sum(occurences))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from distributions.zero_inflated_poisson import ZeroInflatedPoisson
from distributions.negative_binomial import NegativeBinomial
from distributions.zero_inflated_negative_binomial import ZeroInflatedNegativeBinomial
from distributions.zero_inflated import ZeroInflated
from distributions.categorized import Categorized
from distributions.pareto import Pareto
|
flexible
|
{
"blob_id": "dfae1007adc557a15d03b78f2bf790fb5b06141a",
"index": 4442,
"step-1": "<mask token>\n",
"step-2": "from distributions.zero_inflated_poisson import ZeroInflatedPoisson\nfrom distributions.negative_binomial import NegativeBinomial\nfrom distributions.zero_inflated_negative_binomial import ZeroInflatedNegativeBinomial\nfrom distributions.zero_inflated import ZeroInflated\nfrom distributions.categorized import Categorized\nfrom distributions.pareto import Pareto\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
# Getting familiar with OOP and using Functions and Classes :)
class Dog():
    """A simple dog with a class-level species tag and a bark method."""

    # shared by every Dog instance
    species = 'mammal'

    def __init__(self, breed, name):
        """Record the dog's breed and name."""
        self.name = name
        self.breed = breed

    def bark(self, number):
        """Print a greeting containing the dog's name and *number*."""
        print('Woof! My name is ' + self.name + ' and the number is ' + str(number))
# Instantiate a Dog and inspect its type and attributes.
my_dog = Dog('Corgi','RTZY')
print(type(my_dog))
print(my_dog.breed)
print(my_dog.name)
# bark prints a message that includes the name and the number 10.
my_dog.bark(10)
class Circle():
    """A circle that caches its area and can report its circumference."""

    # coarse approximation of pi, kept for parity with the lesson
    pi = 3.14

    def __init__(self, radius=1):
        """Store the radius and cache the area (r * r * pi)."""
        self.radius = radius
        self.area = radius * radius * Circle.pi

    def get_circumference(self):
        """Return the circumference, 2 * pi * r."""
        return self.radius * Circle.pi * 2
# Build a circle of radius 30 and show its cached area and circumference.
my_circle = Circle(30)
print(my_circle.area)
test = my_circle.get_circumference()
print(test)
class Animal():
    """Base class that announces its creation and basic actions by printing."""

    def __init__(self):
        """Announce that an animal was created."""
        print('Animal Created')

    def who_am_i(self):
        """Announce the animal's identity."""
        print('I am an animal')

    def eat(self):
        """Announce that the animal is eating."""
        print('I am eating')
# Visual separator between the two class demos (prints an empty line plus the
# trailing newline).
print('\n')
class Dog(Animal):
    """Animal subclass that also announces dog creation and can bark."""

    def __init__(self):
        """Run the Animal initializer first, then announce the dog."""
        super().__init__()
        print('Dog Created')

    def bark(self):
        """Print a bark (returns None)."""
        print('Woof! Woof!')
# Creating the subclass prints 'Animal Created' then 'Dog Created'.
mydog = Dog()
# bark() prints and returns None, so this line also prints 'None'.
print(mydog.bark())
|
normal
|
{
"blob_id": "c8137aacfb0f35c9630515442d5bdda870e9908a",
"index": 4827,
"step-1": "<mask token>\n\n\nclass Circle:\n <mask token>\n\n def __init__(self, radius=1):\n self.radius = radius\n self.area = radius * radius * Circle.pi\n\n def get_circumference(self):\n return self.radius * Circle.pi * 2\n\n\n<mask token>\n\n\nclass Animal:\n\n def __init__(self):\n print('Animal Created')\n\n def who_am_i(self):\n print('I am an animal')\n\n def eat(self):\n print('I am eating')\n\n\n<mask token>\n\n\nclass Dog(Animal):\n\n def __init__(self):\n Animal.__init__(self)\n print('Dog Created')\n\n def bark(self):\n print('Woof! Woof!')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Circle:\n pi = 3.14\n\n def __init__(self, radius=1):\n self.radius = radius\n self.area = radius * radius * Circle.pi\n\n def get_circumference(self):\n return self.radius * Circle.pi * 2\n\n\n<mask token>\n\n\nclass Animal:\n\n def __init__(self):\n print('Animal Created')\n\n def who_am_i(self):\n print('I am an animal')\n\n def eat(self):\n print('I am eating')\n\n\n<mask token>\n\n\nclass Dog(Animal):\n\n def __init__(self):\n Animal.__init__(self)\n print('Dog Created')\n\n def bark(self):\n print('Woof! Woof!')\n\n\n<mask token>\n",
"step-3": "class Dog:\n <mask token>\n\n def __init__(self, breed, name):\n self.breed = breed\n self.name = name\n <mask token>\n\n\n<mask token>\n\n\nclass Circle:\n pi = 3.14\n\n def __init__(self, radius=1):\n self.radius = radius\n self.area = radius * radius * Circle.pi\n\n def get_circumference(self):\n return self.radius * Circle.pi * 2\n\n\n<mask token>\n\n\nclass Animal:\n\n def __init__(self):\n print('Animal Created')\n\n def who_am_i(self):\n print('I am an animal')\n\n def eat(self):\n print('I am eating')\n\n\n<mask token>\n\n\nclass Dog(Animal):\n\n def __init__(self):\n Animal.__init__(self)\n print('Dog Created')\n\n def bark(self):\n print('Woof! Woof!')\n\n\n<mask token>\n",
"step-4": "class Dog:\n species = 'mammal'\n\n def __init__(self, breed, name):\n self.breed = breed\n self.name = name\n\n def bark(self, number):\n print(f'Woof! My name is {self.name} and the number is {number}')\n\n\n<mask token>\nprint(type(my_dog))\nprint(my_dog.breed)\nprint(my_dog.name)\nmy_dog.bark(10)\n\n\nclass Circle:\n pi = 3.14\n\n def __init__(self, radius=1):\n self.radius = radius\n self.area = radius * radius * Circle.pi\n\n def get_circumference(self):\n return self.radius * Circle.pi * 2\n\n\n<mask token>\nprint(my_circle.area)\n<mask token>\nprint(test)\n\n\nclass Animal:\n\n def __init__(self):\n print('Animal Created')\n\n def who_am_i(self):\n print('I am an animal')\n\n def eat(self):\n print('I am eating')\n\n\nprint('\\n')\n\n\nclass Dog(Animal):\n\n def __init__(self):\n Animal.__init__(self)\n print('Dog Created')\n\n def bark(self):\n print('Woof! Woof!')\n\n\n<mask token>\nprint(mydog.bark())\n",
"step-5": "# Getting familiar with OOP and using Functions and Classes :)\nclass Dog():\n \n species = 'mammal'\n\n def __init__(self,breed,name):\n\n self.breed = breed\n self.name = name\n \n def bark(self,number):\n print(f'Woof! My name is {self.name} and the number is {number}')\n\nmy_dog = Dog('Corgi','RTZY')\nprint(type(my_dog))\nprint(my_dog.breed)\nprint(my_dog.name)\nmy_dog.bark(10)\n\nclass Circle():\n \n pi = 3.14\n\n def __init__(self,radius = 1):\n self.radius = radius\n self.area = radius * radius * Circle.pi\n \n def get_circumference(self):\n return (self.radius * Circle.pi) * 2\n\nmy_circle = Circle(30)\nprint(my_circle.area)\ntest = my_circle.get_circumference()\nprint(test)\n\nclass Animal():\n\n def __init__(self):\n print('Animal Created')\n \n def who_am_i(self):\n print('I am an animal')\n \n def eat(self):\n print('I am eating')\n\nprint('\\n')\nclass Dog(Animal):\n \n def __init__(self):\n Animal.__init__(self)\n print('Dog Created')\n def bark(self):\n print('Woof! Woof!')\n\nmydog = Dog()\nprint(mydog.bark())",
"step-ids": [
10,
11,
13,
16,
18
]
}
|
[
10,
11,
13,
16,
18
] |
<|reserved_special_token_0|>
def old_bracket(taxable_income, joint=True):
    """Tax owed under the pre-2018 brackets for joint or single filers."""
    rates = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]
    if joint:
        thresholds = [0, 18650, 75900, 153100, 233350, 416700, 470700]
    else:
        thresholds = [0, 9325, 37950, 91900, 191650, 416700, 418400]
    return tax_calculator(taxable_income, thresholds, rates)
<|reserved_special_token_0|>
def MTG_IR_deduction_old(UPB, rate):
    """Old-law mortgage interest deduction: interest on at most $1M of balance."""
    capped_balance = min(UPB, 1000000.0)
    return capped_balance * rate
<|reserved_special_token_0|>
def SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):
    """New-law SALT deduction: state income tax plus local tax, capped at $10K."""
    uncapped = taxable_income * efficient_state_rate + local_tax
    return min(uncapped, 10000.0)
<|reserved_special_token_0|>
def PersonalExemption_deduction_new():
    # The new law repeals the personal exemption, so the deduction is always 0.
    return 0
<|reserved_special_token_0|>
def ChildCare_Credit_new(taxable_income, child, joint=True):
    """New-law child tax credit: $1,600 per child, reduced by $1 for every
    $20 of income over the filing-status threshold (never below zero)."""
    threshold = 230000 if joint else 115000
    # 1e-07 nudges exact half-steps so round() follows the intended schedule
    phaseout = round(max(taxable_income - threshold, 0) / 20 + 1e-07)
    return int(max(0, 1600 * child - phaseout))
def AMT_exemption(taxable_income, joint=True):
    """AMT exemption amount, phased out 25 cents per dollar of income over
    the filing-status threshold (floored at zero)."""
    if joint:
        base, threshold = 84500, 160900
    else:
        base, threshold = 54300, 120700
    return max(0, base - max(taxable_income - threshold, 0) / 4)
def tax_comparison(taxable_income, member, child, UPB, rate,
    efficient_state_rate, local_tax, joint=True, existing_mtg=False,
    display=True, detail=False):
    """Compare federal income tax owed under the old (pre-2018) law and the
    proposed new law, and return the component liabilities.

    For each regime both the standard-deduction and itemized liabilities are
    computed (plus the AMT under the old law); the payable tax is the best
    filing choice under each regime.

    Args:
        taxable_income: household taxable income in dollars.
        member: number of family members (drives the old personal exemption).
        child: number of qualifying children (drives the child tax credit).
        UPB: unpaid principal balance of the mortgage.
        rate: mortgage interest rate as a fraction (0.04 == 4%).
        efficient_state_rate: effective state income-tax rate as a fraction.
        local_tax: local tax amount in dollars.
        joint: True for married-filing-jointly brackets, False for single.
        existing_mtg: True if the mortgage predates the new law (keeps the
            old interest-deduction cap).
        display: if True, print the headline old-vs-new comparison.
        detail: if True, also print the inputs and a per-item table
            (uses the third-party BeautifulTable package).

    Returns:
        [tax_old, tax_new, old_tax_standard, new_tax_standard,
         old_tax_itemized, new_tax_itemized, old_tax_AMT]
    """
    # Deductions and credits under each regime.
    old_PersonalExemption_deduction = PersonalExemption_deduction_old(
        taxable_income, member, joint=joint)
    old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child,
        joint=joint)
    new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child,
        joint=joint)
    old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)
    new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=
        existing_mtg)
    old_SALT_deduction = SALT_deduction_old(taxable_income,
        efficient_state_rate, local_tax)
    new_SALT_deduction = SALT_deduction_new(taxable_income,
        efficient_state_rate, local_tax)
    # Standard deduction depends only on filing status.
    if joint:
        old_standard_deduction = 12600
        new_standard_deduction = 24000
    else:
        old_standard_deduction = 6300
        new_standard_deduction = 12000
    # Liability when taking the standard deduction; the child credit is
    # applied afterwards and the result floored at zero.
    old_tax_beforeCCTC_standard = old_bracket(taxable_income -
        old_standard_deduction - old_PersonalExemption_deduction, joint=joint)
    new_tax_beforeCCTC_standard = new_bracket(taxable_income -
        new_standard_deduction, joint=joint)
    old_tax_standard = max(0, old_tax_beforeCCTC_standard -
        old_ChildCare_Credit)
    new_tax_standard = max(0, new_tax_beforeCCTC_standard -
        new_ChildCare_Credit)
    # Liability when itemizing (mortgage interest + SALT instead of the
    # standard deduction).
    old_tax_beforeCCTC_itemized = old_bracket(taxable_income -
        old_MTG_IR_deduction - old_SALT_deduction -
        old_PersonalExemption_deduction, joint=joint)
    new_tax_beforeCCTC_itemized = new_bracket(taxable_income -
        new_MTG_IR_deduction - new_SALT_deduction, joint=joint)
    old_tax_itemized = max(0, old_tax_beforeCCTC_itemized -
        old_ChildCare_Credit)
    new_tax_itemized = max(0, new_tax_beforeCCTC_itemized -
        new_ChildCare_Credit)
    # Old-law AMT: the AMT base keeps only the mortgage-interest deduction.
    AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)
    old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income -
        AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)
    old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)
    # Old law: the cheaper filing choice, but never less than the AMT.
    tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)
    tax_new = min(new_tax_standard, new_tax_itemized)
    if display:
        print('Current Tax Should Pay: $%3.2f' % tax_old)
        print('	Standard: $%3.2f' % old_tax_standard)
        print('	Itemized: $%3.2f' % old_tax_itemized)
        print('	AMT tax: $%3.2f' % old_tax_AMT)
        print('New Tax Should Pay: $%3.2f' % tax_new)
        print('	Standard: $%3.2f' % new_tax_standard)
        print('	Itemized: $%3.2f' % new_tax_itemized)
    if detail:
        print('***********************************************')
        print('${:,} taxable income'.format(taxable_income) +
            ', joint = %r' % joint)
        print('%d Family Member, %d child(ren)' % (member, child))
        print('Existing Mortgage: %r' % existing_mtg +
            ', ${:,} Mortgage Balance'.format(UPB) +
            ', %3.2f%% Interest Rate' % (rate * 100))
        print('${:,} Local Tax'.format(local_tax) +
            ', %d%% State/City Tax Rate' % (efficient_state_rate * 100))
        print('***********************************************')
        # Side-by-side per-item breakdown (third-party BeautifulTable).
        table = BeautifulTable()
        table.column_headers = ['Item', 'Current', 'New']
        table.append_row(['Standard Deduction', old_standard_deduction,
            new_standard_deduction])
        table.append_row(['Personal Exemption',
            old_PersonalExemption_deduction, 'NA'])
        table.append_row(['Child Care Tax Credit', old_ChildCare_Credit,
            new_ChildCare_Credit])
        table.append_row(['Mortgage Interest Deduction',
            old_MTG_IR_deduction, new_MTG_IR_deduction])
        table.append_row(['State and Local Tax Deduction',
            old_SALT_deduction, new_SALT_deduction])
        table.append_row(['AMT Exemption (not including MTG Interest)',
            AMT_exemption_amount, 'NA'])
        table.append_row(['Tax', tax_old, tax_new])
        print(table)
    return [tax_old, tax_new, old_tax_standard, new_tax_standard,
        old_tax_itemized, new_tax_itemized, old_tax_AMT]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def old_bracket(taxable_income, joint=True):
    """Tax owed under the pre-2018 brackets for joint or single filers."""
    rates = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]
    if joint:
        thresholds = [0, 18650, 75900, 153100, 233350, 416700, 470700]
    else:
        thresholds = [0, 9325, 37950, 91900, 191650, 416700, 418400]
    return tax_calculator(taxable_income, thresholds, rates)
def new_bracket(taxable_income, joint=True):
    """Tax owed under the proposed new brackets for joint or single filers."""
    rates = [0.12, 0.25, 0.35, 0.396]
    if joint:
        thresholds = [0, 90000, 260000, 1000000]
    else:
        thresholds = [0, 45000, 200000, 500000]
    return tax_calculator(taxable_income, thresholds, rates)
<|reserved_special_token_0|>
def MTG_IR_deduction_old(UPB, rate):
    """Old-law mortgage interest deduction: interest on at most $1M of balance."""
    capped_balance = min(UPB, 1000000.0)
    return capped_balance * rate
def MTG_IR_deduction_new(UPB, rate, existing_mtg=False):
    """New-law mortgage interest deduction; grandfathered loans keep the $1M
    cap, new loans are capped at $750K of balance."""
    cap = 1000000.0 if existing_mtg else 750000.0
    return min(cap, UPB) * rate
<|reserved_special_token_0|>
def SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):
    """New-law SALT deduction: state income tax plus local tax, capped at $10K."""
    uncapped = taxable_income * efficient_state_rate + local_tax
    return min(uncapped, 10000.0)
<|reserved_special_token_0|>
def PersonalExemption_deduction_new():
    # The new law repeals the personal exemption, so the deduction is always 0.
    return 0
def ChildCare_Credit_old(taxable_income, child, joint=True):
    """Old-law child tax credit: $1,000 per child, reduced by $1 for every
    $20 of income over the filing-status threshold (never below zero)."""
    threshold = 110000 if joint else 55000
    # 1e-07 nudges exact half-steps so round() follows the intended schedule
    phaseout = round(max(taxable_income - threshold, 0) / 20 + 1e-07)
    return int(max(0, 1000 * child - phaseout))
def ChildCare_Credit_new(taxable_income, child, joint=True):
    """New-law child tax credit: $1,600 per child, reduced by $1 for every
    $20 of income over the (higher) filing-status threshold."""
    threshold = 230000 if joint else 115000
    # 1e-07 nudges exact half-steps so round() follows the intended schedule
    phaseout = round(max(taxable_income - threshold, 0) / 20 + 1e-07)
    return int(max(0, 1600 * child - phaseout))
def AMT_exemption(taxable_income, joint=True):
    """AMT exemption amount, phased out 25 cents per dollar of income over
    the filing-status threshold (floored at zero)."""
    if joint:
        base, threshold = 84500, 160900
    else:
        base, threshold = 54300, 120700
    return max(0, base - max(taxable_income - threshold, 0) / 4)
def tax_comparison(taxable_income, member, child, UPB, rate,
    efficient_state_rate, local_tax, joint=True, existing_mtg=False,
    display=True, detail=False):
    """Compare federal income tax owed under the old (pre-2018) law and the
    proposed new law, and return the component liabilities.

    For each regime both the standard-deduction and itemized liabilities are
    computed (plus the AMT under the old law); the payable tax is the best
    filing choice under each regime.

    Args:
        taxable_income: household taxable income in dollars.
        member: number of family members (drives the old personal exemption).
        child: number of qualifying children (drives the child tax credit).
        UPB: unpaid principal balance of the mortgage.
        rate: mortgage interest rate as a fraction (0.04 == 4%).
        efficient_state_rate: effective state income-tax rate as a fraction.
        local_tax: local tax amount in dollars.
        joint: True for married-filing-jointly brackets, False for single.
        existing_mtg: True if the mortgage predates the new law (keeps the
            old interest-deduction cap).
        display: if True, print the headline old-vs-new comparison.
        detail: if True, also print the inputs and a per-item table
            (uses the third-party BeautifulTable package).

    Returns:
        [tax_old, tax_new, old_tax_standard, new_tax_standard,
         old_tax_itemized, new_tax_itemized, old_tax_AMT]
    """
    # Deductions and credits under each regime.
    old_PersonalExemption_deduction = PersonalExemption_deduction_old(
        taxable_income, member, joint=joint)
    old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child,
        joint=joint)
    new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child,
        joint=joint)
    old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)
    new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=
        existing_mtg)
    old_SALT_deduction = SALT_deduction_old(taxable_income,
        efficient_state_rate, local_tax)
    new_SALT_deduction = SALT_deduction_new(taxable_income,
        efficient_state_rate, local_tax)
    # Standard deduction depends only on filing status.
    if joint:
        old_standard_deduction = 12600
        new_standard_deduction = 24000
    else:
        old_standard_deduction = 6300
        new_standard_deduction = 12000
    # Liability when taking the standard deduction; the child credit is
    # applied afterwards and the result floored at zero.
    old_tax_beforeCCTC_standard = old_bracket(taxable_income -
        old_standard_deduction - old_PersonalExemption_deduction, joint=joint)
    new_tax_beforeCCTC_standard = new_bracket(taxable_income -
        new_standard_deduction, joint=joint)
    old_tax_standard = max(0, old_tax_beforeCCTC_standard -
        old_ChildCare_Credit)
    new_tax_standard = max(0, new_tax_beforeCCTC_standard -
        new_ChildCare_Credit)
    # Liability when itemizing (mortgage interest + SALT instead of the
    # standard deduction).
    old_tax_beforeCCTC_itemized = old_bracket(taxable_income -
        old_MTG_IR_deduction - old_SALT_deduction -
        old_PersonalExemption_deduction, joint=joint)
    new_tax_beforeCCTC_itemized = new_bracket(taxable_income -
        new_MTG_IR_deduction - new_SALT_deduction, joint=joint)
    old_tax_itemized = max(0, old_tax_beforeCCTC_itemized -
        old_ChildCare_Credit)
    new_tax_itemized = max(0, new_tax_beforeCCTC_itemized -
        new_ChildCare_Credit)
    # Old-law AMT: the AMT base keeps only the mortgage-interest deduction.
    AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)
    old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income -
        AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)
    old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)
    # Old law: the cheaper filing choice, but never less than the AMT.
    tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)
    tax_new = min(new_tax_standard, new_tax_itemized)
    if display:
        print('Current Tax Should Pay: $%3.2f' % tax_old)
        print('	Standard: $%3.2f' % old_tax_standard)
        print('	Itemized: $%3.2f' % old_tax_itemized)
        print('	AMT tax: $%3.2f' % old_tax_AMT)
        print('New Tax Should Pay: $%3.2f' % tax_new)
        print('	Standard: $%3.2f' % new_tax_standard)
        print('	Itemized: $%3.2f' % new_tax_itemized)
    if detail:
        print('***********************************************')
        print('${:,} taxable income'.format(taxable_income) +
            ', joint = %r' % joint)
        print('%d Family Member, %d child(ren)' % (member, child))
        print('Existing Mortgage: %r' % existing_mtg +
            ', ${:,} Mortgage Balance'.format(UPB) +
            ', %3.2f%% Interest Rate' % (rate * 100))
        print('${:,} Local Tax'.format(local_tax) +
            ', %d%% State/City Tax Rate' % (efficient_state_rate * 100))
        print('***********************************************')
        # Side-by-side per-item breakdown (third-party BeautifulTable).
        table = BeautifulTable()
        table.column_headers = ['Item', 'Current', 'New']
        table.append_row(['Standard Deduction', old_standard_deduction,
            new_standard_deduction])
        table.append_row(['Personal Exemption',
            old_PersonalExemption_deduction, 'NA'])
        table.append_row(['Child Care Tax Credit', old_ChildCare_Credit,
            new_ChildCare_Credit])
        table.append_row(['Mortgage Interest Deduction',
            old_MTG_IR_deduction, new_MTG_IR_deduction])
        table.append_row(['State and Local Tax Deduction',
            old_SALT_deduction, new_SALT_deduction])
        table.append_row(['AMT Exemption (not including MTG Interest)',
            AMT_exemption_amount, 'NA'])
        table.append_row(['Tax', tax_old, tax_new])
        print(table)
    return [tax_old, tax_new, old_tax_standard, new_tax_standard,
        old_tax_itemized, new_tax_itemized, old_tax_AMT]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def old_bracket(taxable_income, joint=True):
    """Tax owed under the pre-2018 brackets for joint or single filers."""
    rates = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]
    if joint:
        thresholds = [0, 18650, 75900, 153100, 233350, 416700, 470700]
    else:
        thresholds = [0, 9325, 37950, 91900, 191650, 416700, 418400]
    return tax_calculator(taxable_income, thresholds, rates)
def new_bracket(taxable_income, joint=True):
    """Tax owed under the proposed new brackets for joint or single filers."""
    rates = [0.12, 0.25, 0.35, 0.396]
    if joint:
        thresholds = [0, 90000, 260000, 1000000]
    else:
        thresholds = [0, 45000, 200000, 500000]
    return tax_calculator(taxable_income, thresholds, rates)
def AMT_bracket(taxable_income, joint=True):
    """Alternative Minimum Tax owed at the two-tier 26%/28% AMT rates."""
    rates = [0.26, 0.28]
    if joint:
        thresholds = [0, 187800]
    else:
        thresholds = [0, 93900]
    return tax_calculator(taxable_income, thresholds, rates)
<|reserved_special_token_0|>
def MTG_IR_deduction_old(UPB, rate):
    """Old-law mortgage interest deduction: interest on at most $1M of balance."""
    capped_balance = min(UPB, 1000000.0)
    return capped_balance * rate
def MTG_IR_deduction_new(UPB, rate, existing_mtg=False):
    """New-law mortgage interest deduction; grandfathered loans keep the $1M
    cap, new loans are capped at $750K of balance."""
    cap = 1000000.0 if existing_mtg else 750000.0
    return min(cap, UPB) * rate
def SALT_deduction_old(taxable_income, efficient_state_rate, local_tax):
    """Old-law SALT deduction: uncapped state income tax plus local tax."""
    state_tax = taxable_income * efficient_state_rate
    return state_tax + local_tax
def SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):
    """New-law SALT deduction: state income tax plus local tax, capped at $10K."""
    uncapped = taxable_income * efficient_state_rate + local_tax
    return min(uncapped, 10000.0)
def PersonalExemption_deduction_old(taxable_income, member, joint=True):
    """Old-law personal exemption: $4,050 per family member, phased out 2%
    per $2,500 of income over the filing-status threshold (capped at 100%)."""
    threshold = 311300 if joint else 259000
    # 1e-07 nudges exact half-steps so round() follows the intended schedule
    steps = round(max(taxable_income - threshold, 0) / 2500 + 1e-07)
    phaseout = min(0.02 * steps, 1)
    return int(4050 * member * (1 - phaseout))
def PersonalExemption_deduction_new():
    # The new law repeals the personal exemption, so the deduction is always 0.
    return 0
def ChildCare_Credit_old(taxable_income, child, joint=True):
    """Old-law child tax credit: $1,000 per child, reduced by $1 for every
    $20 of income over the filing-status threshold (never below zero)."""
    threshold = 110000 if joint else 55000
    # 1e-07 nudges exact half-steps so round() follows the intended schedule
    phaseout = round(max(taxable_income - threshold, 0) / 20 + 1e-07)
    return int(max(0, 1000 * child - phaseout))
def ChildCare_Credit_new(taxable_income, child, joint=True):
    """New-law child tax credit: $1,600 per child, reduced by $1 for every
    $20 of income over the (higher) filing-status threshold."""
    threshold = 230000 if joint else 115000
    # 1e-07 nudges exact half-steps so round() follows the intended schedule
    phaseout = round(max(taxable_income - threshold, 0) / 20 + 1e-07)
    return int(max(0, 1600 * child - phaseout))
def AMT_exemption(taxable_income, joint=True):
    """AMT exemption amount, phased out 25 cents per dollar of income over
    the filing-status threshold (floored at zero)."""
    if joint:
        base, threshold = 84500, 160900
    else:
        base, threshold = 54300, 120700
    return max(0, base - max(taxable_income - threshold, 0) / 4)
def tax_comparison(taxable_income, member, child, UPB, rate,
    efficient_state_rate, local_tax, joint=True, existing_mtg=False,
    display=True, detail=False):
    """Compare federal income tax owed under the old (pre-2018) law and the
    proposed new law, and return the component liabilities.

    For each regime both the standard-deduction and itemized liabilities are
    computed (plus the AMT under the old law); the payable tax is the best
    filing choice under each regime.

    Args:
        taxable_income: household taxable income in dollars.
        member: number of family members (drives the old personal exemption).
        child: number of qualifying children (drives the child tax credit).
        UPB: unpaid principal balance of the mortgage.
        rate: mortgage interest rate as a fraction (0.04 == 4%).
        efficient_state_rate: effective state income-tax rate as a fraction.
        local_tax: local tax amount in dollars.
        joint: True for married-filing-jointly brackets, False for single.
        existing_mtg: True if the mortgage predates the new law (keeps the
            old interest-deduction cap).
        display: if True, print the headline old-vs-new comparison.
        detail: if True, also print the inputs and a per-item table
            (uses the third-party BeautifulTable package).

    Returns:
        [tax_old, tax_new, old_tax_standard, new_tax_standard,
         old_tax_itemized, new_tax_itemized, old_tax_AMT]
    """
    # Deductions and credits under each regime.
    old_PersonalExemption_deduction = PersonalExemption_deduction_old(
        taxable_income, member, joint=joint)
    old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child,
        joint=joint)
    new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child,
        joint=joint)
    old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)
    new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=
        existing_mtg)
    old_SALT_deduction = SALT_deduction_old(taxable_income,
        efficient_state_rate, local_tax)
    new_SALT_deduction = SALT_deduction_new(taxable_income,
        efficient_state_rate, local_tax)
    # Standard deduction depends only on filing status.
    if joint:
        old_standard_deduction = 12600
        new_standard_deduction = 24000
    else:
        old_standard_deduction = 6300
        new_standard_deduction = 12000
    # Liability when taking the standard deduction; the child credit is
    # applied afterwards and the result floored at zero.
    old_tax_beforeCCTC_standard = old_bracket(taxable_income -
        old_standard_deduction - old_PersonalExemption_deduction, joint=joint)
    new_tax_beforeCCTC_standard = new_bracket(taxable_income -
        new_standard_deduction, joint=joint)
    old_tax_standard = max(0, old_tax_beforeCCTC_standard -
        old_ChildCare_Credit)
    new_tax_standard = max(0, new_tax_beforeCCTC_standard -
        new_ChildCare_Credit)
    # Liability when itemizing (mortgage interest + SALT instead of the
    # standard deduction).
    old_tax_beforeCCTC_itemized = old_bracket(taxable_income -
        old_MTG_IR_deduction - old_SALT_deduction -
        old_PersonalExemption_deduction, joint=joint)
    new_tax_beforeCCTC_itemized = new_bracket(taxable_income -
        new_MTG_IR_deduction - new_SALT_deduction, joint=joint)
    old_tax_itemized = max(0, old_tax_beforeCCTC_itemized -
        old_ChildCare_Credit)
    new_tax_itemized = max(0, new_tax_beforeCCTC_itemized -
        new_ChildCare_Credit)
    # Old-law AMT: the AMT base keeps only the mortgage-interest deduction.
    AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)
    old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income -
        AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)
    old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)
    # Old law: the cheaper filing choice, but never less than the AMT.
    tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)
    tax_new = min(new_tax_standard, new_tax_itemized)
    if display:
        print('Current Tax Should Pay: $%3.2f' % tax_old)
        print('	Standard: $%3.2f' % old_tax_standard)
        print('	Itemized: $%3.2f' % old_tax_itemized)
        print('	AMT tax: $%3.2f' % old_tax_AMT)
        print('New Tax Should Pay: $%3.2f' % tax_new)
        print('	Standard: $%3.2f' % new_tax_standard)
        print('	Itemized: $%3.2f' % new_tax_itemized)
    if detail:
        print('***********************************************')
        print('${:,} taxable income'.format(taxable_income) +
            ', joint = %r' % joint)
        print('%d Family Member, %d child(ren)' % (member, child))
        print('Existing Mortgage: %r' % existing_mtg +
            ', ${:,} Mortgage Balance'.format(UPB) +
            ', %3.2f%% Interest Rate' % (rate * 100))
        print('${:,} Local Tax'.format(local_tax) +
            ', %d%% State/City Tax Rate' % (efficient_state_rate * 100))
        print('***********************************************')
        # Side-by-side per-item breakdown (third-party BeautifulTable).
        table = BeautifulTable()
        table.column_headers = ['Item', 'Current', 'New']
        table.append_row(['Standard Deduction', old_standard_deduction,
            new_standard_deduction])
        table.append_row(['Personal Exemption',
            old_PersonalExemption_deduction, 'NA'])
        table.append_row(['Child Care Tax Credit', old_ChildCare_Credit,
            new_ChildCare_Credit])
        table.append_row(['Mortgage Interest Deduction',
            old_MTG_IR_deduction, new_MTG_IR_deduction])
        table.append_row(['State and Local Tax Deduction',
            old_SALT_deduction, new_SALT_deduction])
        table.append_row(['AMT Exemption (not including MTG Interest)',
            AMT_exemption_amount, 'NA'])
        table.append_row(['Tax', tax_old, tax_new])
        print(table)
    return [tax_old, tax_new, old_tax_standard, new_tax_standard,
        old_tax_itemized, new_tax_itemized, old_tax_AMT]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from beautifultable import BeautifulTable
def tax_calculator(taxable_income, bracket, rate):
    """Progressive tax: apply each marginal rate to the slice of income that
    falls inside its bracket and return the total owed."""
    # Upper bound of each bracket; the top bracket is unbounded.
    upper_bounds = bracket[1:] + [float('Inf')]
    widths = [hi - lo for lo, hi in zip(bracket, upper_bounds)]
    total = 0
    for lo, width, marginal_rate in zip(bracket, widths, rate):
        # Portion of income inside this bracket, clamped to [0, width].
        segment = min(max(0, taxable_income - lo), width)
        total += segment * marginal_rate
    return total
def old_bracket(taxable_income, joint=True):
rate = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]
if not joint:
bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]
else:
bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]
return tax_calculator(taxable_income, bracket, rate)
def new_bracket(taxable_income, joint=True):
rate = [0.12, 0.25, 0.35, 0.396]
if not joint:
bracket = [0, 45000, 200000, 500000]
else:
bracket = [0, 90000, 260000, 1000000]
return tax_calculator(taxable_income, bracket, rate)
def AMT_bracket(taxable_income, joint=True):
rate = [0.26, 0.28]
if not joint:
bracket = [0, 93900]
else:
bracket = [0, 187800]
return tax_calculator(taxable_income, bracket, rate)
<|reserved_special_token_0|>
def MTG_IR_deduction_old(UPB, rate):
return min(1000000.0, UPB) * rate
def MTG_IR_deduction_new(UPB, rate, existing_mtg=False):
if existing_mtg:
return min(1000000.0, UPB) * rate
else:
return min(750000.0, UPB) * rate
def SALT_deduction_old(taxable_income, efficient_state_rate, local_tax):
return taxable_income * efficient_state_rate + local_tax
def SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):
return min(10000.0, taxable_income * efficient_state_rate + local_tax)
def PersonalExemption_deduction_old(taxable_income, member, joint=True):
if joint:
phaseout = min(0.02 * round(max(taxable_income - 311300, 0) / 2500 +
1e-07), 1)
return int(4050 * member * (1 - phaseout))
else:
phaseout = min(0.02 * round(max(taxable_income - 259000, 0) / 2500 +
1e-07), 1)
return int(4050 * member * (1 - phaseout))
def PersonalExemption_deduction_new():
return 0
def ChildCare_Credit_old(taxable_income, child, joint=True):
if joint:
phaseout = round(max(taxable_income - 110000, 0) / 20 + 1e-07)
return int(max(0, 1000 * child - phaseout))
else:
phaseout = round(max(taxable_income - 55000, 0) / 20 + 1e-07)
return int(max(0, 1000 * child - phaseout))
def ChildCare_Credit_new(taxable_income, child, joint=True):
if joint:
phaseout = round(max(taxable_income - 230000, 0) / 20 + 1e-07)
return int(max(0, 1600 * child - phaseout))
else:
phaseout = round(max(taxable_income - 115000, 0) / 20 + 1e-07)
return int(max(0, 1600 * child - phaseout))
def AMT_exemption(taxable_income, joint=True):
if joint:
return max(0, 84500 - max(taxable_income - 160900, 0) / 4)
else:
return max(0, 54300 - max(taxable_income - 120700, 0) / 4)
def tax_comparison(taxable_income, member, child, UPB, rate,
efficient_state_rate, local_tax, joint=True, existing_mtg=False,
display=True, detail=False):
old_PersonalExemption_deduction = PersonalExemption_deduction_old(
taxable_income, member, joint=joint)
old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child,
joint=joint)
new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child,
joint=joint)
old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)
new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=
existing_mtg)
old_SALT_deduction = SALT_deduction_old(taxable_income,
efficient_state_rate, local_tax)
new_SALT_deduction = SALT_deduction_new(taxable_income,
efficient_state_rate, local_tax)
if joint:
old_standard_deduction = 12600
new_standard_deduction = 24000
else:
old_standard_deduction = 6300
new_standard_deduction = 12000
old_tax_beforeCCTC_standard = old_bracket(taxable_income -
old_standard_deduction - old_PersonalExemption_deduction, joint=joint)
new_tax_beforeCCTC_standard = new_bracket(taxable_income -
new_standard_deduction, joint=joint)
old_tax_standard = max(0, old_tax_beforeCCTC_standard -
old_ChildCare_Credit)
new_tax_standard = max(0, new_tax_beforeCCTC_standard -
new_ChildCare_Credit)
old_tax_beforeCCTC_itemized = old_bracket(taxable_income -
old_MTG_IR_deduction - old_SALT_deduction -
old_PersonalExemption_deduction, joint=joint)
new_tax_beforeCCTC_itemized = new_bracket(taxable_income -
new_MTG_IR_deduction - new_SALT_deduction, joint=joint)
old_tax_itemized = max(0, old_tax_beforeCCTC_itemized -
old_ChildCare_Credit)
new_tax_itemized = max(0, new_tax_beforeCCTC_itemized -
new_ChildCare_Credit)
AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)
old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income -
AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)
old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)
tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)
tax_new = min(new_tax_standard, new_tax_itemized)
if display:
print('Current Tax Should Pay: $%3.2f' % tax_old)
print(' Standard: $%3.2f' % old_tax_standard)
print(' Itemized: $%3.2f' % old_tax_itemized)
print(' AMT tax: $%3.2f' % old_tax_AMT)
print('New Tax Should Pay: $%3.2f' % tax_new)
print(' Standard: $%3.2f' % new_tax_standard)
print(' Itemized: $%3.2f' % new_tax_itemized)
if detail:
print('***********************************************')
print('${:,} taxable income'.format(taxable_income) +
', joint = %r' % joint)
print('%d Family Member, %d child(ren)' % (member, child))
print('Existing Mortgage: %r' % existing_mtg +
', ${:,} Mortgage Balance'.format(UPB) +
', %3.2f%% Interest Rate' % (rate * 100))
print('${:,} Local Tax'.format(local_tax) +
', %d%% State/City Tax Rate' % (efficient_state_rate * 100))
print('***********************************************')
table = BeautifulTable()
table.column_headers = ['Item', 'Current', 'New']
table.append_row(['Standard Deduction', old_standard_deduction,
new_standard_deduction])
table.append_row(['Personal Exemption',
old_PersonalExemption_deduction, 'NA'])
table.append_row(['Child Care Tax Credit', old_ChildCare_Credit,
new_ChildCare_Credit])
table.append_row(['Mortgage Interest Deduction',
old_MTG_IR_deduction, new_MTG_IR_deduction])
table.append_row(['State and Local Tax Deduction',
old_SALT_deduction, new_SALT_deduction])
table.append_row(['AMT Exemption (not including MTG Interest)',
AMT_exemption_amount, 'NA'])
table.append_row(['Tax', tax_old, tax_new])
print(table)
return [tax_old, tax_new, old_tax_standard, new_tax_standard,
old_tax_itemized, new_tax_itemized, old_tax_AMT]
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Nov 5 11:56:41 2017
@author: cgao
"""
from beautifultable import BeautifulTable
#1. Old vs. new tax-rate bracket schedules
def tax_calculator(taxable_income, bracket, rate):
    """Compute tax owed on *taxable_income* under a progressive schedule.

    bracket -- ascending lower bounds of each tax band (first entry usually 0)
    rate    -- marginal rate applied to the income falling within each band
    Returns the total tax as a number (float in the typical case).
    """
    total = 0.0
    # Upper bound of each band is the next band's lower bound; the top
    # band is open-ended.
    uppers = list(bracket[1:]) + [float('inf')]
    for lower, upper, marginal in zip(bracket, uppers, rate):
        # Portion of income inside this band, clamped to [0, band width].
        portion = min(max(0, taxable_income - lower), upper - lower)
        total += portion * marginal
    return total
def old_bracket(taxable_income, joint=True):
    """Tax under the current-law (pre-change) federal bracket schedule."""
    rates = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]
    if joint:
        thresholds = [0, 18650, 75900, 153100, 233350, 416700, 470700]
    else:
        thresholds = [0, 9325, 37950, 91900, 191650, 416700, 418400]
    return tax_calculator(taxable_income, thresholds, rates)
def new_bracket(taxable_income, joint=True):
    """Tax under the proposed new-law bracket schedule (four bands)."""
    rates = [0.12, 0.25, 0.35, 0.396]
    if joint:
        thresholds = [0, 90000, 260000, 1000000]
    else:
        thresholds = [0, 45000, 200000, 500000]
    return tax_calculator(taxable_income, thresholds, rates)
def AMT_bracket(taxable_income, joint=True):
    """Tax under the Alternative Minimum Tax two-rate schedule."""
    rates = [0.26, 0.28]
    if joint:
        thresholds = [0, 187800]
    else:
        thresholds = [0, 93900]
    return tax_calculator(taxable_income, thresholds, rates)
#2. Increased standard deduction amounts
'''
if joint:
old_standard_deduction = 12600
new_standard_deduction = 24000
else:
old_standard_deduction = 6300
new_standard_deduction = 12000
'''
#3. Reduced mortgage interest deduction
def MTG_IR_deduction_old(UPB, rate):
    """Old-law mortgage interest deduction: interest on at most $1M of balance."""
    capped_balance = UPB if UPB < 1000000.0 else 1000000.0
    return capped_balance * rate
# existing_mtg = True: existing loan, grandfathered $1.0M balance limit
def MTG_IR_deduction_new(UPB, rate, existing_mtg=False):
    """New-law mortgage interest deduction.

    Loans that predate the law change (existing_mtg=True) keep the
    grandfathered $1M balance cap; new loans are capped at $750k.
    """
    cap = 1000000.0 if existing_mtg else 750000.0
    return min(cap, UPB) * rate
#4. Reduced state and local tax (SALT) deduction (property tax, etc.)
def SALT_deduction_old(taxable_income, efficient_state_rate, local_tax):
    """Old-law SALT deduction: state income tax plus local taxes, uncapped."""
    state_tax = taxable_income * efficient_state_rate
    return state_tax + local_tax
def SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):
    """New-law SALT deduction: same total as old law, but capped at $10,000."""
    uncapped = taxable_income * efficient_state_rate + local_tax
    return uncapped if uncapped < 10000.0 else 10000.0
#5. Personal exemption repealed
def PersonalExemption_deduction_old(taxable_income, member, joint = True):
if joint:
phaseout = min(0.02*round((max(taxable_income - 311300, 0)/2500 + 1e-7)), 1)
return int(4050*member*(1 - phaseout))
else:
phaseout = min(0.02*round(max(taxable_income - 259000, 0)/2500 + 1e-7), 1)
return int(4050*member*(1 - phaseout))
def PersonalExemption_deduction_new():
    """New law eliminates the personal exemption entirely."""
    return 0
#6. Child Care Tax Credit
def ChildCare_Credit_old(taxable_income, child, joint=True):
    """Old-law child tax credit: $1,000 per child.

    Reduced by $1 for every $20 of income above the filing-status
    threshold (i.e. $50 per $1,000), floored at zero.
    """
    threshold = 110000 if joint else 55000
    # 1e-7 nudge avoids banker's-rounding ties on exact half values.
    reduction = round(max(taxable_income - threshold, 0) / 20 + 1e-07)
    return int(max(0, 1000 * child - reduction))
def ChildCare_Credit_new(taxable_income, child, joint=True):
    """New-law child tax credit: $1,600 per child.

    Same $1-per-$20-of-excess-income phaseout as the old law, but with
    higher income thresholds; floored at zero.
    """
    threshold = 230000 if joint else 115000
    # 1e-7 nudge avoids banker's-rounding ties on exact half values.
    reduction = round(max(taxable_income - threshold, 0) / 20 + 1e-07)
    return int(max(0, 1600 * child - reduction))
#7. AMT (Alternative Minimum Tax) repealed
def AMT_exemption(taxable_income, joint=True):
    """AMT exemption amount, phased out $1 per $4 of income over the threshold."""
    if joint:
        base, threshold = 84500, 160900
    else:
        base, threshold = 54300, 120700
    return max(0, base - max(taxable_income - threshold, 0) / 4)
#8. Estate tax phased out gradually
#9. Combined impact
def tax_comparison(taxable_income, member, child, UPB, rate, efficient_state_rate, local_tax, joint = True, existing_mtg = False, display = True, detail = False):
    """Compare federal tax owed under current (old) law vs. the proposed new law.

    Parameters:
        taxable_income: income subject to tax, before deductions/exemptions
        member: number of family members (drives the personal exemption)
        child: number of children (drives the child care tax credit)
        UPB: unpaid principal balance of the mortgage
        rate: mortgage interest rate (e.g. 0.04 for 4%)
        efficient_state_rate: effective state/city income-tax rate
        local_tax: other local taxes paid (e.g. property tax)
        joint: True = married filing jointly, False = single
        existing_mtg: True if the loan predates the new law (grandfathered $1M cap)
        display: print the bottom-line tax amounts for both laws
        detail: also print the inputs and a line-item comparison table

    Returns:
        [tax_old, tax_new, old_tax_standard, new_tax_standard,
         old_tax_itemized, new_tax_itemized, old_tax_AMT]
    """
    # Personal exemption (applies to both standard and itemized; repealed under new law)
    old_PersonalExemption_deduction = PersonalExemption_deduction_old(taxable_income, member, joint = joint)
    # Child care tax credit (applies to both standard and itemized)
    old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child, joint = joint)
    new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child, joint = joint)
    # Mortgage interest deduction (applies to itemized and AMT)
    old_MTG_IR_deduction= MTG_IR_deduction_old(UPB, rate)
    new_MTG_IR_deduction= MTG_IR_deduction_new(UPB, rate, existing_mtg = existing_mtg)
    # State and local tax deduction (applies to itemized only; capped at $10k under new law)
    old_SALT_deduction = SALT_deduction_old(taxable_income, efficient_state_rate, local_tax)
    new_SALT_deduction = SALT_deduction_new(taxable_income, efficient_state_rate, local_tax)
    # Standard deduction amounts by filing status (new law roughly doubles them)
    if joint:
        old_standard_deduction = 12600
        new_standard_deduction = 24000
    else:
        old_standard_deduction = 6300
        new_standard_deduction = 12000
    # Standard-deduction path: tax before the child care credit
    old_tax_beforeCCTC_standard = old_bracket(taxable_income - old_standard_deduction - old_PersonalExemption_deduction, joint = joint)
    new_tax_beforeCCTC_standard = new_bracket(taxable_income - new_standard_deduction, joint = joint)
    # ... and after the child care credit (never below zero)
    old_tax_standard = max(0, old_tax_beforeCCTC_standard - old_ChildCare_Credit)
    new_tax_standard = max(0, new_tax_beforeCCTC_standard - new_ChildCare_Credit)
    # Itemized path
    # tax before the child care credit
    old_tax_beforeCCTC_itemized = old_bracket(taxable_income - old_MTG_IR_deduction - old_SALT_deduction - old_PersonalExemption_deduction, joint = joint)
    new_tax_beforeCCTC_itemized = new_bracket(taxable_income - new_MTG_IR_deduction - new_SALT_deduction, joint = joint)
    # ... and after the child care credit
    old_tax_itemized = max(0, old_tax_beforeCCTC_itemized - old_ChildCare_Credit)
    new_tax_itemized = max(0, new_tax_beforeCCTC_itemized - new_ChildCare_Credit)
    # AMT path (old law only; the new law has no AMT in this model)
    AMT_exemption_amount = AMT_exemption(taxable_income, joint = joint)
    # NOTE(review): only mortgage interest is deducted from the AMT base here;
    # SALT is implicitly disallowed under AMT — confirm that is intended.
    old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income - AMT_exemption_amount - old_MTG_IR_deduction, joint = joint)
    # AMT tax after the child care credit
    old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)
    # Old law: the better of standard/itemized, but never below the AMT floor.
    tax_old = max(min(old_tax_standard, old_tax_itemized),old_tax_AMT)
    # New law: no AMT, so simply the better of standard/itemized.
    tax_new = min(new_tax_standard, new_tax_itemized)
    if display:
        print("Current Tax Should Pay: $%3.2f"%tax_old)
        print("    Standard: $%3.2f"%old_tax_standard)
        print("    Itemized: $%3.2f"%old_tax_itemized)
        print("    AMT tax:  $%3.2f"%old_tax_AMT)
        print("New Tax Should Pay: $%3.2f"%tax_new)
        print("    Standard: $%3.2f"%new_tax_standard)
        print("    Itemized: $%3.2f"%new_tax_itemized)
    if detail:
        print("***********************************************")
        print("${:,} taxable income".format(taxable_income) + ', joint = %r'%joint)
        print("%d Family Member, %d child(ren)"%(member, child))
        print('Existing Mortgage: %r'%existing_mtg + ', ${:,} Mortgage Balance'.format(UPB) + ', %3.2f%% Interest Rate'%(rate*100),)
        print('${:,} Local Tax'.format(local_tax) + ', %d%% State/City Tax Rate'%(efficient_state_rate*100),)
        print("***********************************************")
        # Line-item side-by-side table of every deduction/credit component.
        table = BeautifulTable()
        table.column_headers = ["Item", "Current", "New"]
        table.append_row(["Standard Deduction", old_standard_deduction, new_standard_deduction])
        table.append_row(["Personal Exemption", old_PersonalExemption_deduction, 'NA'])
        table.append_row(["Child Care Tax Credit", old_ChildCare_Credit, new_ChildCare_Credit])
        table.append_row(["Mortgage Interest Deduction", old_MTG_IR_deduction, new_MTG_IR_deduction])
        table.append_row(["State and Local Tax Deduction", old_SALT_deduction, new_SALT_deduction])
        table.append_row(["AMT Exemption (not including MTG Interest)", AMT_exemption_amount, "NA"])
        table.append_row(["Tax", tax_old, tax_new])
        print(table)
    return [tax_old, tax_new, old_tax_standard, new_tax_standard, old_tax_itemized, new_tax_itemized, old_tax_AMT]
|
flexible
|
{
"blob_id": "70cb5673a13967247b6da1fa5948000db39a92c8",
"index": 7253,
"step-1": "<mask token>\n\n\ndef old_bracket(taxable_income, joint=True):\n rate = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]\n if not joint:\n bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]\n else:\n bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]\n return tax_calculator(taxable_income, bracket, rate)\n\n\n<mask token>\n\n\ndef MTG_IR_deduction_old(UPB, rate):\n return min(1000000.0, UPB) * rate\n\n\n<mask token>\n\n\ndef SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):\n return min(10000.0, taxable_income * efficient_state_rate + local_tax)\n\n\n<mask token>\n\n\ndef PersonalExemption_deduction_new():\n return 0\n\n\n<mask token>\n\n\ndef ChildCare_Credit_new(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 230000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 115000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n\n\ndef AMT_exemption(taxable_income, joint=True):\n if joint:\n return max(0, 84500 - max(taxable_income - 160900, 0) / 4)\n else:\n return max(0, 54300 - max(taxable_income - 120700, 0) / 4)\n\n\ndef tax_comparison(taxable_income, member, child, UPB, rate,\n efficient_state_rate, local_tax, joint=True, existing_mtg=False,\n display=True, detail=False):\n old_PersonalExemption_deduction = PersonalExemption_deduction_old(\n taxable_income, member, joint=joint)\n old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child,\n joint=joint)\n new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child,\n joint=joint)\n old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)\n new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=\n existing_mtg)\n old_SALT_deduction = SALT_deduction_old(taxable_income,\n efficient_state_rate, local_tax)\n new_SALT_deduction = SALT_deduction_new(taxable_income,\n efficient_state_rate, local_tax)\n if joint:\n old_standard_deduction = 
12600\n new_standard_deduction = 24000\n else:\n old_standard_deduction = 6300\n new_standard_deduction = 12000\n old_tax_beforeCCTC_standard = old_bracket(taxable_income -\n old_standard_deduction - old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_standard = new_bracket(taxable_income -\n new_standard_deduction, joint=joint)\n old_tax_standard = max(0, old_tax_beforeCCTC_standard -\n old_ChildCare_Credit)\n new_tax_standard = max(0, new_tax_beforeCCTC_standard -\n new_ChildCare_Credit)\n old_tax_beforeCCTC_itemized = old_bracket(taxable_income -\n old_MTG_IR_deduction - old_SALT_deduction -\n old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_itemized = new_bracket(taxable_income -\n new_MTG_IR_deduction - new_SALT_deduction, joint=joint)\n old_tax_itemized = max(0, old_tax_beforeCCTC_itemized -\n old_ChildCare_Credit)\n new_tax_itemized = max(0, new_tax_beforeCCTC_itemized -\n new_ChildCare_Credit)\n AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)\n old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income -\n AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)\n old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)\n tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)\n tax_new = min(new_tax_standard, new_tax_itemized)\n if display:\n print('Current Tax Should Pay: $%3.2f' % tax_old)\n print(' Standard: $%3.2f' % old_tax_standard)\n print(' Itemized: $%3.2f' % old_tax_itemized)\n print(' AMT tax: $%3.2f' % old_tax_AMT)\n print('New Tax Should Pay: $%3.2f' % tax_new)\n print(' Standard: $%3.2f' % new_tax_standard)\n print(' Itemized: $%3.2f' % new_tax_itemized)\n if detail:\n print('***********************************************')\n print('${:,} taxable income'.format(taxable_income) + \n ', joint = %r' % joint)\n print('%d Family Member, %d child(ren)' % (member, child))\n print('Existing Mortgage: %r' % existing_mtg +\n ', ${:,} Mortgage Balance'.format(UPB) + \n ', %3.2f%% 
Interest Rate' % (rate * 100))\n print('${:,} Local Tax'.format(local_tax) + \n ', %d%% State/City Tax Rate' % (efficient_state_rate * 100))\n print('***********************************************')\n table = BeautifulTable()\n table.column_headers = ['Item', 'Current', 'New']\n table.append_row(['Standard Deduction', old_standard_deduction,\n new_standard_deduction])\n table.append_row(['Personal Exemption',\n old_PersonalExemption_deduction, 'NA'])\n table.append_row(['Child Care Tax Credit', old_ChildCare_Credit,\n new_ChildCare_Credit])\n table.append_row(['Mortgage Interest Deduction',\n old_MTG_IR_deduction, new_MTG_IR_deduction])\n table.append_row(['State and Local Tax Deduction',\n old_SALT_deduction, new_SALT_deduction])\n table.append_row(['AMT Exemption (not including MTG Interest)',\n AMT_exemption_amount, 'NA'])\n table.append_row(['Tax', tax_old, tax_new])\n print(table)\n return [tax_old, tax_new, old_tax_standard, new_tax_standard,\n old_tax_itemized, new_tax_itemized, old_tax_AMT]\n",
"step-2": "<mask token>\n\n\ndef old_bracket(taxable_income, joint=True):\n rate = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]\n if not joint:\n bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]\n else:\n bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]\n return tax_calculator(taxable_income, bracket, rate)\n\n\ndef new_bracket(taxable_income, joint=True):\n rate = [0.12, 0.25, 0.35, 0.396]\n if not joint:\n bracket = [0, 45000, 200000, 500000]\n else:\n bracket = [0, 90000, 260000, 1000000]\n return tax_calculator(taxable_income, bracket, rate)\n\n\n<mask token>\n\n\ndef MTG_IR_deduction_old(UPB, rate):\n return min(1000000.0, UPB) * rate\n\n\ndef MTG_IR_deduction_new(UPB, rate, existing_mtg=False):\n if existing_mtg:\n return min(1000000.0, UPB) * rate\n else:\n return min(750000.0, UPB) * rate\n\n\n<mask token>\n\n\ndef SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):\n return min(10000.0, taxable_income * efficient_state_rate + local_tax)\n\n\n<mask token>\n\n\ndef PersonalExemption_deduction_new():\n return 0\n\n\ndef ChildCare_Credit_old(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 110000, 0) / 20 + 1e-07)\n return int(max(0, 1000 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 55000, 0) / 20 + 1e-07)\n return int(max(0, 1000 * child - phaseout))\n\n\ndef ChildCare_Credit_new(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 230000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 115000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n\n\ndef AMT_exemption(taxable_income, joint=True):\n if joint:\n return max(0, 84500 - max(taxable_income - 160900, 0) / 4)\n else:\n return max(0, 54300 - max(taxable_income - 120700, 0) / 4)\n\n\ndef tax_comparison(taxable_income, member, child, UPB, rate,\n efficient_state_rate, local_tax, joint=True, 
existing_mtg=False,\n display=True, detail=False):\n old_PersonalExemption_deduction = PersonalExemption_deduction_old(\n taxable_income, member, joint=joint)\n old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child,\n joint=joint)\n new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child,\n joint=joint)\n old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)\n new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=\n existing_mtg)\n old_SALT_deduction = SALT_deduction_old(taxable_income,\n efficient_state_rate, local_tax)\n new_SALT_deduction = SALT_deduction_new(taxable_income,\n efficient_state_rate, local_tax)\n if joint:\n old_standard_deduction = 12600\n new_standard_deduction = 24000\n else:\n old_standard_deduction = 6300\n new_standard_deduction = 12000\n old_tax_beforeCCTC_standard = old_bracket(taxable_income -\n old_standard_deduction - old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_standard = new_bracket(taxable_income -\n new_standard_deduction, joint=joint)\n old_tax_standard = max(0, old_tax_beforeCCTC_standard -\n old_ChildCare_Credit)\n new_tax_standard = max(0, new_tax_beforeCCTC_standard -\n new_ChildCare_Credit)\n old_tax_beforeCCTC_itemized = old_bracket(taxable_income -\n old_MTG_IR_deduction - old_SALT_deduction -\n old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_itemized = new_bracket(taxable_income -\n new_MTG_IR_deduction - new_SALT_deduction, joint=joint)\n old_tax_itemized = max(0, old_tax_beforeCCTC_itemized -\n old_ChildCare_Credit)\n new_tax_itemized = max(0, new_tax_beforeCCTC_itemized -\n new_ChildCare_Credit)\n AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)\n old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income -\n AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)\n old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)\n tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)\n tax_new = 
min(new_tax_standard, new_tax_itemized)\n if display:\n print('Current Tax Should Pay: $%3.2f' % tax_old)\n print(' Standard: $%3.2f' % old_tax_standard)\n print(' Itemized: $%3.2f' % old_tax_itemized)\n print(' AMT tax: $%3.2f' % old_tax_AMT)\n print('New Tax Should Pay: $%3.2f' % tax_new)\n print(' Standard: $%3.2f' % new_tax_standard)\n print(' Itemized: $%3.2f' % new_tax_itemized)\n if detail:\n print('***********************************************')\n print('${:,} taxable income'.format(taxable_income) + \n ', joint = %r' % joint)\n print('%d Family Member, %d child(ren)' % (member, child))\n print('Existing Mortgage: %r' % existing_mtg +\n ', ${:,} Mortgage Balance'.format(UPB) + \n ', %3.2f%% Interest Rate' % (rate * 100))\n print('${:,} Local Tax'.format(local_tax) + \n ', %d%% State/City Tax Rate' % (efficient_state_rate * 100))\n print('***********************************************')\n table = BeautifulTable()\n table.column_headers = ['Item', 'Current', 'New']\n table.append_row(['Standard Deduction', old_standard_deduction,\n new_standard_deduction])\n table.append_row(['Personal Exemption',\n old_PersonalExemption_deduction, 'NA'])\n table.append_row(['Child Care Tax Credit', old_ChildCare_Credit,\n new_ChildCare_Credit])\n table.append_row(['Mortgage Interest Deduction',\n old_MTG_IR_deduction, new_MTG_IR_deduction])\n table.append_row(['State and Local Tax Deduction',\n old_SALT_deduction, new_SALT_deduction])\n table.append_row(['AMT Exemption (not including MTG Interest)',\n AMT_exemption_amount, 'NA'])\n table.append_row(['Tax', tax_old, tax_new])\n print(table)\n return [tax_old, tax_new, old_tax_standard, new_tax_standard,\n old_tax_itemized, new_tax_itemized, old_tax_AMT]\n",
"step-3": "<mask token>\n\n\ndef old_bracket(taxable_income, joint=True):\n rate = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]\n if not joint:\n bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]\n else:\n bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]\n return tax_calculator(taxable_income, bracket, rate)\n\n\ndef new_bracket(taxable_income, joint=True):\n rate = [0.12, 0.25, 0.35, 0.396]\n if not joint:\n bracket = [0, 45000, 200000, 500000]\n else:\n bracket = [0, 90000, 260000, 1000000]\n return tax_calculator(taxable_income, bracket, rate)\n\n\ndef AMT_bracket(taxable_income, joint=True):\n rate = [0.26, 0.28]\n if not joint:\n bracket = [0, 93900]\n else:\n bracket = [0, 187800]\n return tax_calculator(taxable_income, bracket, rate)\n\n\n<mask token>\n\n\ndef MTG_IR_deduction_old(UPB, rate):\n return min(1000000.0, UPB) * rate\n\n\ndef MTG_IR_deduction_new(UPB, rate, existing_mtg=False):\n if existing_mtg:\n return min(1000000.0, UPB) * rate\n else:\n return min(750000.0, UPB) * rate\n\n\ndef SALT_deduction_old(taxable_income, efficient_state_rate, local_tax):\n return taxable_income * efficient_state_rate + local_tax\n\n\ndef SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):\n return min(10000.0, taxable_income * efficient_state_rate + local_tax)\n\n\ndef PersonalExemption_deduction_old(taxable_income, member, joint=True):\n if joint:\n phaseout = min(0.02 * round(max(taxable_income - 311300, 0) / 2500 +\n 1e-07), 1)\n return int(4050 * member * (1 - phaseout))\n else:\n phaseout = min(0.02 * round(max(taxable_income - 259000, 0) / 2500 +\n 1e-07), 1)\n return int(4050 * member * (1 - phaseout))\n\n\ndef PersonalExemption_deduction_new():\n return 0\n\n\ndef ChildCare_Credit_old(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 110000, 0) / 20 + 1e-07)\n return int(max(0, 1000 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 55000, 0) / 20 + 1e-07)\n return 
int(max(0, 1000 * child - phaseout))\n\n\ndef ChildCare_Credit_new(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 230000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 115000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n\n\ndef AMT_exemption(taxable_income, joint=True):\n if joint:\n return max(0, 84500 - max(taxable_income - 160900, 0) / 4)\n else:\n return max(0, 54300 - max(taxable_income - 120700, 0) / 4)\n\n\ndef tax_comparison(taxable_income, member, child, UPB, rate,\n efficient_state_rate, local_tax, joint=True, existing_mtg=False,\n display=True, detail=False):\n old_PersonalExemption_deduction = PersonalExemption_deduction_old(\n taxable_income, member, joint=joint)\n old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child,\n joint=joint)\n new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child,\n joint=joint)\n old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)\n new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=\n existing_mtg)\n old_SALT_deduction = SALT_deduction_old(taxable_income,\n efficient_state_rate, local_tax)\n new_SALT_deduction = SALT_deduction_new(taxable_income,\n efficient_state_rate, local_tax)\n if joint:\n old_standard_deduction = 12600\n new_standard_deduction = 24000\n else:\n old_standard_deduction = 6300\n new_standard_deduction = 12000\n old_tax_beforeCCTC_standard = old_bracket(taxable_income -\n old_standard_deduction - old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_standard = new_bracket(taxable_income -\n new_standard_deduction, joint=joint)\n old_tax_standard = max(0, old_tax_beforeCCTC_standard -\n old_ChildCare_Credit)\n new_tax_standard = max(0, new_tax_beforeCCTC_standard -\n new_ChildCare_Credit)\n old_tax_beforeCCTC_itemized = old_bracket(taxable_income -\n old_MTG_IR_deduction - old_SALT_deduction -\n old_PersonalExemption_deduction, 
joint=joint)\n new_tax_beforeCCTC_itemized = new_bracket(taxable_income -\n new_MTG_IR_deduction - new_SALT_deduction, joint=joint)\n old_tax_itemized = max(0, old_tax_beforeCCTC_itemized -\n old_ChildCare_Credit)\n new_tax_itemized = max(0, new_tax_beforeCCTC_itemized -\n new_ChildCare_Credit)\n AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)\n old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income -\n AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)\n old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)\n tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)\n tax_new = min(new_tax_standard, new_tax_itemized)\n if display:\n print('Current Tax Should Pay: $%3.2f' % tax_old)\n print(' Standard: $%3.2f' % old_tax_standard)\n print(' Itemized: $%3.2f' % old_tax_itemized)\n print(' AMT tax: $%3.2f' % old_tax_AMT)\n print('New Tax Should Pay: $%3.2f' % tax_new)\n print(' Standard: $%3.2f' % new_tax_standard)\n print(' Itemized: $%3.2f' % new_tax_itemized)\n if detail:\n print('***********************************************')\n print('${:,} taxable income'.format(taxable_income) + \n ', joint = %r' % joint)\n print('%d Family Member, %d child(ren)' % (member, child))\n print('Existing Mortgage: %r' % existing_mtg +\n ', ${:,} Mortgage Balance'.format(UPB) + \n ', %3.2f%% Interest Rate' % (rate * 100))\n print('${:,} Local Tax'.format(local_tax) + \n ', %d%% State/City Tax Rate' % (efficient_state_rate * 100))\n print('***********************************************')\n table = BeautifulTable()\n table.column_headers = ['Item', 'Current', 'New']\n table.append_row(['Standard Deduction', old_standard_deduction,\n new_standard_deduction])\n table.append_row(['Personal Exemption',\n old_PersonalExemption_deduction, 'NA'])\n table.append_row(['Child Care Tax Credit', old_ChildCare_Credit,\n new_ChildCare_Credit])\n table.append_row(['Mortgage Interest Deduction',\n old_MTG_IR_deduction, new_MTG_IR_deduction])\n 
table.append_row(['State and Local Tax Deduction',\n old_SALT_deduction, new_SALT_deduction])\n table.append_row(['AMT Exemption (not including MTG Interest)',\n AMT_exemption_amount, 'NA'])\n table.append_row(['Tax', tax_old, tax_new])\n print(table)\n return [tax_old, tax_new, old_tax_standard, new_tax_standard,\n old_tax_itemized, new_tax_itemized, old_tax_AMT]\n",
"step-4": "<mask token>\nfrom beautifultable import BeautifulTable\n\n\ndef tax_calculator(taxable_income, bracket, rate):\n bracket2 = bracket[1:]\n bracket2.append(float('Inf'))\n bracket3 = [(y - x) for x, y in zip(bracket, bracket2)]\n income_seg = [min(max(0, taxable_income - x), y) for x, y in zip(\n bracket, bracket3)]\n return sum([(x * y) for x, y in zip(income_seg, rate)])\n\n\ndef old_bracket(taxable_income, joint=True):\n rate = [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]\n if not joint:\n bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]\n else:\n bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]\n return tax_calculator(taxable_income, bracket, rate)\n\n\ndef new_bracket(taxable_income, joint=True):\n rate = [0.12, 0.25, 0.35, 0.396]\n if not joint:\n bracket = [0, 45000, 200000, 500000]\n else:\n bracket = [0, 90000, 260000, 1000000]\n return tax_calculator(taxable_income, bracket, rate)\n\n\ndef AMT_bracket(taxable_income, joint=True):\n rate = [0.26, 0.28]\n if not joint:\n bracket = [0, 93900]\n else:\n bracket = [0, 187800]\n return tax_calculator(taxable_income, bracket, rate)\n\n\n<mask token>\n\n\ndef MTG_IR_deduction_old(UPB, rate):\n return min(1000000.0, UPB) * rate\n\n\ndef MTG_IR_deduction_new(UPB, rate, existing_mtg=False):\n if existing_mtg:\n return min(1000000.0, UPB) * rate\n else:\n return min(750000.0, UPB) * rate\n\n\ndef SALT_deduction_old(taxable_income, efficient_state_rate, local_tax):\n return taxable_income * efficient_state_rate + local_tax\n\n\ndef SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):\n return min(10000.0, taxable_income * efficient_state_rate + local_tax)\n\n\ndef PersonalExemption_deduction_old(taxable_income, member, joint=True):\n if joint:\n phaseout = min(0.02 * round(max(taxable_income - 311300, 0) / 2500 +\n 1e-07), 1)\n return int(4050 * member * (1 - phaseout))\n else:\n phaseout = min(0.02 * round(max(taxable_income - 259000, 0) / 2500 +\n 1e-07), 1)\n return 
int(4050 * member * (1 - phaseout))\n\n\ndef PersonalExemption_deduction_new():\n return 0\n\n\ndef ChildCare_Credit_old(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 110000, 0) / 20 + 1e-07)\n return int(max(0, 1000 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 55000, 0) / 20 + 1e-07)\n return int(max(0, 1000 * child - phaseout))\n\n\ndef ChildCare_Credit_new(taxable_income, child, joint=True):\n if joint:\n phaseout = round(max(taxable_income - 230000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n else:\n phaseout = round(max(taxable_income - 115000, 0) / 20 + 1e-07)\n return int(max(0, 1600 * child - phaseout))\n\n\ndef AMT_exemption(taxable_income, joint=True):\n if joint:\n return max(0, 84500 - max(taxable_income - 160900, 0) / 4)\n else:\n return max(0, 54300 - max(taxable_income - 120700, 0) / 4)\n\n\ndef tax_comparison(taxable_income, member, child, UPB, rate,\n efficient_state_rate, local_tax, joint=True, existing_mtg=False,\n display=True, detail=False):\n old_PersonalExemption_deduction = PersonalExemption_deduction_old(\n taxable_income, member, joint=joint)\n old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child,\n joint=joint)\n new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child,\n joint=joint)\n old_MTG_IR_deduction = MTG_IR_deduction_old(UPB, rate)\n new_MTG_IR_deduction = MTG_IR_deduction_new(UPB, rate, existing_mtg=\n existing_mtg)\n old_SALT_deduction = SALT_deduction_old(taxable_income,\n efficient_state_rate, local_tax)\n new_SALT_deduction = SALT_deduction_new(taxable_income,\n efficient_state_rate, local_tax)\n if joint:\n old_standard_deduction = 12600\n new_standard_deduction = 24000\n else:\n old_standard_deduction = 6300\n new_standard_deduction = 12000\n old_tax_beforeCCTC_standard = old_bracket(taxable_income -\n old_standard_deduction - old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_standard = 
new_bracket(taxable_income -\n new_standard_deduction, joint=joint)\n old_tax_standard = max(0, old_tax_beforeCCTC_standard -\n old_ChildCare_Credit)\n new_tax_standard = max(0, new_tax_beforeCCTC_standard -\n new_ChildCare_Credit)\n old_tax_beforeCCTC_itemized = old_bracket(taxable_income -\n old_MTG_IR_deduction - old_SALT_deduction -\n old_PersonalExemption_deduction, joint=joint)\n new_tax_beforeCCTC_itemized = new_bracket(taxable_income -\n new_MTG_IR_deduction - new_SALT_deduction, joint=joint)\n old_tax_itemized = max(0, old_tax_beforeCCTC_itemized -\n old_ChildCare_Credit)\n new_tax_itemized = max(0, new_tax_beforeCCTC_itemized -\n new_ChildCare_Credit)\n AMT_exemption_amount = AMT_exemption(taxable_income, joint=joint)\n old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income -\n AMT_exemption_amount - old_MTG_IR_deduction, joint=joint)\n old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)\n tax_old = max(min(old_tax_standard, old_tax_itemized), old_tax_AMT)\n tax_new = min(new_tax_standard, new_tax_itemized)\n if display:\n print('Current Tax Should Pay: $%3.2f' % tax_old)\n print(' Standard: $%3.2f' % old_tax_standard)\n print(' Itemized: $%3.2f' % old_tax_itemized)\n print(' AMT tax: $%3.2f' % old_tax_AMT)\n print('New Tax Should Pay: $%3.2f' % tax_new)\n print(' Standard: $%3.2f' % new_tax_standard)\n print(' Itemized: $%3.2f' % new_tax_itemized)\n if detail:\n print('***********************************************')\n print('${:,} taxable income'.format(taxable_income) + \n ', joint = %r' % joint)\n print('%d Family Member, %d child(ren)' % (member, child))\n print('Existing Mortgage: %r' % existing_mtg +\n ', ${:,} Mortgage Balance'.format(UPB) + \n ', %3.2f%% Interest Rate' % (rate * 100))\n print('${:,} Local Tax'.format(local_tax) + \n ', %d%% State/City Tax Rate' % (efficient_state_rate * 100))\n print('***********************************************')\n table = BeautifulTable()\n table.column_headers = ['Item', 'Current', 'New']\n 
table.append_row(['Standard Deduction', old_standard_deduction,\n new_standard_deduction])\n table.append_row(['Personal Exemption',\n old_PersonalExemption_deduction, 'NA'])\n table.append_row(['Child Care Tax Credit', old_ChildCare_Credit,\n new_ChildCare_Credit])\n table.append_row(['Mortgage Interest Deduction',\n old_MTG_IR_deduction, new_MTG_IR_deduction])\n table.append_row(['State and Local Tax Deduction',\n old_SALT_deduction, new_SALT_deduction])\n table.append_row(['AMT Exemption (not including MTG Interest)',\n AMT_exemption_amount, 'NA'])\n table.append_row(['Tax', tax_old, tax_new])\n print(table)\n return [tax_old, tax_new, old_tax_standard, new_tax_standard,\n old_tax_itemized, new_tax_itemized, old_tax_AMT]\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 5 11:56:41 2017\n\n@author: cgao\n\"\"\"\n\nfrom beautifultable import BeautifulTable\n\n\n\n#1. 新旧税率Bracket\ndef tax_calculator(taxable_income, bracket, rate):\n bracket2 = bracket[1:]\n bracket2.append(float('Inf'))\n bracket3 = [y-x for x,y in zip(bracket, bracket2)]\n income_seg = [min(max(0, taxable_income - x), y) for x, y in zip(bracket, bracket3)]\n return sum([x*y for x, y in zip(income_seg, rate)])\n\ndef old_bracket(taxable_income, joint = True):\n rate= [0.1, 0.15, 0.25, 0.28, 0.33, 0.35, 0.396]\n if not joint:\n bracket = [0, 9325, 37950, 91900, 191650, 416700, 418400]\n else:\n bracket = [0, 18650, 75900, 153100, 233350, 416700, 470700]\n return tax_calculator(taxable_income, bracket, rate) \n\ndef new_bracket(taxable_income, joint = True):\n rate= [0.12, 0.25, 0.35, 0.396]\n if not joint:\n bracket = [0, 45000, 200000, 500000]\n else:\n bracket = [0, 90000, 260000, 1000000]\n return tax_calculator(taxable_income, bracket, rate) \n\n\ndef AMT_bracket(taxable_income, joint = True):\n rate= [0.26, 0.28]\n if not joint:\n bracket = [0, 93900]\n else:\n bracket = [0, 187800]\n return tax_calculator(taxable_income, bracket, rate) \n\n#2. 增加标准扣除(Standard Deduction)额度\n'''\n if joint:\n old_standard_deduction = 12600\n new_standard_deduction = 24000\n else:\n old_standard_deduction = 6300\n new_standard_deduction = 12000\n'''\n\n#3. 减少利息扣除\ndef MTG_IR_deduction_old(UPB, rate):\n return min(1000000.0, UPB)*rate\n# existing_mtg = True: existing loan. Grand fathered 1.0 Million limit\ndef MTG_IR_deduction_new(UPB, rate, existing_mtg = False):\n if existing_mtg:\n return min(1000000.0, UPB)*rate\n else:\n return min(750000.0, UPB)*rate\n\n#4. 
减少州与地方税收(房产税等)扣除\ndef SALT_deduction_old(taxable_income, efficient_state_rate, local_tax):\n return taxable_income*efficient_state_rate + local_tax\n\ndef SALT_deduction_new(taxable_income, efficient_state_rate, local_tax):\n return min(10000.0, taxable_income*efficient_state_rate + local_tax)\n\n#5. 取消Personal Exemption\ndef PersonalExemption_deduction_old(taxable_income, member, joint = True):\n if joint:\n phaseout = min(0.02*round((max(taxable_income - 311300, 0)/2500 + 1e-7)), 1)\n return int(4050*member*(1 - phaseout))\n else:\n phaseout = min(0.02*round(max(taxable_income - 259000, 0)/2500 + 1e-7), 1)\n return int(4050*member*(1 - phaseout))\n \ndef PersonalExemption_deduction_new():\n return 0\n\n#6. Child Care Tax Credit\ndef ChildCare_Credit_old(taxable_income, child, joint = True):\n if joint:\n phaseout = round(max(taxable_income - 110000, 0)/20 + 1e-7)\n return int(max(0,1000*child - phaseout))\n else:\n phaseout = round(max(taxable_income - 55000, 0)/20 + 1e-7)\n return int(max(0,1000*child - phaseout))\n\n \ndef ChildCare_Credit_new(taxable_income, child, joint = True):\n if joint:\n phaseout = round(max(taxable_income - 230000, 0)/20 + 1e-7)\n return int(max(0,1600*child - phaseout))\n else:\n phaseout = round(max(taxable_income - 115000, 0)/20 + 1e-7)\n return int(max(0,1600*child - phaseout))\n \n#7. 取消AMT (Alternative Minimum Tax)\ndef AMT_exemption(taxable_income, joint = True):\n if joint:\n return max(0, 84500 - max(taxable_income - 160900, 0)/4)\n else:\n return max(0, 54300 - max(taxable_income - 120700, 0)/4)\n \n#8. 逐步取消遗产税 (Estate Tax)\n\n#9. 
综合影响\ndef tax_comparison(taxable_income, member, child, UPB, rate, efficient_state_rate, local_tax, joint = True, existing_mtg = False, display = True, detail = False):\n# Personal exemption (applied to both standard and itemized)\n old_PersonalExemption_deduction = PersonalExemption_deduction_old(taxable_income, member, joint = joint)\n# Child care tax credit (applied to both standard and itemized)\n old_ChildCare_Credit = ChildCare_Credit_old(taxable_income, child, joint = joint)\n new_ChildCare_Credit = ChildCare_Credit_new(taxable_income, child, joint = joint)\n# Mortgage Interest Rate deduction (applied to itemized and AMT)\n old_MTG_IR_deduction= MTG_IR_deduction_old(UPB, rate)\n new_MTG_IR_deduction= MTG_IR_deduction_new(UPB, rate, existing_mtg = existing_mtg)\n# State and local tax (applied to itemized only)\n old_SALT_deduction = SALT_deduction_old(taxable_income, efficient_state_rate, local_tax) \n new_SALT_deduction = SALT_deduction_new(taxable_income, efficient_state_rate, local_tax)\n# calculate standard tax\n if joint:\n old_standard_deduction = 12600\n new_standard_deduction = 24000\n else:\n old_standard_deduction = 6300\n new_standard_deduction = 12000\n # tax before Child care credit\n old_tax_beforeCCTC_standard = old_bracket(taxable_income - old_standard_deduction - old_PersonalExemption_deduction, joint = joint)\n new_tax_beforeCCTC_standard = new_bracket(taxable_income - new_standard_deduction, joint = joint)\n # tax before Child after credit\n old_tax_standard = max(0, old_tax_beforeCCTC_standard - old_ChildCare_Credit)\n new_tax_standard = max(0, new_tax_beforeCCTC_standard - new_ChildCare_Credit)\n# calculate itemized tax \n # tax before Child care credit\n old_tax_beforeCCTC_itemized = old_bracket(taxable_income - old_MTG_IR_deduction - old_SALT_deduction - old_PersonalExemption_deduction, joint = joint)\n new_tax_beforeCCTC_itemized = new_bracket(taxable_income - new_MTG_IR_deduction - new_SALT_deduction, joint = joint)\n # tax before 
Child after credit\n old_tax_itemized = max(0, old_tax_beforeCCTC_itemized - old_ChildCare_Credit)\n new_tax_itemized = max(0, new_tax_beforeCCTC_itemized - new_ChildCare_Credit)\n# calculate AMT tax \n AMT_exemption_amount = AMT_exemption(taxable_income, joint = joint)\n # tax before Child care credit\n old_tax_beforeCCTC_AMT = AMT_bracket(taxable_income - AMT_exemption_amount - old_MTG_IR_deduction, joint = joint)\n # tax before Child after credit\n old_tax_AMT = max(0, old_tax_beforeCCTC_AMT - old_ChildCare_Credit)\n tax_old = max(min(old_tax_standard, old_tax_itemized),old_tax_AMT)\n tax_new = min(new_tax_standard, new_tax_itemized)\n if display:\n print(\"Current Tax Should Pay: $%3.2f\"%tax_old)\n print(\" Standard: $%3.2f\"%old_tax_standard)\n print(\" Itemized: $%3.2f\"%old_tax_itemized)\n print(\" AMT tax: $%3.2f\"%old_tax_AMT)\n print(\"New Tax Should Pay: $%3.2f\"%tax_new)\n print(\" Standard: $%3.2f\"%new_tax_standard)\n print(\" Itemized: $%3.2f\"%new_tax_itemized) \n if detail:\n print(\"***********************************************\")\n print(\"${:,} taxable income\".format(taxable_income) + ', joint = %r'%joint)\n print(\"%d Family Member, %d child(ren)\"%(member, child))\n print('Existing Mortgage: %r'%existing_mtg + ', ${:,} Mortgage Balance'.format(UPB) + ', %3.2f%% Interest Rate'%(rate*100),)\n print('${:,} Local Tax'.format(local_tax) + ', %d%% State/City Tax Rate'%(efficient_state_rate*100),)\n print(\"***********************************************\")\n table = BeautifulTable()\n table.column_headers = [\"Item\", \"Current\", \"New\"]\n table.append_row([\"Standard Deduction\", old_standard_deduction, new_standard_deduction])\n table.append_row([\"Personal Exemption\", old_PersonalExemption_deduction, 'NA'])\n table.append_row([\"Child Care Tax Credit\", old_ChildCare_Credit, new_ChildCare_Credit])\n table.append_row([\"Mortgage Interest Deduction\", old_MTG_IR_deduction, new_MTG_IR_deduction])\n table.append_row([\"State and Local Tax 
Deduction\", old_SALT_deduction, new_SALT_deduction])\n table.append_row([\"AMT Exemption (not including MTG Interest)\", AMT_exemption_amount, \"NA\"])\n table.append_row([\"Tax\", tax_old, tax_new])\n print(table)\n return [tax_old, tax_new, old_tax_standard, new_tax_standard, old_tax_itemized, new_tax_itemized, old_tax_AMT]\n\n",
"step-ids": [
7,
10,
13,
15,
16
]
}
|
[
7,
10,
13,
15,
16
] |
marks = {
"S":"subject",
"O":"object",
"A":"attribute",
"C":"clause",
}
marks_reverse = {
"subject":"S",
"object":"O",
"attribute":"A",
"clause":"C",
}
|
normal
|
{
"blob_id": "c66b07c45f4a675a6c7fcec82048a3197910d0d8",
"index": 3435,
"step-1": "<mask token>\n",
"step-2": "marks = {'S': 'subject', 'O': 'object', 'A': 'attribute', 'C': 'clause'}\nmarks_reverse = {'subject': 'S', 'object': 'O', 'attribute': 'A', 'clause': 'C'\n }\n",
"step-3": "marks = {\n \"S\":\"subject\",\n \"O\":\"object\",\n \"A\":\"attribute\",\n \"C\":\"clause\",\n}\nmarks_reverse = {\n \"subject\":\"S\",\n \"object\":\"O\",\n \"attribute\":\"A\",\n \"clause\":\"C\",\n}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
import sys
total = 0
for line in sys.stdin:
edges = [int(x) for x in line.split("x")]
edges.sort()
ribbon = sum(x * 2 for x in edges[:2])
l, w, h = edges
bow = l * w * h
total += bow + ribbon
print(total)
|
normal
|
{
"blob_id": "ed85cb61f4bc8bf758dafb10ffbabf87fb4521d0",
"index": 9281,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in sys.stdin:\n edges = [int(x) for x in line.split('x')]\n edges.sort()\n ribbon = sum(x * 2 for x in edges[:2])\n l, w, h = edges\n bow = l * w * h\n total += bow + ribbon\nprint(total)\n",
"step-3": "<mask token>\ntotal = 0\nfor line in sys.stdin:\n edges = [int(x) for x in line.split('x')]\n edges.sort()\n ribbon = sum(x * 2 for x in edges[:2])\n l, w, h = edges\n bow = l * w * h\n total += bow + ribbon\nprint(total)\n",
"step-4": "import sys\ntotal = 0\nfor line in sys.stdin:\n edges = [int(x) for x in line.split('x')]\n edges.sort()\n ribbon = sum(x * 2 for x in edges[:2])\n l, w, h = edges\n bow = l * w * h\n total += bow + ribbon\nprint(total)\n",
"step-5": "#!/usr/bin/env python\n\nimport sys\n\ntotal = 0\nfor line in sys.stdin:\n edges = [int(x) for x in line.split(\"x\")]\n\n edges.sort()\n ribbon = sum(x * 2 for x in edges[:2])\n\n l, w, h = edges\n bow = l * w * h\n\n total += bow + ribbon\n\nprint(total)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.